CombinedText stringlengths 4 3.42M |
|---|
package actions
import (
"database/sql"
"fmt"
"net/http"
"os"
"time"
"github.com/news-maily/app/entities"
"github.com/news-maily/app/utils"
"github.com/gorilla/csrf"
valid "github.com/asaskevich/govalidator"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ses"
"github.com/gin-gonic/gin"
"github.com/news-maily/app/emails"
"github.com/news-maily/app/routes/middleware"
"github.com/news-maily/app/storage"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/bcrypt"
)
// GetMe returns the currently authenticated user (as resolved by the auth
// middleware) and echoes a fresh CSRF token in the X-CSRF-Token header.
func GetMe(c *gin.Context) {
	c.Header("X-CSRF-Token", csrf.Token(c.Request))
	c.JSON(http.StatusOK, middleware.GetUser(c))
}
// changePassParams is the form payload bound by ChangePassword.
type changePassParams struct {
	// Password is the user's current password (required).
	Password string `form:"password" valid:"required"`
	// NewPassword is the replacement password (required; minimum length is
	// enforced separately inside ChangePassword).
	NewPassword string `form:"new_password" valid:"required"`
}
// ChangePassword updates the authenticated user's password. It binds and
// validates the form parameters, verifies the current password against the
// stored bcrypt hash, and persists a bcrypt hash of the new password.
// Responses: 401 (no user), 422 (bad params / short password), 403 (wrong
// current password), 400 (hash or storage failure), 200 on success.
func ChangePassword(c *gin.Context) {
	u := middleware.GetUser(c)
	if u == nil {
		c.JSON(http.StatusUnauthorized, gin.H{
			"message": "Unable to fetch user.",
		})
		return
	}
	params := &changePassParams{}
	err := c.Bind(params)
	if err != nil {
		logrus.WithError(err).Error("Unable to bind params")
		c.JSON(http.StatusUnprocessableEntity, gin.H{
			"message": "Invalid parameters, please try again.",
		})
		return
	}
	v, err := valid.ValidateStruct(params)
	if !v {
		msg := "Unable to change password, invalid request parameters."
		if err != nil {
			msg = err.Error()
		}
		c.JSON(http.StatusUnprocessableEntity, gin.H{
			"message": msg,
		})
		return
	}
	if len(params.NewPassword) < 8 {
		// Fixed user-facing typo: "atleast" -> "at least".
		c.JSON(http.StatusUnprocessableEntity, gin.H{
			"new_password": "The new password must be at least 8 characters.",
		})
		return
	}
	err = bcrypt.CompareHashAndPassword([]byte(u.Password.String), []byte(params.Password))
	if err != nil {
		logrus.Errorf("Invalid credentials. %s", err)
		c.JSON(http.StatusForbidden, gin.H{
			"message": "The password that you entered is incorrect.",
		})
		return
	}
	hashedPassword, err := bcrypt.GenerateFromPassword([]byte(params.NewPassword), bcrypt.DefaultCost)
	if err != nil {
		// Was .Println, which bypasses the level system; log at error level
		// like the rest of the file.
		logrus.WithFields(logrus.Fields{
			"user": u.ID,
		}).Error(err)
		c.JSON(http.StatusBadRequest, gin.H{
			"message": "Unable to update your password. Please try again.",
		})
		return
	}
	u.Password = sql.NullString{
		String: string(hashedPassword),
		Valid:  true,
	}
	err = storage.UpdateUser(c, u)
	if err != nil {
		logrus.WithFields(logrus.Fields{
			"user": u.ID,
		}).Error(err)
		c.JSON(http.StatusBadRequest, gin.H{
			"message": "Unable to update your password. Please try again.",
		})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"message": "Your password was updated successfully.",
	})
}
// forgotPassParams is the form payload bound by PostForgotPassword.
type forgotPassParams struct {
	// Email is the address to send the reset link to; must be a valid email.
	Email string `form:"email" valid:"email"`
}
// PostForgotPassword initiates the password-reset flow: it creates a
// one-hour forgot-password token and emails a reset link. To avoid leaking
// which addresses are registered, it always answers 200 with the same
// message, regardless of whether the user exists or the email was sent.
func PostForgotPassword(c *gin.Context) {
	params := &forgotPassParams{}
	err := c.Bind(params)
	if err != nil {
		logrus.WithError(err).Error("Unable to bind params")
		c.JSON(http.StatusUnprocessableEntity, gin.H{
			"message": "Invalid parameters, please try again.",
		})
		return
	}
	v, err := valid.ValidateStruct(params)
	if !v {
		emailError := valid.ErrorByField(err, "Email")
		if emailError == "" {
			emailError = "Email must be in valid format."
		}
		c.JSON(http.StatusUnprocessableEntity, gin.H{
			"message": emailError,
		})
		return
	}
	u, err := storage.GetUserByUsername(c, params.Email)
	if err == nil {
		sender, err := emails.NewSesSender(
			os.Getenv("AWS_SES_ACCESS_KEY"),
			os.Getenv("AWS_SES_SECRET_KEY"),
			os.Getenv("AWS_SES_REGION"),
		)
		if err == nil {
			tokenStr, err := utils.GenerateRandomString(32)
			if err != nil {
				// BUG FIX: previously the flow continued with an empty token
				// string, persisting and emailing an unusable (and guessable)
				// token. Skip token creation and email on failure.
				logrus.WithError(err).Error("Unable to generate random string.")
			} else {
				t := &entities.Token{
					UserID:    u.ID,
					Token:     tokenStr,
					Type:      entities.ForgotPasswordTokenType,
					ExpiresAt: time.Now().Add(time.Hour * 1),
				}
				err = storage.CreateToken(c, t)
				if err != nil {
					logrus.WithError(err).Error("Cannot create token.")
				} else {
					// Fire-and-forget; sendForgotPasswordEmail logs failures.
					go sendForgotPasswordEmail(tokenStr, u.Email, sender)
				}
			}
		} else {
			logrus.WithError(err).Error("Unable to create SES sender.")
		}
	}
	c.JSON(http.StatusOK, gin.H{
		"message": "Email will be sent to you with the information on how to update your password.",
	})
}
// sendForgotPasswordEmail sends the SES "ForgotPassword" template to email,
// with a reset link built from the APP_URL env var and the token. It is
// invoked as a goroutine by PostForgotPassword; failures are logged, never
// propagated to the caller.
func sendForgotPasswordEmail(token, email string, sender emails.Sender) {
	url := os.Getenv("APP_URL") + "/forgot-password/" + token
	_, err := sender.SendTemplatedEmail(&ses.SendTemplatedEmailInput{
		Template:     aws.String("ForgotPassword"),
		Source:       aws.String(os.Getenv("SYSTEM_EMAIL_SOURCE")),
		TemplateData: aws.String(fmt.Sprintf(`{"url": "%s"}`, url)),
		Destination: &ses.Destination{
			ToAddresses: []*string{aws.String(email)},
		},
	})
	if err != nil {
		logrus.WithError(err).Error("forgot password email failure")
	}
}
// putForgotPassParams is the form payload bound by PutForgotPassword.
type putForgotPassParams struct {
	// Password is the new password (required; minimum length is enforced
	// separately inside PutForgotPassword).
	Password string `form:"password" valid:"required"`
}
// PutForgotPassword completes the password-reset flow: it resolves the
// forgot-password token from the URL, validates the new password, stores its
// bcrypt hash on the token's user, and deletes the token (best effort).
// Responses: 400 (bad token / hash / storage failure), 422 (bad params or
// short password), 404 (user gone), 200 on success.
func PutForgotPassword(c *gin.Context) {
	tokenStr := c.Param("token")
	t, err := storage.GetToken(c, tokenStr)
	if err != nil || t.Type != entities.ForgotPasswordTokenType {
		c.JSON(http.StatusBadRequest, gin.H{
			"message": "Unable to update your password. The token is invalid.",
		})
		return
	}
	params := &putForgotPassParams{}
	err = c.Bind(params)
	if err != nil {
		logrus.WithError(err).Error("Unable to bind params")
		c.JSON(http.StatusUnprocessableEntity, gin.H{
			"message": "Invalid parameters, please try again.",
		})
		return
	}
	v, err := valid.ValidateStruct(params)
	if !v {
		passError := valid.ErrorByField(err, "Password")
		if passError == "" {
			passError = "The password must not be empty."
		}
		c.JSON(http.StatusUnprocessableEntity, gin.H{
			"message": passError,
		})
		return
	}
	if len(params.Password) < 8 {
		// Fixed user-facing typo: "atleast" -> "at least".
		c.JSON(http.StatusUnprocessableEntity, gin.H{
			"password": "The new password must be at least 8 characters.",
		})
		return
	}
	user, err := storage.GetUser(c, t.UserID)
	if err != nil {
		c.JSON(http.StatusNotFound, gin.H{
			"message": "Unable to update your password. The user associated with the token is not found.",
		})
		return
	}
	hashedPassword, err := bcrypt.GenerateFromPassword([]byte(params.Password), bcrypt.DefaultCost)
	if err != nil {
		logrus.WithFields(logrus.Fields{
			"user": user.ID,
		}).Error(err)
		c.JSON(http.StatusBadRequest, gin.H{
			"message": "Unable to update your password. Please try again.",
		})
		return
	}
	user.Password = sql.NullString{
		String: string(hashedPassword),
		Valid:  true,
	}
	err = storage.UpdateUser(c, user)
	if err != nil {
		logrus.WithFields(logrus.Fields{
			"user": user.ID,
		}).Error(err)
		c.JSON(http.StatusBadRequest, gin.H{
			"message": "Unable to update your password. Please try again.",
		})
		return
	}
	// Best effort: a failed delete leaves a used token behind, but the
	// password change itself has already succeeded.
	err = storage.DeleteToken(c, tokenStr)
	if err != nil {
		logrus.WithFields(logrus.Fields{
			"token":   tokenStr,
			"user_id": user.ID,
		}).WithError(err).Error("Unable to delete token.")
	}
	c.JSON(http.StatusOK, gin.H{
		"message": "Your password has been updated successfully.",
	})
}
// PutVerifyEmail marks the user associated with the URL token as verified.
// The token is deleted afterwards (best effort) regardless of whether the
// user was already verified.
func PutVerifyEmail(c *gin.Context) {
	tokenStr := c.Param("token")
	t, err := storage.GetToken(c, tokenStr)
	if err != nil || t.Type != entities.VerifyEmailTokenType {
		c.JSON(http.StatusBadRequest, gin.H{
			"message": "Unable to verify your email. The token is invalid.",
		})
		return
	}
	user, err := storage.GetUser(c, t.UserID)
	if err != nil {
		c.JSON(http.StatusNotFound, gin.H{
			"message": "Unable to verify your email. The user associated with the token is not found.",
		})
		return
	}
	// Only write to storage when the flag actually flips.
	if !user.Verified {
		user.Verified = true
		err = storage.UpdateUser(c, user)
		if err != nil {
			logrus.WithFields(logrus.Fields{
				"user": user.ID,
			}).Error(err)
			c.JSON(http.StatusBadRequest, gin.H{
				"message": "Unable to verify your email. Please try again.",
			})
			return
		}
	}
	// Best effort: a failed delete is logged but does not fail the request.
	err = storage.DeleteToken(c, tokenStr)
	if err != nil {
		logrus.WithFields(logrus.Fields{
			"token":   tokenStr,
			"user_id": user.ID,
		}).WithError(err).Error("Unable to delete token.")
	}
	c.JSON(http.StatusOK, gin.H{
		"message": "Your email has been verified.",
	})
}
Update users.go
package actions
import (
"database/sql"
"fmt"
"net/http"
"os"
"time"
"github.com/news-maily/app/entities"
"github.com/news-maily/app/utils"
"github.com/gorilla/csrf"
valid "github.com/asaskevich/govalidator"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ses"
"github.com/gin-gonic/gin"
"github.com/news-maily/app/emails"
"github.com/news-maily/app/routes/middleware"
"github.com/news-maily/app/storage"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/bcrypt"
)
// GetMe returns the currently authenticated user (as resolved by the auth
// middleware) and echoes a fresh CSRF token in the X-CSRF-Token header.
func GetMe(c *gin.Context) {
	c.Header("X-CSRF-Token", csrf.Token(c.Request))
	c.JSON(http.StatusOK, middleware.GetUser(c))
}
// changePassParams is the form payload bound by ChangePassword.
type changePassParams struct {
	// Password is the user's current password (required).
	Password string `form:"password" valid:"required"`
	// NewPassword is the replacement password (required; minimum length is
	// enforced separately inside ChangePassword).
	NewPassword string `form:"new_password" valid:"required"`
}
// ChangePassword updates the authenticated user's password. It binds and
// validates the form parameters, verifies the current password against the
// stored bcrypt hash, and persists a bcrypt hash of the new password.
// Responses: 401 (no user), 422 (bad params / short password), 403 (wrong
// current password), 400 (hash or storage failure), 200 on success.
func ChangePassword(c *gin.Context) {
	u := middleware.GetUser(c)
	if u == nil {
		c.JSON(http.StatusUnauthorized, gin.H{
			"message": "Unable to fetch user.",
		})
		return
	}
	params := &changePassParams{}
	err := c.Bind(params)
	if err != nil {
		logrus.WithError(err).Error("Unable to bind params")
		c.JSON(http.StatusUnprocessableEntity, gin.H{
			"message": "Invalid parameters, please try again.",
		})
		return
	}
	v, err := valid.ValidateStruct(params)
	if !v {
		msg := "Unable to change password, invalid request parameters."
		if err != nil {
			msg = err.Error()
		}
		c.JSON(http.StatusUnprocessableEntity, gin.H{
			"message": msg,
		})
		return
	}
	if len(params.NewPassword) < 8 {
		// Fixed user-facing typo: "atleast" -> "at least".
		c.JSON(http.StatusUnprocessableEntity, gin.H{
			"new_password": "The new password must be at least 8 characters.",
		})
		return
	}
	err = bcrypt.CompareHashAndPassword([]byte(u.Password.String), []byte(params.Password))
	if err != nil {
		logrus.Errorf("Invalid credentials. %s", err)
		c.JSON(http.StatusForbidden, gin.H{
			"message": "The password that you entered is incorrect.",
		})
		return
	}
	hashedPassword, err := bcrypt.GenerateFromPassword([]byte(params.NewPassword), bcrypt.DefaultCost)
	if err != nil {
		// Was .Println, which bypasses the level system; log at error level
		// like the rest of the file.
		logrus.WithFields(logrus.Fields{
			"user": u.ID,
		}).Error(err)
		c.JSON(http.StatusBadRequest, gin.H{
			"message": "Unable to update your password. Please try again.",
		})
		return
	}
	u.Password = sql.NullString{
		String: string(hashedPassword),
		Valid:  true,
	}
	err = storage.UpdateUser(c, u)
	if err != nil {
		logrus.WithFields(logrus.Fields{
			"user": u.ID,
		}).Error(err)
		c.JSON(http.StatusBadRequest, gin.H{
			"message": "Unable to update your password. Please try again.",
		})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"message": "Your password was updated successfully.",
	})
}
// forgotPassParams is the form payload bound by PostForgotPassword.
type forgotPassParams struct {
	// Email is the address to send the reset link to; must be a valid email.
	Email string `form:"email" valid:"email"`
}
// PostForgotPassword initiates the password-reset flow: it creates a
// one-hour forgot-password token and emails a reset link. To avoid leaking
// which addresses are registered, it always answers 200 with the same
// message, regardless of whether the user exists or the email was sent.
func PostForgotPassword(c *gin.Context) {
	params := &forgotPassParams{}
	err := c.Bind(params)
	if err != nil {
		logrus.WithError(err).Error("Unable to bind params")
		c.JSON(http.StatusUnprocessableEntity, gin.H{
			"message": "Invalid parameters, please try again.",
		})
		return
	}
	v, err := valid.ValidateStruct(params)
	if !v {
		emailError := valid.ErrorByField(err, "Email")
		if emailError == "" {
			emailError = "Email must be in valid format."
		}
		c.JSON(http.StatusUnprocessableEntity, gin.H{
			"message": emailError,
		})
		return
	}
	u, err := storage.GetUserByUsername(c, params.Email)
	if err == nil {
		sender, err := emails.NewSesSender(
			os.Getenv("AWS_SES_ACCESS_KEY"),
			os.Getenv("AWS_SES_SECRET_KEY"),
			os.Getenv("AWS_SES_REGION"),
		)
		if err == nil {
			tokenStr, err := utils.GenerateRandomString(32)
			if err != nil {
				// BUG FIX: previously the flow continued with an empty token
				// string, persisting and emailing an unusable (and guessable)
				// token. Skip token creation and email on failure.
				logrus.WithError(err).Error("Unable to generate random string.")
			} else {
				t := &entities.Token{
					UserID:    u.ID,
					Token:     tokenStr,
					Type:      entities.ForgotPasswordTokenType,
					ExpiresAt: time.Now().Add(time.Hour * 1),
				}
				err = storage.CreateToken(c, t)
				if err != nil {
					logrus.WithError(err).Error("Cannot create token.")
				} else {
					// Fire-and-forget; sendForgotPasswordEmail logs failures.
					go sendForgotPasswordEmail(tokenStr, u.Username, sender)
				}
			}
		} else {
			logrus.WithError(err).Error("Unable to create SES sender.")
		}
	}
	c.JSON(http.StatusOK, gin.H{
		"message": "Email will be sent to you with the information on how to update your password.",
	})
}
// sendForgotPasswordEmail sends the SES "ForgotPassword" template to email,
// with a reset link built from the APP_URL env var and the token. It is
// invoked as a goroutine by PostForgotPassword; failures are logged, never
// propagated to the caller.
func sendForgotPasswordEmail(token, email string, sender emails.Sender) {
	url := os.Getenv("APP_URL") + "/forgot-password/" + token
	_, err := sender.SendTemplatedEmail(&ses.SendTemplatedEmailInput{
		Template:     aws.String("ForgotPassword"),
		Source:       aws.String(os.Getenv("SYSTEM_EMAIL_SOURCE")),
		TemplateData: aws.String(fmt.Sprintf(`{"url": "%s"}`, url)),
		Destination: &ses.Destination{
			ToAddresses: []*string{aws.String(email)},
		},
	})
	if err != nil {
		logrus.WithError(err).Error("forgot password email failure")
	}
}
// putForgotPassParams is the form payload bound by PutForgotPassword.
type putForgotPassParams struct {
	// Password is the new password (required; minimum length is enforced
	// separately inside PutForgotPassword).
	Password string `form:"password" valid:"required"`
}
// PutForgotPassword completes the password-reset flow: it resolves the
// forgot-password token from the URL, validates the new password, stores its
// bcrypt hash on the token's user, and deletes the token (best effort).
// Responses: 400 (bad token / hash / storage failure), 422 (bad params or
// short password), 404 (user gone), 200 on success.
func PutForgotPassword(c *gin.Context) {
	tokenStr := c.Param("token")
	t, err := storage.GetToken(c, tokenStr)
	if err != nil || t.Type != entities.ForgotPasswordTokenType {
		c.JSON(http.StatusBadRequest, gin.H{
			"message": "Unable to update your password. The token is invalid.",
		})
		return
	}
	params := &putForgotPassParams{}
	err = c.Bind(params)
	if err != nil {
		logrus.WithError(err).Error("Unable to bind params")
		c.JSON(http.StatusUnprocessableEntity, gin.H{
			"message": "Invalid parameters, please try again.",
		})
		return
	}
	v, err := valid.ValidateStruct(params)
	if !v {
		passError := valid.ErrorByField(err, "Password")
		if passError == "" {
			passError = "The password must not be empty."
		}
		c.JSON(http.StatusUnprocessableEntity, gin.H{
			"message": passError,
		})
		return
	}
	if len(params.Password) < 8 {
		// Fixed user-facing typo: "atleast" -> "at least".
		c.JSON(http.StatusUnprocessableEntity, gin.H{
			"password": "The new password must be at least 8 characters.",
		})
		return
	}
	user, err := storage.GetUser(c, t.UserID)
	if err != nil {
		c.JSON(http.StatusNotFound, gin.H{
			"message": "Unable to update your password. The user associated with the token is not found.",
		})
		return
	}
	hashedPassword, err := bcrypt.GenerateFromPassword([]byte(params.Password), bcrypt.DefaultCost)
	if err != nil {
		logrus.WithFields(logrus.Fields{
			"user": user.ID,
		}).Error(err)
		c.JSON(http.StatusBadRequest, gin.H{
			"message": "Unable to update your password. Please try again.",
		})
		return
	}
	user.Password = sql.NullString{
		String: string(hashedPassword),
		Valid:  true,
	}
	err = storage.UpdateUser(c, user)
	if err != nil {
		logrus.WithFields(logrus.Fields{
			"user": user.ID,
		}).Error(err)
		c.JSON(http.StatusBadRequest, gin.H{
			"message": "Unable to update your password. Please try again.",
		})
		return
	}
	// Best effort: a failed delete leaves a used token behind, but the
	// password change itself has already succeeded.
	err = storage.DeleteToken(c, tokenStr)
	if err != nil {
		logrus.WithFields(logrus.Fields{
			"token":   tokenStr,
			"user_id": user.ID,
		}).WithError(err).Error("Unable to delete token.")
	}
	c.JSON(http.StatusOK, gin.H{
		"message": "Your password has been updated successfully.",
	})
}
// PutVerifyEmail marks the user associated with the URL token as verified.
// The token is deleted afterwards (best effort) regardless of whether the
// user was already verified.
func PutVerifyEmail(c *gin.Context) {
	tokenStr := c.Param("token")
	t, err := storage.GetToken(c, tokenStr)
	if err != nil || t.Type != entities.VerifyEmailTokenType {
		c.JSON(http.StatusBadRequest, gin.H{
			"message": "Unable to verify your email. The token is invalid.",
		})
		return
	}
	user, err := storage.GetUser(c, t.UserID)
	if err != nil {
		c.JSON(http.StatusNotFound, gin.H{
			"message": "Unable to verify your email. The user associated with the token is not found.",
		})
		return
	}
	// Only write to storage when the flag actually flips.
	if !user.Verified {
		user.Verified = true
		err = storage.UpdateUser(c, user)
		if err != nil {
			logrus.WithFields(logrus.Fields{
				"user": user.ID,
			}).Error(err)
			c.JSON(http.StatusBadRequest, gin.H{
				"message": "Unable to verify your email. Please try again.",
			})
			return
		}
	}
	// Best effort: a failed delete is logged but does not fail the request.
	err = storage.DeleteToken(c, tokenStr)
	if err != nil {
		logrus.WithFields(logrus.Fields{
			"token":   tokenStr,
			"user_id": user.ID,
		}).WithError(err).Error("Unable to delete token.")
	}
	c.JSON(http.StatusOK, gin.H{
		"message": "Your email has been verified.",
	})
}
|
package main
import (
"fmt"
"os"
"text/template"
"github.com/spf13/cobra"
"github.com/squaremo/ambergreen/common/backends"
"github.com/squaremo/ambergreen/common/data"
)
// listOpts carries the backend handle and flag values for the "list"
// subcommand.
type listOpts struct {
	backend        *backends.Backend // store of services/instances to enumerate
	format         string            // go template applied to each service
	formatInstance string            // go template applied to each instance (implies verbose)
	verbose        bool              // also print each service's instances
}
// addCommandTo registers the "list" subcommand, with its formatting and
// verbosity flags, on the given parent command.
func (opts *listOpts) addCommandTo(top *cobra.Command) {
	listCmd := &cobra.Command{
		Use:   "list [options]",
		Short: "list the services defined",
		Run:   opts.run,
	}
	flags := listCmd.Flags()
	flags.StringVar(&opts.format, "format", "", "format each service with the go template expression given")
	flags.StringVar(&opts.formatInstance, "format-instance", "", "format each instance with the go template expression given (implies verbose)")
	flags.BoolVar(&opts.verbose, "verbose", false, "show the list of instances for each service")
	top.AddCommand(listCmd)
}
// run prints each service (and, with --verbose or --format-instance, each
// instance) to stdout, optionally through user-supplied go templates.
func (opts *listOpts) run(_ *cobra.Command, args []string) {
	printService := func(name string, value data.Service) { fmt.Println(name, value) }
	if opts.format != "" {
		tmpl := template.Must(template.New("service").Parse(opts.format))
		printService = func(name string, serv data.Service) {
			var info serviceInfo
			info.Service = serv
			info.Name = name
			err := tmpl.Execute(os.Stdout, info)
			if err != nil {
				panic(err)
			}
			fmt.Println()
		}
	}
	// BUG FIX: default to a no-op so the instance callback below never
	// invokes a nil func when neither --verbose nor --format-instance is set.
	printInstance := func(name string, service string, value data.Instance) {}
	if opts.verbose {
		printInstance = func(name string, service string, value data.Instance) { fmt.Println(" ", name) }
	}
	if opts.formatInstance != "" {
		tmpl := template.Must(template.New("instance").Parse(opts.formatInstance))
		printInstance = func(name string, service string, inst data.Instance) {
			var info instanceInfo
			info.Instance = inst
			info.Name = name
			info.Service = service
			err := tmpl.Execute(os.Stdout, info)
			if err != nil {
				panic(err)
			}
			fmt.Println()
		}
	}
	err := opts.backend.ForeachServiceInstance(func(name string, serv data.Service) {
		printService(name, serv)
	}, func(serviceName string, name string, inst data.Instance) {
		printInstance(name, serviceName, inst)
	})
	if err != nil {
		// Added the missing %s verb so the error is actually interpolated.
		exitWithErrorf("Unable to enumerate services: %s", err)
	}
}
Simplify amberctl list display
.. and in doing so, avoid a nil-pointer dereference (calling an
uninitialised printInstance func)
package main
import (
"fmt"
"os"
"text/template"
"github.com/spf13/cobra"
"github.com/squaremo/ambergreen/common/backends"
"github.com/squaremo/ambergreen/common/data"
)
// listOpts carries the backend handle and flag values for the "list"
// subcommand.
type listOpts struct {
	backend        *backends.Backend // store of services/instances to enumerate
	format         string            // go template applied to each service
	formatInstance string            // go template applied to each instance (implies verbose)
	verbose        bool              // also print each service's instances
}
// addCommandTo registers the "list" subcommand, with its formatting and
// verbosity flags, on the given parent command.
func (opts *listOpts) addCommandTo(top *cobra.Command) {
	cmd := &cobra.Command{
		Use:   "list [options]",
		Short: "list the services defined",
		Run:   opts.run,
	}
	cmd.Flags().StringVar(&opts.format, "format", "", "format each service with the go template expression given")
	cmd.Flags().StringVar(&opts.formatInstance, "format-instance", "", "format each instance with the go template expression given (implies verbose)")
	cmd.Flags().BoolVar(&opts.verbose, "verbose", false, "show the list of instances for each service")
	top.AddCommand(cmd)
}
// run prints each service name (and, with --verbose or --format-instance,
// each instance) to stdout, optionally through user-supplied go templates.
func (opts *listOpts) run(_ *cobra.Command, args []string) {
	printService := func(name string, _ data.Service) { fmt.Println(name) }
	if opts.format != "" {
		tmpl := template.Must(template.New("service").Parse(opts.format))
		printService = func(name string, serv data.Service) {
			var info serviceInfo
			info.Service = serv
			info.Name = name
			err := tmpl.Execute(os.Stdout, info)
			if err != nil {
				panic(err)
			}
			fmt.Println()
		}
	}
	// printInstance may remain nil when neither flag is set.
	// NOTE(review): confirm ForeachServiceInstance skips a nil callback.
	var printInstance backends.ServiceInstanceFunc
	if opts.verbose {
		printInstance = func(service, name string, value data.Instance) { fmt.Println(" ", name) }
	}
	if opts.formatInstance != "" {
		tmpl := template.Must(template.New("instance").Parse(opts.formatInstance))
		// BUG FIX: the parameter order is (service, name), matching the
		// verbose variant above; previously they were swapped, so templates
		// saw .Name and .Service interchanged.
		printInstance = func(service string, name string, inst data.Instance) {
			var info instanceInfo
			info.Instance = inst
			info.Name = name
			info.Service = service
			err := tmpl.Execute(os.Stdout, info)
			if err != nil {
				panic(err)
			}
			fmt.Println()
		}
	}
	err := opts.backend.ForeachServiceInstance(printService, printInstance)
	if err != nil {
		// Added the missing %s verb so the error is actually interpolated.
		exitWithErrorf("Unable to enumerate services: %s", err)
	}
}
|
package gps
import (
"fmt"
"go/build"
"go/scanner"
"go/token"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
)
// PackageTree.ExternalReach() uses an easily separable algorithm, wmToReach(),
// to turn a discovered set of packages and their imports into a proper external
// reach map.
//
// That algorithm is purely symbolic (no filesystem interaction), and thus is
// easy to test. This is that test.
// TestWorkmapToReach exercises wmToReach with purely symbolic fixtures: each
// case supplies a workmap (package -> internal/external import sets) and the
// expected external reach map. "Poison" cases verify that a missing or err'd
// package removes every package that transitively imports it from the result.
func TestWorkmapToReach(t *testing.T) {
	// empty returns a fresh, empty import set for fixture construction.
	empty := func() map[string]bool {
		return make(map[string]bool)
	}
	table := map[string]struct {
		workmap map[string]wm
		basedir string // left empty in every fixture below
		out     map[string][]string
	}{
		"single": {
			workmap: map[string]wm{
				"foo": {
					ex: empty(),
					in: empty(),
				},
			},
			out: map[string][]string{
				"foo": nil,
			},
		},
		"no external": {
			workmap: map[string]wm{
				"foo": {
					ex: empty(),
					in: empty(),
				},
				"foo/bar": {
					ex: empty(),
					in: empty(),
				},
			},
			out: map[string][]string{
				"foo":     nil,
				"foo/bar": nil,
			},
		},
		"no external with subpkg": {
			workmap: map[string]wm{
				"foo": {
					ex: empty(),
					in: map[string]bool{
						"foo/bar": true,
					},
				},
				"foo/bar": {
					ex: empty(),
					in: empty(),
				},
			},
			out: map[string][]string{
				"foo":     nil,
				"foo/bar": nil,
			},
		},
		"simple base transitive": {
			workmap: map[string]wm{
				"foo": {
					ex: empty(),
					in: map[string]bool{
						"foo/bar": true,
					},
				},
				"foo/bar": {
					ex: map[string]bool{
						"baz": true,
					},
					in: empty(),
				},
			},
			out: map[string][]string{
				"foo": {
					"baz",
				},
				"foo/bar": {
					"baz",
				},
			},
		},
		"missing package is poison": {
			workmap: map[string]wm{
				"A": {
					ex: map[string]bool{
						"B/foo": true,
					},
					in: map[string]bool{
						"A/foo": true, // missing
						"A/bar": true,
					},
				},
				"A/bar": {
					ex: map[string]bool{
						"B/baz": true,
					},
					in: empty(),
				},
			},
			// "A" is dropped entirely because it imports the missing A/foo.
			out: map[string][]string{
				"A/bar": {
					"B/baz",
				},
			},
		},
		"transitive missing package is poison": {
			workmap: map[string]wm{
				"A": {
					ex: map[string]bool{
						"B/foo": true,
					},
					in: map[string]bool{
						"A/foo":  true, // transitively missing
						"A/quux": true,
					},
				},
				"A/foo": {
					ex: map[string]bool{
						"C/flugle": true,
					},
					in: map[string]bool{
						"A/bar": true, // missing
					},
				},
				"A/quux": {
					ex: map[string]bool{
						"B/baz": true,
					},
					in: empty(),
				},
			},
			out: map[string][]string{
				"A/quux": {
					"B/baz",
				},
			},
		},
		"err'd package is poison": {
			workmap: map[string]wm{
				"A": {
					ex: map[string]bool{
						"B/foo": true,
					},
					in: map[string]bool{
						"A/foo": true, // err'd
						"A/bar": true,
					},
				},
				"A/foo": {
					err: fmt.Errorf("err pkg"),
				},
				"A/bar": {
					ex: map[string]bool{
						"B/baz": true,
					},
					in: empty(),
				},
			},
			out: map[string][]string{
				"A/bar": {
					"B/baz",
				},
			},
		},
		"transitive err'd package is poison": {
			workmap: map[string]wm{
				"A": {
					ex: map[string]bool{
						"B/foo": true,
					},
					in: map[string]bool{
						"A/foo":  true, // transitively err'd
						"A/quux": true,
					},
				},
				"A/foo": {
					ex: map[string]bool{
						"C/flugle": true,
					},
					in: map[string]bool{
						"A/bar": true, // err'd
					},
				},
				"A/bar": {
					err: fmt.Errorf("err pkg"),
				},
				"A/quux": {
					ex: map[string]bool{
						"B/baz": true,
					},
					in: empty(),
				},
			},
			out: map[string][]string{
				"A/quux": {
					"B/baz",
				},
			},
		},
	}
	// Run every fixture and deep-compare the computed reach map.
	for name, fix := range table {
		out := wmToReach(fix.workmap, fix.basedir)
		if !reflect.DeepEqual(out, fix.out) {
			t.Errorf("wmToReach(%q): Did not get expected reach map:\n\t(GOT): %s\n\t(WNT): %s", name, out, fix.out)
		}
	}
}
func TestListPackages(t *testing.T) {
srcdir := filepath.Join(getwd(t), "_testdata", "src")
j := func(s ...string) string {
return filepath.Join(srcdir, filepath.Join(s...))
}
table := map[string]struct {
fileRoot string
importRoot string
out PackageTree
err error
}{
"empty": {
fileRoot: j("empty"),
importRoot: "empty",
out: PackageTree{
ImportRoot: "empty",
Packages: map[string]PackageOrErr{
"empty": {
Err: &build.NoGoError{
Dir: j("empty"),
},
},
},
},
err: nil,
},
"code only": {
fileRoot: j("simple"),
importRoot: "simple",
out: PackageTree{
ImportRoot: "simple",
Packages: map[string]PackageOrErr{
"simple": {
P: Package{
ImportPath: "simple",
CommentPath: "",
Name: "simple",
Imports: []string{
"github.com/sdboyer/gps",
"sort",
},
},
},
},
},
},
"impose import path": {
fileRoot: j("simple"),
importRoot: "arbitrary",
out: PackageTree{
ImportRoot: "arbitrary",
Packages: map[string]PackageOrErr{
"arbitrary": {
P: Package{
ImportPath: "arbitrary",
CommentPath: "",
Name: "simple",
Imports: []string{
"github.com/sdboyer/gps",
"sort",
},
},
},
},
},
},
"test only": {
fileRoot: j("t"),
importRoot: "simple",
out: PackageTree{
ImportRoot: "simple",
Packages: map[string]PackageOrErr{
"simple": {
P: Package{
ImportPath: "simple",
CommentPath: "",
Name: "simple",
Imports: []string{},
TestImports: []string{
"math/rand",
"strconv",
},
},
},
},
},
},
"xtest only": {
fileRoot: j("xt"),
importRoot: "simple",
out: PackageTree{
ImportRoot: "simple",
Packages: map[string]PackageOrErr{
"simple": {
P: Package{
ImportPath: "simple",
CommentPath: "",
Name: "simple",
Imports: []string{},
TestImports: []string{
"sort",
"strconv",
},
},
},
},
},
},
"code and test": {
fileRoot: j("simplet"),
importRoot: "simple",
out: PackageTree{
ImportRoot: "simple",
Packages: map[string]PackageOrErr{
"simple": {
P: Package{
ImportPath: "simple",
CommentPath: "",
Name: "simple",
Imports: []string{
"github.com/sdboyer/gps",
"sort",
},
TestImports: []string{
"math/rand",
"strconv",
},
},
},
},
},
},
"code and xtest": {
fileRoot: j("simplext"),
importRoot: "simple",
out: PackageTree{
ImportRoot: "simple",
Packages: map[string]PackageOrErr{
"simple": {
P: Package{
ImportPath: "simple",
CommentPath: "",
Name: "simple",
Imports: []string{
"github.com/sdboyer/gps",
"sort",
},
TestImports: []string{
"sort",
"strconv",
},
},
},
},
},
},
"code, test, xtest": {
fileRoot: j("simpleallt"),
importRoot: "simple",
out: PackageTree{
ImportRoot: "simple",
Packages: map[string]PackageOrErr{
"simple": {
P: Package{
ImportPath: "simple",
CommentPath: "",
Name: "simple",
Imports: []string{
"github.com/sdboyer/gps",
"sort",
},
TestImports: []string{
"math/rand",
"sort",
"strconv",
},
},
},
},
},
},
"one pkg multifile": {
fileRoot: j("m1p"),
importRoot: "m1p",
out: PackageTree{
ImportRoot: "m1p",
Packages: map[string]PackageOrErr{
"m1p": {
P: Package{
ImportPath: "m1p",
CommentPath: "",
Name: "m1p",
Imports: []string{
"github.com/sdboyer/gps",
"os",
"sort",
},
},
},
},
},
},
"one nested below": {
fileRoot: j("nest"),
importRoot: "nest",
out: PackageTree{
ImportRoot: "nest",
Packages: map[string]PackageOrErr{
"nest": {
P: Package{
ImportPath: "nest",
CommentPath: "",
Name: "simple",
Imports: []string{
"github.com/sdboyer/gps",
"sort",
},
},
},
"nest/m1p": {
P: Package{
ImportPath: "nest/m1p",
CommentPath: "",
Name: "m1p",
Imports: []string{
"github.com/sdboyer/gps",
"os",
"sort",
},
},
},
},
},
},
"malformed go file": {
fileRoot: j("bad"),
importRoot: "bad",
out: PackageTree{
ImportRoot: "bad",
Packages: map[string]PackageOrErr{
"bad": {
Err: scanner.ErrorList{
&scanner.Error{
Pos: token.Position{
Filename: j("bad", "bad.go"),
Offset: 113,
Line: 2,
Column: 43,
},
Msg: "expected 'package', found 'EOF'",
},
},
},
},
},
},
"two nested under empty root": {
fileRoot: j("ren"),
importRoot: "ren",
out: PackageTree{
ImportRoot: "ren",
Packages: map[string]PackageOrErr{
"ren": {
Err: &build.NoGoError{
Dir: j("ren"),
},
},
"ren/m1p": {
P: Package{
ImportPath: "ren/m1p",
CommentPath: "",
Name: "m1p",
Imports: []string{
"github.com/sdboyer/gps",
"os",
"sort",
},
},
},
"ren/simple": {
P: Package{
ImportPath: "ren/simple",
CommentPath: "",
Name: "simple",
Imports: []string{
"github.com/sdboyer/gps",
"sort",
},
},
},
},
},
},
"internal name mismatch": {
fileRoot: j("doublenest"),
importRoot: "doublenest",
out: PackageTree{
ImportRoot: "doublenest",
Packages: map[string]PackageOrErr{
"doublenest": {
P: Package{
ImportPath: "doublenest",
CommentPath: "",
Name: "base",
Imports: []string{
"github.com/sdboyer/gps",
"go/parser",
},
},
},
"doublenest/namemismatch": {
P: Package{
ImportPath: "doublenest/namemismatch",
CommentPath: "",
Name: "nm",
Imports: []string{
"github.com/Masterminds/semver",
"os",
},
},
},
"doublenest/namemismatch/m1p": {
P: Package{
ImportPath: "doublenest/namemismatch/m1p",
CommentPath: "",
Name: "m1p",
Imports: []string{
"github.com/sdboyer/gps",
"os",
"sort",
},
},
},
},
},
},
"file and importroot mismatch": {
fileRoot: j("doublenest"),
importRoot: "other",
out: PackageTree{
ImportRoot: "other",
Packages: map[string]PackageOrErr{
"other": {
P: Package{
ImportPath: "other",
CommentPath: "",
Name: "base",
Imports: []string{
"github.com/sdboyer/gps",
"go/parser",
},
},
},
"other/namemismatch": {
P: Package{
ImportPath: "other/namemismatch",
CommentPath: "",
Name: "nm",
Imports: []string{
"github.com/Masterminds/semver",
"os",
},
},
},
"other/namemismatch/m1p": {
P: Package{
ImportPath: "other/namemismatch/m1p",
CommentPath: "",
Name: "m1p",
Imports: []string{
"github.com/sdboyer/gps",
"os",
"sort",
},
},
},
},
},
},
"code and ignored main": {
fileRoot: j("igmain"),
importRoot: "simple",
out: PackageTree{
ImportRoot: "simple",
Packages: map[string]PackageOrErr{
"simple": {
P: Package{
ImportPath: "simple",
CommentPath: "",
Name: "simple",
Imports: []string{
"github.com/sdboyer/gps",
"sort",
"unicode",
},
},
},
},
},
},
"code and ignored main with comment leader": {
fileRoot: j("igmainlong"),
importRoot: "simple",
out: PackageTree{
ImportRoot: "simple",
Packages: map[string]PackageOrErr{
"simple": {
P: Package{
ImportPath: "simple",
CommentPath: "",
Name: "simple",
Imports: []string{
"github.com/sdboyer/gps",
"sort",
"unicode",
},
},
},
},
},
},
"code, tests, and ignored main": {
fileRoot: j("igmaint"),
importRoot: "simple",
out: PackageTree{
ImportRoot: "simple",
Packages: map[string]PackageOrErr{
"simple": {
P: Package{
ImportPath: "simple",
CommentPath: "",
Name: "simple",
Imports: []string{
"github.com/sdboyer/gps",
"sort",
"unicode",
},
TestImports: []string{
"math/rand",
"strconv",
},
},
},
},
},
},
// New code allows this because it doesn't care if the code compiles (kinda) or not,
// so maybe this is actually not an error anymore?
/*"two pkgs": {
fileRoot: j("twopkgs"),
importRoot: "twopkgs",
out: PackageTree{
ImportRoot: "twopkgs",
Packages: map[string]PackageOrErr{
"twopkgs": {
Err: &build.MultiplePackageError{
Dir: j("twopkgs"),
Packages: []string{"simple", "m1p"},
Files: []string{"a.go", "b.go"},
},
},
},
},
}, */
// imports a missing pkg
"missing import": {
fileRoot: j("missing"),
importRoot: "missing",
out: PackageTree{
ImportRoot: "missing",
Packages: map[string]PackageOrErr{
"missing": {
P: Package{
ImportPath: "missing",
CommentPath: "",
Name: "simple",
Imports: []string{
"github.com/sdboyer/gps",
"missing/missing",
"sort",
},
},
},
"missing/m1p": {
P: Package{
ImportPath: "missing/m1p",
CommentPath: "",
Name: "m1p",
Imports: []string{
"github.com/sdboyer/gps",
"os",
"sort",
},
},
},
},
},
},
// has disallowed dir names
"disallowed dirs": {
fileRoot: j("disallow"),
importRoot: "disallow",
out: PackageTree{
ImportRoot: "disallow",
Packages: map[string]PackageOrErr{
"disallow": {
P: Package{
ImportPath: "disallow",
CommentPath: "",
Name: "disallow",
Imports: []string{
"disallow/testdata",
"github.com/sdboyer/gps",
"sort",
},
},
},
// disallow/.m1p is ignored by listPackages...for now. Kept
// here commented because this might change again...
//"disallow/.m1p": {
//P: Package{
//ImportPath: "disallow/.m1p",
//CommentPath: "",
//Name: "m1p",
//Imports: []string{
//"github.com/sdboyer/gps",
//"os",
//"sort",
//},
//},
//},
"disallow/testdata": {
P: Package{
ImportPath: "disallow/testdata",
CommentPath: "",
Name: "testdata",
Imports: []string{
"hash",
},
},
},
},
},
},
// This case mostly exists for the PackageTree methods, but it does
// cover a bit of range
"varied": {
fileRoot: j("varied"),
importRoot: "varied",
out: PackageTree{
ImportRoot: "varied",
Packages: map[string]PackageOrErr{
"varied": {
P: Package{
ImportPath: "varied",
CommentPath: "",
Name: "main",
Imports: []string{
"net/http",
"varied/namemismatch",
"varied/otherpath",
"varied/simple",
},
},
},
"varied/otherpath": {
P: Package{
ImportPath: "varied/otherpath",
CommentPath: "",
Name: "otherpath",
Imports: []string{},
TestImports: []string{
"varied/m1p",
},
},
},
"varied/simple": {
P: Package{
ImportPath: "varied/simple",
CommentPath: "",
Name: "simple",
Imports: []string{
"github.com/sdboyer/gps",
"go/parser",
"varied/simple/another",
},
},
},
"varied/simple/another": {
P: Package{
ImportPath: "varied/simple/another",
CommentPath: "",
Name: "another",
Imports: []string{
"hash",
"varied/m1p",
},
TestImports: []string{
"encoding/binary",
},
},
},
"varied/namemismatch": {
P: Package{
ImportPath: "varied/namemismatch",
CommentPath: "",
Name: "nm",
Imports: []string{
"github.com/Masterminds/semver",
"os",
},
},
},
"varied/m1p": {
P: Package{
ImportPath: "varied/m1p",
CommentPath: "",
Name: "m1p",
Imports: []string{
"github.com/sdboyer/gps",
"os",
"sort",
},
},
},
},
},
},
}
for name, fix := range table {
if _, err := os.Stat(fix.fileRoot); err != nil {
t.Errorf("listPackages(%q): error on fileRoot %s: %s", name, fix.fileRoot, err)
continue
}
out, err := ListPackages(fix.fileRoot, fix.importRoot)
if err != nil && fix.err == nil {
t.Errorf("listPackages(%q): Received error but none expected: %s", name, err)
} else if fix.err != nil && err == nil {
t.Errorf("listPackages(%q): Error expected but none received", name)
} else if fix.err != nil && err != nil {
if !reflect.DeepEqual(fix.err, err) {
t.Errorf("listPackages(%q): Did not receive expected error:\n\t(GOT): %s\n\t(WNT): %s", name, err, fix.err)
}
}
if fix.out.ImportRoot != "" && fix.out.Packages != nil {
if !reflect.DeepEqual(out, fix.out) {
if fix.out.ImportRoot != out.ImportRoot {
t.Errorf("listPackages(%q): Expected ImportRoot %s, got %s", name, fix.out.ImportRoot, out.ImportRoot)
}
// overwrite the out one to see if we still have a real problem
out.ImportRoot = fix.out.ImportRoot
if !reflect.DeepEqual(out, fix.out) {
if len(fix.out.Packages) < 2 {
t.Errorf("listPackages(%q): Did not get expected PackageOrErrs:\n\t(GOT): %#v\n\t(WNT): %#v", name, out, fix.out)
} else {
seen := make(map[string]bool)
for path, perr := range fix.out.Packages {
seen[path] = true
if operr, exists := out.Packages[path]; !exists {
t.Errorf("listPackages(%q): Expected PackageOrErr for path %s was missing from output:\n\t%s", name, path, perr)
} else {
if !reflect.DeepEqual(perr, operr) {
t.Errorf("listPackages(%q): PkgOrErr for path %s was not as expected:\n\t(GOT): %#v\n\t(WNT): %#v", name, path, operr, perr)
}
}
}
for path, operr := range out.Packages {
if seen[path] {
continue
}
t.Errorf("listPackages(%q): Got PackageOrErr for path %s, but none was expected:\n\t%s", name, path, operr)
}
}
}
}
}
}
}
// TestListExternalImports exercises ListExternalImports() over reach maps
// built from the 'varied' test tree, toggling main-package inclusion, test
// inclusion, and the ignore set across a sequence of cases. Cases share
// mutable state (expect/name/ignore/main/tests) and MUST run in order.
func TestListExternalImports(t *testing.T) {
	// There's enough in the 'varied' test case to test most of what matters
	vptree, err := ListPackages(filepath.Join(getwd(t), "_testdata", "src", "varied"), "varied")
	if err != nil {
		t.Fatalf("listPackages failed on varied test case: %s", err)
	}

	// Shared case state; each case below mutates these, then calls validate().
	var expect []string
	var name string
	var ignore map[string]bool
	var main, tests bool

	// validate recomputes the external import list with the current settings
	// and compares it against expect.
	validate := func() {
		result := vptree.ExternalReach(main, tests, ignore).ListExternalImports()
		if !reflect.DeepEqual(expect, result) {
			t.Errorf("Wrong imports in %q case:\n\t(GOT): %s\n\t(WNT): %s", name, result, expect)
		}
	}

	// Complete set of external imports reachable from 'varied' when nothing
	// is excluded; except() below subtracts from this.
	all := []string{
		"encoding/binary",
		"github.com/Masterminds/semver",
		"github.com/sdboyer/gps",
		"go/parser",
		"hash",
		"net/http",
		"os",
		"sort",
	}

	// helper to rewrite expect, except for a couple packages
	//
	// this makes it easier to see what we're taking out on each test
	except := func(not ...string) {
		expect = make([]string, len(all)-len(not))

		drop := make(map[string]bool)
		for _, npath := range not {
			drop[npath] = true
		}

		k := 0
		for _, path := range all {
			if !drop[path] {
				expect[k] = path
				k++
			}
		}
	}

	// everything on
	name = "simple"
	except()
	main, tests = true, true
	validate()

	// Now without tests, which should just cut one
	name = "no tests"
	tests = false
	except("encoding/binary")
	validate()

	// Now skip main, which still just cuts out one
	name = "no main"
	main, tests = false, true
	except("net/http")
	validate()

	// No test and no main, which should be additive
	name = "no test, no main"
	main, tests = false, false
	except("net/http", "encoding/binary")
	validate()

	// now, the ignore tests. turn main and tests back on
	main, tests = true, true

	// start with non-matching
	name = "non-matching ignore"
	ignore = map[string]bool{
		"nomatch": true,
	}
	except()
	validate()

	// should have the same effect as ignoring main
	name = "ignore the root"
	ignore = map[string]bool{
		"varied": true,
	}
	except("net/http")
	validate()

	// now drop a more interesting one
	name = "ignore simple"
	ignore = map[string]bool{
		"varied/simple": true,
	}
	// we get github.com/sdboyer/gps from m1p, too, so it should still be there
	except("go/parser")
	validate()

	// now drop two
	name = "ignore simple and namemismatch"
	ignore = map[string]bool{
		"varied/simple":       true,
		"varied/namemismatch": true,
	}
	except("go/parser", "github.com/Masterminds/semver")
	validate()

	// make sure tests and main play nice with ignore
	name = "ignore simple and namemismatch, and no tests"
	tests = false
	except("go/parser", "github.com/Masterminds/semver", "encoding/binary")
	validate()
	name = "ignore simple and namemismatch, and no main"
	main, tests = false, true
	except("go/parser", "github.com/Masterminds/semver", "net/http")
	validate()
	name = "ignore simple and namemismatch, and no main or tests"
	main, tests = false, false
	except("go/parser", "github.com/Masterminds/semver", "net/http", "encoding/binary")
	validate()
	main, tests = true, true

	// ignore two that should knock out gps
	name = "ignore both importers"
	ignore = map[string]bool{
		"varied/simple": true,
		"varied/m1p":    true,
	}
	except("sort", "github.com/sdboyer/gps", "go/parser")
	validate()

	// finally, directly ignore some external packages
	name = "ignore external"
	ignore = map[string]bool{
		"github.com/sdboyer/gps": true,
		"go/parser":              true,
		"sort":                   true,
	}
	except("sort", "github.com/sdboyer/gps", "go/parser")
	validate()

	// The only thing varied *doesn't* cover is disallowed path patterns
	// NOTE(review): name still holds "ignore external" here, so a failure
	// below misreports its case name — consider setting name first.
	ptree, err := ListPackages(filepath.Join(getwd(t), "_testdata", "src", "disallow"), "disallow")
	if err != nil {
		t.Fatalf("listPackages failed on disallow test case: %s", err)
	}

	result := ptree.ExternalReach(false, false, nil).ListExternalImports()
	expect = []string{"github.com/sdboyer/gps", "hash", "sort"}
	if !reflect.DeepEqual(expect, result) {
		t.Errorf("Wrong imports in %q case:\n\t(GOT): %s\n\t(WNT): %s", name, result, expect)
	}
}
// TestExternalReach validates the full per-package ExternalReach() maps
// computed from the 'varied' test tree under varying main/tests/ignore
// settings. Cases share mutable state (expect/name/main/tests/ignore) and
// MUST run in order.
//
// Fix: the "no tests" case configured its expectation but never called
// validate(), so that expectation was silently skipped; the call is restored.
func TestExternalReach(t *testing.T) {
	// There's enough in the 'varied' test case to test most of what matters
	vptree, err := ListPackages(filepath.Join(getwd(t), "_testdata", "src", "varied"), "varied")
	if err != nil {
		t.Fatalf("listPackages failed on varied test case: %s", err)
	}

	// Set up vars for validate closure
	var expect map[string][]string
	var name string
	var main, tests bool
	var ignore map[string]bool

	// validate recomputes the reach map with the current settings and reports
	// missing, mismatched, and unexpected import paths separately for easier
	// failure diagnosis.
	validate := func() {
		result := vptree.ExternalReach(main, tests, ignore)
		if !reflect.DeepEqual(expect, result) {
			seen := make(map[string]bool)
			for ip, epkgs := range expect {
				seen[ip] = true
				if pkgs, exists := result[ip]; !exists {
					t.Errorf("ver(%q): expected import path %s was not present in result", name, ip)
				} else {
					if !reflect.DeepEqual(pkgs, epkgs) {
						t.Errorf("ver(%q): did not get expected package set for import path %s:\n\t(GOT): %s\n\t(WNT): %s", name, ip, pkgs, epkgs)
					}
				}
			}

			for ip, pkgs := range result {
				if seen[ip] {
					continue
				}
				t.Errorf("ver(%q): Got packages for import path %s, but none were expected:\n\t%s", name, ip, pkgs)
			}
		}
	}

	// Full reach map for 'varied' with everything enabled; except() below
	// derives each case's expectation from this.
	all := map[string][]string{
		"varied":                {"encoding/binary", "github.com/Masterminds/semver", "github.com/sdboyer/gps", "go/parser", "hash", "net/http", "os", "sort"},
		"varied/m1p":            {"github.com/sdboyer/gps", "os", "sort"},
		"varied/namemismatch":   {"github.com/Masterminds/semver", "os"},
		"varied/otherpath":      {"github.com/sdboyer/gps", "os", "sort"},
		"varied/simple":         {"encoding/binary", "github.com/sdboyer/gps", "go/parser", "hash", "os", "sort"},
		"varied/simple/another": {"encoding/binary", "github.com/sdboyer/gps", "hash", "os", "sort"},
	}

	// build a map to validate the exception inputs. do this because shit is
	// hard enough to keep track of that it's preferable not to have silent
	// success if a typo creeps in and we're trying to except an import that
	// isn't in a pkg in the first place
	valid := make(map[string]map[string]bool)
	for ip, expkgs := range all {
		m := make(map[string]bool)
		for _, pkg := range expkgs {
			m[pkg] = true
		}
		valid[ip] = m
	}

	// helper to compose expect, excepting specific packages
	//
	// this makes it easier to see what we're taking out on each test
	except := func(pkgig ...string) {
		// reinit expect with everything from all
		expect = make(map[string][]string)
		for ip, expkgs := range all {
			sl := make([]string, len(expkgs))
			copy(sl, expkgs)
			expect[ip] = sl
		}

		// now build the dropmap
		drop := make(map[string]map[string]bool)
		for _, igstr := range pkgig {
			// split on space; first elem is import path to pkg, the rest are
			// the imports to drop.
			not := strings.Split(igstr, " ")
			var ip string
			ip, not = not[0], not[1:]
			if _, exists := valid[ip]; !exists {
				t.Fatalf("%s is not a package name we're working with, doofus", ip)
			}

			// if only a single elem was passed, though, drop the whole thing
			if len(not) == 0 {
				delete(expect, ip)
				continue
			}

			m := make(map[string]bool)
			for _, imp := range not {
				if !valid[ip][imp] {
					t.Fatalf("%s is not a reachable import of %s, even in the all case", imp, ip)
				}
				m[imp] = true
			}
			drop[ip] = m
		}

		for ip, pkgs := range expect {
			var npkgs []string
			for _, imp := range pkgs {
				if !drop[ip][imp] {
					npkgs = append(npkgs, imp)
				}
			}
			expect[ip] = npkgs
		}
	}

	// first, validate all
	name = "all"
	main, tests = true, true
	except()
	validate()

	// turn off main pkgs, which necessarily doesn't affect anything else
	name = "no main"
	main = false
	except("varied")
	validate()

	// ignoring the "varied" pkg has same effect as disabling main pkgs
	name = "ignore root"
	ignore = map[string]bool{
		"varied": true,
	}
	main = true
	// No except() here on purpose: expect still holds the "no main"
	// expectation, which is exactly what ignoring the root should reproduce.
	validate()

	// when we drop tests, varied/otherpath loses its link to varied/m1p and
	// varied/simple/another loses its test import, which has a fairly big
	// cascade
	name = "no tests"
	tests = false
	ignore = nil
	except(
		"varied encoding/binary",
		"varied/simple encoding/binary",
		"varied/simple/another encoding/binary",
		"varied/otherpath github.com/sdboyer/gps os sort",
	)
	// FIX: this validate() was missing, so the "no tests" expectation set up
	// just above was never actually checked.
	validate()

	// almost the same as previous, but varied just goes away completely
	name = "no main or tests"
	main = false
	except(
		"varied",
		"varied/simple encoding/binary",
		"varied/simple/another encoding/binary",
		"varied/otherpath github.com/sdboyer/gps os sort",
	)
	validate()

	// focus on ignores now, so reset main and tests
	main, tests = true, true

	// now, the fun stuff. punch a hole in the middle by cutting out
	// varied/simple
	name = "ignore varied/simple"
	ignore = map[string]bool{
		"varied/simple": true,
	}
	except(
		// root pkg loses on everything in varied/simple/another
		"varied hash encoding/binary go/parser",
		"varied/simple",
	)
	validate()

	// widen the hole by excluding otherpath
	name = "ignore varied/{otherpath,simple}"
	ignore = map[string]bool{
		"varied/otherpath": true,
		"varied/simple":    true,
	}
	except(
		// root pkg loses on everything in varied/simple/another and varied/m1p
		"varied hash encoding/binary go/parser github.com/sdboyer/gps sort",
		"varied/otherpath",
		"varied/simple",
	)
	validate()

	// remove namemismatch, though we're mostly beating a dead horse now
	name = "ignore varied/{otherpath,simple,namemismatch}"
	ignore["varied/namemismatch"] = true
	except(
		// root pkg loses on everything in varied/simple/another and varied/m1p
		"varied hash encoding/binary go/parser github.com/sdboyer/gps sort os github.com/Masterminds/semver",
		"varied/otherpath",
		"varied/simple",
		"varied/namemismatch",
	)
	validate()
}
// NOTE(review): this blank-identifier table duplicates the `all` reach map
// inside TestExternalReach and is never referenced; it appears to be kept
// only as a reference copy. Consider removing it if it is truly unused.
var _ = map[string][]string{
	"varied":                {"encoding/binary", "github.com/Masterminds/semver", "github.com/sdboyer/gps", "go/parser", "hash", "net/http", "os", "sort"},
	"varied/m1p":            {"github.com/sdboyer/gps", "os", "sort"},
	"varied/namemismatch":   {"github.com/Masterminds/semver", "os"},
	"varied/otherpath":      {"github.com/sdboyer/gps", "os", "sort"},
	"varied/simple":         {"encoding/binary", "github.com/sdboyer/gps", "go/parser", "hash", "os", "sort"},
	"varied/simple/another": {"encoding/binary", "github.com/sdboyer/gps", "hash", "os", "sort"},
}
func getwd(t *testing.T) string {
cwd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
return cwd
}
TODO: re-enable this test.
package gps
import (
"fmt"
"go/build"
"go/scanner"
"go/token"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
)
// PackageTree.ExternalReach() uses an easily separable algorithm, wmToReach(),
// to turn a discovered set of packages and their imports into a proper external
// reach map.
//
// That algorithm is purely symbolic (no filesystem interaction), and thus is
// easy to test. This is that test.
func TestWorkmapToReach(t *testing.T) {
	// empty builds a fresh empty set; keeps the fixtures below terse.
	empty := func() map[string]bool {
		return make(map[string]bool)
	}
	table := map[string]struct {
		workmap map[string]wm       // input: import path -> external (ex) / internal (in) import sets
		basedir string              // passed through to wmToReach; zero value in every fixture here
		out     map[string][]string // expected reach map
	}{
		"single": {
			workmap: map[string]wm{
				"foo": {
					ex: empty(),
					in: empty(),
				},
			},
			out: map[string][]string{
				"foo": nil,
			},
		},
		"no external": {
			workmap: map[string]wm{
				"foo": {
					ex: empty(),
					in: empty(),
				},
				"foo/bar": {
					ex: empty(),
					in: empty(),
				},
			},
			out: map[string][]string{
				"foo":     nil,
				"foo/bar": nil,
			},
		},
		"no external with subpkg": {
			workmap: map[string]wm{
				"foo": {
					ex: empty(),
					in: map[string]bool{
						"foo/bar": true,
					},
				},
				"foo/bar": {
					ex: empty(),
					in: empty(),
				},
			},
			out: map[string][]string{
				"foo":     nil,
				"foo/bar": nil,
			},
		},
		"simple base transitive": {
			workmap: map[string]wm{
				"foo": {
					ex: empty(),
					in: map[string]bool{
						"foo/bar": true,
					},
				},
				"foo/bar": {
					ex: map[string]bool{
						"baz": true,
					},
					in: empty(),
				},
			},
			out: map[string][]string{
				"foo": {
					"baz",
				},
				"foo/bar": {
					"baz",
				},
			},
		},
		// A package with an internal import that is absent from the workmap
		// is expected to be dropped from the output entirely ("poisoned").
		"missing package is poison": {
			workmap: map[string]wm{
				"A": {
					ex: map[string]bool{
						"B/foo": true,
					},
					in: map[string]bool{
						"A/foo": true, // missing
						"A/bar": true,
					},
				},
				"A/bar": {
					ex: map[string]bool{
						"B/baz": true,
					},
					in: empty(),
				},
			},
			out: map[string][]string{
				"A/bar": {
					"B/baz",
				},
			},
		},
		"transitive missing package is poison": {
			workmap: map[string]wm{
				"A": {
					ex: map[string]bool{
						"B/foo": true,
					},
					in: map[string]bool{
						"A/foo":  true, // transitively missing
						"A/quux": true,
					},
				},
				"A/foo": {
					ex: map[string]bool{
						"C/flugle": true,
					},
					in: map[string]bool{
						"A/bar": true, // missing
					},
				},
				"A/quux": {
					ex: map[string]bool{
						"B/baz": true,
					},
					in: empty(),
				},
			},
			out: map[string][]string{
				"A/quux": {
					"B/baz",
				},
			},
		},
		// Same poisoning behavior when the internal dep exists but carries an
		// error instead of package data.
		"err'd package is poison": {
			workmap: map[string]wm{
				"A": {
					ex: map[string]bool{
						"B/foo": true,
					},
					in: map[string]bool{
						"A/foo": true, // err'd
						"A/bar": true,
					},
				},
				"A/foo": {
					err: fmt.Errorf("err pkg"),
				},
				"A/bar": {
					ex: map[string]bool{
						"B/baz": true,
					},
					in: empty(),
				},
			},
			out: map[string][]string{
				"A/bar": {
					"B/baz",
				},
			},
		},
		"transitive err'd package is poison": {
			workmap: map[string]wm{
				"A": {
					ex: map[string]bool{
						"B/foo": true,
					},
					in: map[string]bool{
						"A/foo":  true, // transitively err'd
						"A/quux": true,
					},
				},
				"A/foo": {
					ex: map[string]bool{
						"C/flugle": true,
					},
					in: map[string]bool{
						"A/bar": true, // err'd
					},
				},
				"A/bar": {
					err: fmt.Errorf("err pkg"),
				},
				"A/quux": {
					ex: map[string]bool{
						"B/baz": true,
					},
					in: empty(),
				},
			},
			out: map[string][]string{
				"A/quux": {
					"B/baz",
				},
			},
		},
	}

	for name, fix := range table {
		// wmToReach is pure, so fixtures run directly with no setup.
		out := wmToReach(fix.workmap, fix.basedir)
		if !reflect.DeepEqual(out, fix.out) {
			t.Errorf("wmToReach(%q): Did not get expected reach map:\n\t(GOT): %s\n\t(WNT): %s", name, out, fix.out)
		}
	}
}
// TestListPackages runs ListPackages over a set of fixture trees under
// _testdata/src and compares the resulting PackageTree (and/or error) against
// each case's expectation, with detailed per-package diffing on mismatch.
func TestListPackages(t *testing.T) {
	srcdir := filepath.Join(getwd(t), "_testdata", "src")
	// j joins path segments under the fixture source dir.
	j := func(s ...string) string {
		return filepath.Join(srcdir, filepath.Join(s...))
	}

	table := map[string]struct {
		fileRoot   string      // filesystem root handed to ListPackages
		importRoot string      // import path imposed on that root
		out        PackageTree // expected tree; zero value means "don't compare"
		err        error       // expected error, if any
	}{
		"empty": {
			fileRoot:   j("empty"),
			importRoot: "empty",
			out: PackageTree{
				ImportRoot: "empty",
				Packages: map[string]PackageOrErr{
					"empty": {
						Err: &build.NoGoError{
							Dir: j("empty"),
						},
					},
				},
			},
			err: nil,
		},
		"code only": {
			fileRoot:   j("simple"),
			importRoot: "simple",
			out: PackageTree{
				ImportRoot: "simple",
				Packages: map[string]PackageOrErr{
					"simple": {
						P: Package{
							ImportPath:  "simple",
							CommentPath: "",
							Name:        "simple",
							Imports: []string{
								"github.com/sdboyer/gps",
								"sort",
							},
						},
					},
				},
			},
		},
		"impose import path": {
			fileRoot:   j("simple"),
			importRoot: "arbitrary",
			out: PackageTree{
				ImportRoot: "arbitrary",
				Packages: map[string]PackageOrErr{
					"arbitrary": {
						P: Package{
							ImportPath:  "arbitrary",
							CommentPath: "",
							Name:        "simple",
							Imports: []string{
								"github.com/sdboyer/gps",
								"sort",
							},
						},
					},
				},
			},
		},
		"test only": {
			fileRoot:   j("t"),
			importRoot: "simple",
			out: PackageTree{
				ImportRoot: "simple",
				Packages: map[string]PackageOrErr{
					"simple": {
						P: Package{
							ImportPath:  "simple",
							CommentPath: "",
							Name:        "simple",
							Imports:     []string{},
							TestImports: []string{
								"math/rand",
								"strconv",
							},
						},
					},
				},
			},
		},
		"xtest only": {
			fileRoot:   j("xt"),
			importRoot: "simple",
			out: PackageTree{
				ImportRoot: "simple",
				Packages: map[string]PackageOrErr{
					"simple": {
						P: Package{
							ImportPath:  "simple",
							CommentPath: "",
							Name:        "simple",
							Imports:     []string{},
							TestImports: []string{
								"sort",
								"strconv",
							},
						},
					},
				},
			},
		},
		"code and test": {
			fileRoot:   j("simplet"),
			importRoot: "simple",
			out: PackageTree{
				ImportRoot: "simple",
				Packages: map[string]PackageOrErr{
					"simple": {
						P: Package{
							ImportPath:  "simple",
							CommentPath: "",
							Name:        "simple",
							Imports: []string{
								"github.com/sdboyer/gps",
								"sort",
							},
							TestImports: []string{
								"math/rand",
								"strconv",
							},
						},
					},
				},
			},
		},
		"code and xtest": {
			fileRoot:   j("simplext"),
			importRoot: "simple",
			out: PackageTree{
				ImportRoot: "simple",
				Packages: map[string]PackageOrErr{
					"simple": {
						P: Package{
							ImportPath:  "simple",
							CommentPath: "",
							Name:        "simple",
							Imports: []string{
								"github.com/sdboyer/gps",
								"sort",
							},
							TestImports: []string{
								"sort",
								"strconv",
							},
						},
					},
				},
			},
		},
		"code, test, xtest": {
			fileRoot:   j("simpleallt"),
			importRoot: "simple",
			out: PackageTree{
				ImportRoot: "simple",
				Packages: map[string]PackageOrErr{
					"simple": {
						P: Package{
							ImportPath:  "simple",
							CommentPath: "",
							Name:        "simple",
							Imports: []string{
								"github.com/sdboyer/gps",
								"sort",
							},
							TestImports: []string{
								"math/rand",
								"sort",
								"strconv",
							},
						},
					},
				},
			},
		},
		"one pkg multifile": {
			fileRoot:   j("m1p"),
			importRoot: "m1p",
			out: PackageTree{
				ImportRoot: "m1p",
				Packages: map[string]PackageOrErr{
					"m1p": {
						P: Package{
							ImportPath:  "m1p",
							CommentPath: "",
							Name:        "m1p",
							Imports: []string{
								"github.com/sdboyer/gps",
								"os",
								"sort",
							},
						},
					},
				},
			},
		},
		"one nested below": {
			fileRoot:   j("nest"),
			importRoot: "nest",
			out: PackageTree{
				ImportRoot: "nest",
				Packages: map[string]PackageOrErr{
					"nest": {
						P: Package{
							ImportPath:  "nest",
							CommentPath: "",
							Name:        "simple",
							Imports: []string{
								"github.com/sdboyer/gps",
								"sort",
							},
						},
					},
					"nest/m1p": {
						P: Package{
							ImportPath:  "nest/m1p",
							CommentPath: "",
							Name:        "m1p",
							Imports: []string{
								"github.com/sdboyer/gps",
								"os",
								"sort",
							},
						},
					},
				},
			},
		},
		"malformed go file": {
			fileRoot:   j("bad"),
			importRoot: "bad",
			out: PackageTree{
				ImportRoot: "bad",
				Packages: map[string]PackageOrErr{
					"bad": {
						Err: scanner.ErrorList{
							&scanner.Error{
								Pos: token.Position{
									Filename: j("bad", "bad.go"),
									Offset:   113,
									Line:     2,
									Column:   43,
								},
								Msg: "expected 'package', found 'EOF'",
							},
						},
					},
				},
			},
		},
		"two nested under empty root": {
			fileRoot:   j("ren"),
			importRoot: "ren",
			out: PackageTree{
				ImportRoot: "ren",
				Packages: map[string]PackageOrErr{
					"ren": {
						Err: &build.NoGoError{
							Dir: j("ren"),
						},
					},
					"ren/m1p": {
						P: Package{
							ImportPath:  "ren/m1p",
							CommentPath: "",
							Name:        "m1p",
							Imports: []string{
								"github.com/sdboyer/gps",
								"os",
								"sort",
							},
						},
					},
					"ren/simple": {
						P: Package{
							ImportPath:  "ren/simple",
							CommentPath: "",
							Name:        "simple",
							Imports: []string{
								"github.com/sdboyer/gps",
								"sort",
							},
						},
					},
				},
			},
		},
		"internal name mismatch": {
			fileRoot:   j("doublenest"),
			importRoot: "doublenest",
			out: PackageTree{
				ImportRoot: "doublenest",
				Packages: map[string]PackageOrErr{
					"doublenest": {
						P: Package{
							ImportPath:  "doublenest",
							CommentPath: "",
							Name:        "base",
							Imports: []string{
								"github.com/sdboyer/gps",
								"go/parser",
							},
						},
					},
					"doublenest/namemismatch": {
						P: Package{
							ImportPath:  "doublenest/namemismatch",
							CommentPath: "",
							Name:        "nm",
							Imports: []string{
								"github.com/Masterminds/semver",
								"os",
							},
						},
					},
					"doublenest/namemismatch/m1p": {
						P: Package{
							ImportPath:  "doublenest/namemismatch/m1p",
							CommentPath: "",
							Name:        "m1p",
							Imports: []string{
								"github.com/sdboyer/gps",
								"os",
								"sort",
							},
						},
					},
				},
			},
		},
		"file and importroot mismatch": {
			fileRoot:   j("doublenest"),
			importRoot: "other",
			out: PackageTree{
				ImportRoot: "other",
				Packages: map[string]PackageOrErr{
					"other": {
						P: Package{
							ImportPath:  "other",
							CommentPath: "",
							Name:        "base",
							Imports: []string{
								"github.com/sdboyer/gps",
								"go/parser",
							},
						},
					},
					"other/namemismatch": {
						P: Package{
							ImportPath:  "other/namemismatch",
							CommentPath: "",
							Name:        "nm",
							Imports: []string{
								"github.com/Masterminds/semver",
								"os",
							},
						},
					},
					"other/namemismatch/m1p": {
						P: Package{
							ImportPath:  "other/namemismatch/m1p",
							CommentPath: "",
							Name:        "m1p",
							Imports: []string{
								"github.com/sdboyer/gps",
								"os",
								"sort",
							},
						},
					},
				},
			},
		},
		"code and ignored main": {
			fileRoot:   j("igmain"),
			importRoot: "simple",
			out: PackageTree{
				ImportRoot: "simple",
				Packages: map[string]PackageOrErr{
					"simple": {
						P: Package{
							ImportPath:  "simple",
							CommentPath: "",
							Name:        "simple",
							Imports: []string{
								"github.com/sdboyer/gps",
								"sort",
								"unicode",
							},
						},
					},
				},
			},
		},
		"code and ignored main with comment leader": {
			fileRoot:   j("igmainlong"),
			importRoot: "simple",
			out: PackageTree{
				ImportRoot: "simple",
				Packages: map[string]PackageOrErr{
					"simple": {
						P: Package{
							ImportPath:  "simple",
							CommentPath: "",
							Name:        "simple",
							Imports: []string{
								"github.com/sdboyer/gps",
								"sort",
								"unicode",
							},
						},
					},
				},
			},
		},
		"code, tests, and ignored main": {
			fileRoot:   j("igmaint"),
			importRoot: "simple",
			out: PackageTree{
				ImportRoot: "simple",
				Packages: map[string]PackageOrErr{
					"simple": {
						P: Package{
							ImportPath:  "simple",
							CommentPath: "",
							Name:        "simple",
							Imports: []string{
								"github.com/sdboyer/gps",
								"sort",
								"unicode",
							},
							TestImports: []string{
								"math/rand",
								"strconv",
							},
						},
					},
				},
			},
		},
		// New code allows this because it doesn't care if the code compiles (kinda) or not,
		// so maybe this is actually not an error anymore?
		//
		// TODO re-enable this case after the full and proper ListPackages()
		// refactor in #99
		/*"two pkgs": {
			fileRoot:   j("twopkgs"),
			importRoot: "twopkgs",
			out: PackageTree{
				ImportRoot: "twopkgs",
				Packages: map[string]PackageOrErr{
					"twopkgs": {
						Err: &build.MultiplePackageError{
							Dir:      j("twopkgs"),
							Packages: []string{"simple", "m1p"},
							Files:    []string{"a.go", "b.go"},
						},
					},
				},
			},
		}, */
		// imports a missing pkg
		"missing import": {
			fileRoot:   j("missing"),
			importRoot: "missing",
			out: PackageTree{
				ImportRoot: "missing",
				Packages: map[string]PackageOrErr{
					"missing": {
						P: Package{
							ImportPath:  "missing",
							CommentPath: "",
							Name:        "simple",
							Imports: []string{
								"github.com/sdboyer/gps",
								"missing/missing",
								"sort",
							},
						},
					},
					"missing/m1p": {
						P: Package{
							ImportPath:  "missing/m1p",
							CommentPath: "",
							Name:        "m1p",
							Imports: []string{
								"github.com/sdboyer/gps",
								"os",
								"sort",
							},
						},
					},
				},
			},
		},
		// has disallowed dir names
		"disallowed dirs": {
			fileRoot:   j("disallow"),
			importRoot: "disallow",
			out: PackageTree{
				ImportRoot: "disallow",
				Packages: map[string]PackageOrErr{
					"disallow": {
						P: Package{
							ImportPath:  "disallow",
							CommentPath: "",
							Name:        "disallow",
							Imports: []string{
								"disallow/testdata",
								"github.com/sdboyer/gps",
								"sort",
							},
						},
					},
					// disallow/.m1p is ignored by listPackages...for now. Kept
					// here commented because this might change again...
					//"disallow/.m1p": {
					//P: Package{
					//ImportPath:  "disallow/.m1p",
					//CommentPath: "",
					//Name:        "m1p",
					//Imports: []string{
					//"github.com/sdboyer/gps",
					//"os",
					//"sort",
					//},
					//},
					//},
					"disallow/testdata": {
						P: Package{
							ImportPath:  "disallow/testdata",
							CommentPath: "",
							Name:        "testdata",
							Imports: []string{
								"hash",
							},
						},
					},
				},
			},
		},
		// This case mostly exists for the PackageTree methods, but it does
		// cover a bit of range
		"varied": {
			fileRoot:   j("varied"),
			importRoot: "varied",
			out: PackageTree{
				ImportRoot: "varied",
				Packages: map[string]PackageOrErr{
					"varied": {
						P: Package{
							ImportPath:  "varied",
							CommentPath: "",
							Name:        "main",
							Imports: []string{
								"net/http",
								"varied/namemismatch",
								"varied/otherpath",
								"varied/simple",
							},
						},
					},
					"varied/otherpath": {
						P: Package{
							ImportPath:  "varied/otherpath",
							CommentPath: "",
							Name:        "otherpath",
							Imports:     []string{},
							TestImports: []string{
								"varied/m1p",
							},
						},
					},
					"varied/simple": {
						P: Package{
							ImportPath:  "varied/simple",
							CommentPath: "",
							Name:        "simple",
							Imports: []string{
								"github.com/sdboyer/gps",
								"go/parser",
								"varied/simple/another",
							},
						},
					},
					"varied/simple/another": {
						P: Package{
							ImportPath:  "varied/simple/another",
							CommentPath: "",
							Name:        "another",
							Imports: []string{
								"hash",
								"varied/m1p",
							},
							TestImports: []string{
								"encoding/binary",
							},
						},
					},
					"varied/namemismatch": {
						P: Package{
							ImportPath:  "varied/namemismatch",
							CommentPath: "",
							Name:        "nm",
							Imports: []string{
								"github.com/Masterminds/semver",
								"os",
							},
						},
					},
					"varied/m1p": {
						P: Package{
							ImportPath:  "varied/m1p",
							CommentPath: "",
							Name:        "m1p",
							Imports: []string{
								"github.com/sdboyer/gps",
								"os",
								"sort",
							},
						},
					},
				},
			},
		},
	}

	for name, fix := range table {
		// Sanity-check the fixture dir exists before invoking ListPackages.
		if _, err := os.Stat(fix.fileRoot); err != nil {
			t.Errorf("listPackages(%q): error on fileRoot %s: %s", name, fix.fileRoot, err)
			continue
		}

		out, err := ListPackages(fix.fileRoot, fix.importRoot)

		// Compare actual vs. expected error in all four presence combinations.
		if err != nil && fix.err == nil {
			t.Errorf("listPackages(%q): Received error but none expected: %s", name, err)
		} else if fix.err != nil && err == nil {
			t.Errorf("listPackages(%q): Error expected but none received", name)
		} else if fix.err != nil && err != nil {
			if !reflect.DeepEqual(fix.err, err) {
				t.Errorf("listPackages(%q): Did not receive expected error:\n\t(GOT): %s\n\t(WNT): %s", name, err, fix.err)
			}
		}

		// Only compare trees when the fixture actually specifies one.
		if fix.out.ImportRoot != "" && fix.out.Packages != nil {
			if !reflect.DeepEqual(out, fix.out) {
				if fix.out.ImportRoot != out.ImportRoot {
					t.Errorf("listPackages(%q): Expected ImportRoot %s, got %s", name, fix.out.ImportRoot, out.ImportRoot)
				}

				// overwrite the out one to see if we still have a real problem
				out.ImportRoot = fix.out.ImportRoot
				if !reflect.DeepEqual(out, fix.out) {
					// For multi-package fixtures, diff per package to make the
					// failure message actionable; otherwise dump both trees.
					if len(fix.out.Packages) < 2 {
						t.Errorf("listPackages(%q): Did not get expected PackageOrErrs:\n\t(GOT): %#v\n\t(WNT): %#v", name, out, fix.out)
					} else {
						seen := make(map[string]bool)
						for path, perr := range fix.out.Packages {
							seen[path] = true
							if operr, exists := out.Packages[path]; !exists {
								t.Errorf("listPackages(%q): Expected PackageOrErr for path %s was missing from output:\n\t%s", name, path, perr)
							} else {
								if !reflect.DeepEqual(perr, operr) {
									t.Errorf("listPackages(%q): PkgOrErr for path %s was not as expected:\n\t(GOT): %#v\n\t(WNT): %#v", name, path, operr, perr)
								}
							}
						}

						for path, operr := range out.Packages {
							if seen[path] {
								continue
							}

							t.Errorf("listPackages(%q): Got PackageOrErr for path %s, but none was expected:\n\t%s", name, path, operr)
						}
					}
				}
			}
		}
	}
}
// TestListExternalImports exercises ListExternalImports() over reach maps
// built from the 'varied' test tree, toggling main-package inclusion, test
// inclusion, and the ignore set across a sequence of cases. Cases share
// mutable state (expect/name/ignore/main/tests) and MUST run in order.
func TestListExternalImports(t *testing.T) {
	// There's enough in the 'varied' test case to test most of what matters
	vptree, err := ListPackages(filepath.Join(getwd(t), "_testdata", "src", "varied"), "varied")
	if err != nil {
		t.Fatalf("listPackages failed on varied test case: %s", err)
	}

	// Shared case state; each case below mutates these, then calls validate().
	var expect []string
	var name string
	var ignore map[string]bool
	var main, tests bool

	// validate recomputes the external import list with the current settings
	// and compares it against expect.
	validate := func() {
		result := vptree.ExternalReach(main, tests, ignore).ListExternalImports()
		if !reflect.DeepEqual(expect, result) {
			t.Errorf("Wrong imports in %q case:\n\t(GOT): %s\n\t(WNT): %s", name, result, expect)
		}
	}

	// Complete set of external imports reachable from 'varied' when nothing
	// is excluded; except() below subtracts from this.
	all := []string{
		"encoding/binary",
		"github.com/Masterminds/semver",
		"github.com/sdboyer/gps",
		"go/parser",
		"hash",
		"net/http",
		"os",
		"sort",
	}

	// helper to rewrite expect, except for a couple packages
	//
	// this makes it easier to see what we're taking out on each test
	except := func(not ...string) {
		expect = make([]string, len(all)-len(not))

		drop := make(map[string]bool)
		for _, npath := range not {
			drop[npath] = true
		}

		k := 0
		for _, path := range all {
			if !drop[path] {
				expect[k] = path
				k++
			}
		}
	}

	// everything on
	name = "simple"
	except()
	main, tests = true, true
	validate()

	// Now without tests, which should just cut one
	name = "no tests"
	tests = false
	except("encoding/binary")
	validate()

	// Now skip main, which still just cuts out one
	name = "no main"
	main, tests = false, true
	except("net/http")
	validate()

	// No test and no main, which should be additive
	name = "no test, no main"
	main, tests = false, false
	except("net/http", "encoding/binary")
	validate()

	// now, the ignore tests. turn main and tests back on
	main, tests = true, true

	// start with non-matching
	name = "non-matching ignore"
	ignore = map[string]bool{
		"nomatch": true,
	}
	except()
	validate()

	// should have the same effect as ignoring main
	name = "ignore the root"
	ignore = map[string]bool{
		"varied": true,
	}
	except("net/http")
	validate()

	// now drop a more interesting one
	name = "ignore simple"
	ignore = map[string]bool{
		"varied/simple": true,
	}
	// we get github.com/sdboyer/gps from m1p, too, so it should still be there
	except("go/parser")
	validate()

	// now drop two
	name = "ignore simple and namemismatch"
	ignore = map[string]bool{
		"varied/simple":       true,
		"varied/namemismatch": true,
	}
	except("go/parser", "github.com/Masterminds/semver")
	validate()

	// make sure tests and main play nice with ignore
	name = "ignore simple and namemismatch, and no tests"
	tests = false
	except("go/parser", "github.com/Masterminds/semver", "encoding/binary")
	validate()
	name = "ignore simple and namemismatch, and no main"
	main, tests = false, true
	except("go/parser", "github.com/Masterminds/semver", "net/http")
	validate()
	name = "ignore simple and namemismatch, and no main or tests"
	main, tests = false, false
	except("go/parser", "github.com/Masterminds/semver", "net/http", "encoding/binary")
	validate()
	main, tests = true, true

	// ignore two that should knock out gps
	name = "ignore both importers"
	ignore = map[string]bool{
		"varied/simple": true,
		"varied/m1p":    true,
	}
	except("sort", "github.com/sdboyer/gps", "go/parser")
	validate()

	// finally, directly ignore some external packages
	name = "ignore external"
	ignore = map[string]bool{
		"github.com/sdboyer/gps": true,
		"go/parser":              true,
		"sort":                   true,
	}
	except("sort", "github.com/sdboyer/gps", "go/parser")
	validate()

	// The only thing varied *doesn't* cover is disallowed path patterns
	// NOTE(review): name still holds "ignore external" here, so a failure
	// below misreports its case name — consider setting name first.
	ptree, err := ListPackages(filepath.Join(getwd(t), "_testdata", "src", "disallow"), "disallow")
	if err != nil {
		t.Fatalf("listPackages failed on disallow test case: %s", err)
	}

	result := ptree.ExternalReach(false, false, nil).ListExternalImports()
	expect = []string{"github.com/sdboyer/gps", "hash", "sort"}
	if !reflect.DeepEqual(expect, result) {
		t.Errorf("Wrong imports in %q case:\n\t(GOT): %s\n\t(WNT): %s", name, result, expect)
	}
}
func TestExternalReach(t *testing.T) {
// There's enough in the 'varied' test case to test most of what matters
vptree, err := ListPackages(filepath.Join(getwd(t), "_testdata", "src", "varied"), "varied")
if err != nil {
t.Fatalf("listPackages failed on varied test case: %s", err)
}
// Set up vars for validate closure
var expect map[string][]string
var name string
var main, tests bool
var ignore map[string]bool
validate := func() {
result := vptree.ExternalReach(main, tests, ignore)
if !reflect.DeepEqual(expect, result) {
seen := make(map[string]bool)
for ip, epkgs := range expect {
seen[ip] = true
if pkgs, exists := result[ip]; !exists {
t.Errorf("ver(%q): expected import path %s was not present in result", name, ip)
} else {
if !reflect.DeepEqual(pkgs, epkgs) {
t.Errorf("ver(%q): did not get expected package set for import path %s:\n\t(GOT): %s\n\t(WNT): %s", name, ip, pkgs, epkgs)
}
}
}
for ip, pkgs := range result {
if seen[ip] {
continue
}
t.Errorf("ver(%q): Got packages for import path %s, but none were expected:\n\t%s", name, ip, pkgs)
}
}
}
all := map[string][]string{
"varied": {"encoding/binary", "github.com/Masterminds/semver", "github.com/sdboyer/gps", "go/parser", "hash", "net/http", "os", "sort"},
"varied/m1p": {"github.com/sdboyer/gps", "os", "sort"},
"varied/namemismatch": {"github.com/Masterminds/semver", "os"},
"varied/otherpath": {"github.com/sdboyer/gps", "os", "sort"},
"varied/simple": {"encoding/binary", "github.com/sdboyer/gps", "go/parser", "hash", "os", "sort"},
"varied/simple/another": {"encoding/binary", "github.com/sdboyer/gps", "hash", "os", "sort"},
}
// build a map to validate the exception inputs. do this because shit is
// hard enough to keep track of that it's preferable not to have silent
// success if a typo creeps in and we're trying to except an import that
// isn't in a pkg in the first place
valid := make(map[string]map[string]bool)
for ip, expkgs := range all {
m := make(map[string]bool)
for _, pkg := range expkgs {
m[pkg] = true
}
valid[ip] = m
}
// helper to compose expect, excepting specific packages
//
// this makes it easier to see what we're taking out on each test
except := func(pkgig ...string) {
// reinit expect with everything from all
expect = make(map[string][]string)
for ip, expkgs := range all {
sl := make([]string, len(expkgs))
copy(sl, expkgs)
expect[ip] = sl
}
// now build the dropmap
drop := make(map[string]map[string]bool)
for _, igstr := range pkgig {
// split on space; first elem is import path to pkg, the rest are
// the imports to drop.
not := strings.Split(igstr, " ")
var ip string
ip, not = not[0], not[1:]
if _, exists := valid[ip]; !exists {
t.Fatalf("%s is not a package name we're working with, doofus", ip)
}
// if only a single elem was passed, though, drop the whole thing
if len(not) == 0 {
delete(expect, ip)
continue
}
m := make(map[string]bool)
for _, imp := range not {
if !valid[ip][imp] {
t.Fatalf("%s is not a reachable import of %s, even in the all case", imp, ip)
}
m[imp] = true
}
drop[ip] = m
}
for ip, pkgs := range expect {
var npkgs []string
for _, imp := range pkgs {
if !drop[ip][imp] {
npkgs = append(npkgs, imp)
}
}
expect[ip] = npkgs
}
}
// first, validate all
name = "all"
main, tests = true, true
except()
validate()
// turn off main pkgs, which necessarily doesn't affect anything else
name = "no main"
main = false
except("varied")
validate()
// ignoring the "varied" pkg has same effect as disabling main pkgs
name = "ignore root"
ignore = map[string]bool{
"varied": true,
}
main = true
validate()
// when we drop tests, varied/otherpath loses its link to varied/m1p and
// varied/simple/another loses its test import, which has a fairly big
// cascade
name = "no tests"
tests = false
ignore = nil
except(
"varied encoding/binary",
"varied/simple encoding/binary",
"varied/simple/another encoding/binary",
"varied/otherpath github.com/sdboyer/gps os sort",
)
// almost the same as previous, but varied just goes away completely
name = "no main or tests"
main = false
except(
"varied",
"varied/simple encoding/binary",
"varied/simple/another encoding/binary",
"varied/otherpath github.com/sdboyer/gps os sort",
)
validate()
// focus on ignores now, so reset main and tests
main, tests = true, true
// now, the fun stuff. punch a hole in the middle by cutting out
// varied/simple
name = "ignore varied/simple"
ignore = map[string]bool{
"varied/simple": true,
}
except(
// root pkg loses on everything in varied/simple/another
"varied hash encoding/binary go/parser",
"varied/simple",
)
validate()
// widen the hole by excluding otherpath
name = "ignore varied/{otherpath,simple}"
ignore = map[string]bool{
"varied/otherpath": true,
"varied/simple": true,
}
except(
// root pkg loses on everything in varied/simple/another and varied/m1p
"varied hash encoding/binary go/parser github.com/sdboyer/gps sort",
"varied/otherpath",
"varied/simple",
)
validate()
// remove namemismatch, though we're mostly beating a dead horse now
name = "ignore varied/{otherpath,simple,namemismatch}"
ignore["varied/namemismatch"] = true
except(
// root pkg loses on everything in varied/simple/another and varied/m1p
"varied hash encoding/binary go/parser github.com/sdboyer/gps sort os github.com/Masterminds/semver",
"varied/otherpath",
"varied/simple",
"varied/namemismatch",
)
validate()
}
// Unused reference copy of the expected package→imports table used by the
// test above; assigned to the blank identifier so the compiler does not
// complain about an unused variable. Kept for documentation only.
var _ = map[string][]string{
	"varied":                {"encoding/binary", "github.com/Masterminds/semver", "github.com/sdboyer/gps", "go/parser", "hash", "net/http", "os", "sort"},
	"varied/m1p":            {"github.com/sdboyer/gps", "os", "sort"},
	"varied/namemismatch":   {"github.com/Masterminds/semver", "os"},
	"varied/otherpath":      {"github.com/sdboyer/gps", "os", "sort"},
	"varied/simple":         {"encoding/binary", "github.com/sdboyer/gps", "go/parser", "hash", "os", "sort"},
	"varied/simple/another": {"encoding/binary", "github.com/sdboyer/gps", "hash", "os", "sort"},
}
func getwd(t *testing.T) string {
cwd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
return cwd
}
|
package yagnats
import (
"errors"
"sync"
"time"
"github.com/apcera/nats"
)
// ApceraWrapperNATSClient is the subset of NATS client behaviour exposed by
// ApceraWrapper: connection lifecycle, publish/subscribe, and registration
// of reconnect notifications.
type ApceraWrapperNATSClient interface {
	// Ping reports whether a flush round-trip to the server succeeds.
	Ping() bool
	Connect() error
	Disconnect()
	Publish(subject string, payload []byte) error
	PublishWithReplyTo(subject, reply string, payload []byte) error
	Subscribe(subject string, handler nats.MsgHandler) (*nats.Subscription, error)
	SubscribeWithQueue(subject, queue string, handler nats.MsgHandler) (*nats.Subscription, error)
	Unsubscribe(subscription *nats.Subscription) error
	// AddReconnectedCB registers a callback invoked after a reconnect.
	AddReconnectedCB(func(ApceraWrapperNATSClient))
}
// ApceraWrapper adapts the apcera/nats client to the
// ApceraWrapperNATSClient interface. The embedded mutex guards conn and
// reconnectcbs.
type ApceraWrapper struct {
	options      nats.Options
	conn         *nats.Conn // nil until Connect succeeds
	reconnectcbs []func(ApceraWrapperNATSClient)
	*sync.Mutex
}
// NewApceraClientWrapper builds an ApceraWrapper that will connect to one of
// the given NATS server URLs, waiting 500ms between reconnect attempts.
// Connect must be called before the wrapper is usable.
func NewApceraClientWrapper(urls []string) *ApceraWrapper {
	options := nats.DefaultOptions
	options.Servers = urls
	options.ReconnectWait = 500 * time.Millisecond
	c := &ApceraWrapper{
		options:      options,
		reconnectcbs: []func(ApceraWrapperNATSClient){},
		Mutex:        &sync.Mutex{},
	}
	// Set after construction so the callback can close over c.
	c.options.ReconnectedCB = c.reconnectcb
	return c
}
// reconnectcb fans the underlying NATS reconnect event out to every
// registered callback. The callback slice is snapshotted under the lock so
// the callbacks themselves run without holding it.
func (c *ApceraWrapper) reconnectcb(conn *nats.Conn) {
	c.Lock()
	snapshot := append([]func(ApceraWrapperNATSClient)(nil), c.reconnectcbs...)
	c.Unlock()
	for _, notify := range snapshot {
		notify(c)
	}
}
// AddReconnectedCB registers a callback to be invoked whenever the client
// reconnects to a NATS server.
func (c *ApceraWrapper) AddReconnectedCB(handler func(ApceraWrapperNATSClient)) {
	c.Lock()
	defer c.Unlock()
	c.reconnectcbs = append(c.reconnectcbs, handler)
}
// connection returns the current NATS connection (possibly nil) under the
// wrapper's lock.
func (c *ApceraWrapper) connection() *nats.Conn {
	c.Lock()
	defer c.Unlock()
	return c.conn
}
// Connect establishes a connection using the configured options. Calling
// Connect while an open connection exists is an error.
func (c *ApceraWrapper) Connect() error {
	c.Lock()
	defer c.Unlock()
	alreadyOpen := c.conn != nil && !c.conn.IsClosed()
	if alreadyOpen {
		return errors.New("already connected")
	}
	conn, err := c.options.Connect()
	if err == nil {
		c.conn = conn
	}
	return err
}
// Disconnect closes the underlying connection, if one exists.
func (c *ApceraWrapper) Disconnect() {
	if conn := c.connection(); conn != nil {
		conn.Close()
	}
}
// Publish sends payload on the given subject, or reports an error if no
// connection is open.
func (c *ApceraWrapper) Publish(subject string, payload []byte) error {
	if conn := c.connection(); conn != nil {
		return conn.Publish(subject, payload)
	}
	return errors.New("not connected")
}
// Ping flushes the connection with a 500ms timeout and reports whether the
// flush succeeded. It returns false when not connected.
func (c *ApceraWrapper) Ping() bool {
	conn := c.connection()
	if conn == nil {
		return false
	}
	return conn.FlushTimeout(500*time.Millisecond) == nil
}
// PublishWithReplyTo sends payload on subject with reply as the reply-to
// subject, or reports an error if no connection is open.
func (c *ApceraWrapper) PublishWithReplyTo(subject, reply string, payload []byte) error {
	if conn := c.connection(); conn != nil {
		return conn.PublishRequest(subject, reply, payload)
	}
	return errors.New("not connected")
}
// Subscribe registers handler for messages on subject, or reports an error
// if no connection is open.
func (c *ApceraWrapper) Subscribe(subject string, handler nats.MsgHandler) (*nats.Subscription, error) {
	if conn := c.connection(); conn != nil {
		return conn.Subscribe(subject, handler)
	}
	return nil, errors.New("not connected")
}
// SubscribeWithQueue registers handler for subject as a member of the given
// queue group, or reports an error if no connection is open.
func (c *ApceraWrapper) SubscribeWithQueue(subject, queue string, handler nats.MsgHandler) (*nats.Subscription, error) {
	if conn := c.connection(); conn != nil {
		return conn.QueueSubscribe(subject, queue, handler)
	}
	return nil, errors.New("not connected")
}
// Unsubscribe cancels the given subscription.
//
// A nil subscription is rejected with an error instead of letting the call
// panic with a nil-pointer dereference.
func (c *ApceraWrapper) Unsubscribe(subscription *nats.Subscription) error {
	if subscription == nil {
		return errors.New("nil subscription")
	}
	return subscription.Unsubscribe()
}
Let apcera's nats client retry infinitely
package yagnats
import (
"errors"
"sync"
"time"
"github.com/apcera/nats"
)
type ApceraWrapperNATSClient interface {
Ping() bool
Connect() error
Disconnect()
Publish(subject string, payload []byte) error
PublishWithReplyTo(subject, reply string, payload []byte) error
Subscribe(subject string, handler nats.MsgHandler) (*nats.Subscription, error)
SubscribeWithQueue(subject, queue string, handler nats.MsgHandler) (*nats.Subscription, error)
Unsubscribe(subscription *nats.Subscription) error
AddReconnectedCB(func(ApceraWrapperNATSClient))
}
type ApceraWrapper struct {
options nats.Options
conn *nats.Conn
reconnectcbs []func(ApceraWrapperNATSClient)
*sync.Mutex
}
// NewApceraClientWrapper builds an ApceraWrapper that will connect to one of
// the given NATS server URLs, retrying a lost connection every 500ms,
// forever (MaxReconnect < 0 disables the reconnect-attempt limit).
func NewApceraClientWrapper(urls []string) *ApceraWrapper {
	options := nats.DefaultOptions
	options.Servers = urls
	options.ReconnectWait = 500 * time.Millisecond
	options.MaxReconnect = -1 // retry indefinitely
	c := &ApceraWrapper{
		options:      options,
		reconnectcbs: []func(ApceraWrapperNATSClient){},
		Mutex:        &sync.Mutex{},
	}
	// Set after construction so the callback can close over c.
	c.options.ReconnectedCB = c.reconnectcb
	return c
}
func (c *ApceraWrapper) reconnectcb(conn *nats.Conn) {
c.Lock()
callbacks := make([]func(ApceraWrapperNATSClient), len(c.reconnectcbs))
copy(callbacks, c.reconnectcbs)
c.Unlock()
for _, cb := range callbacks {
cb(c)
}
}
func (c *ApceraWrapper) AddReconnectedCB(handler func(ApceraWrapperNATSClient)) {
c.Lock()
c.reconnectcbs = append(c.reconnectcbs, handler)
c.Unlock()
}
func (c *ApceraWrapper) connection() *nats.Conn {
c.Lock()
conn := c.conn
c.Unlock()
return conn
}
func (c *ApceraWrapper) Connect() error {
c.Lock()
defer c.Unlock()
if c.conn != nil && !c.conn.IsClosed() {
return errors.New("already connected")
}
conn, err := c.options.Connect()
if err != nil {
return err
}
c.conn = conn
return nil
}
func (c *ApceraWrapper) Disconnect() {
conn := c.connection()
if conn != nil {
conn.Close()
}
}
func (c *ApceraWrapper) Publish(subject string, payload []byte) error {
conn := c.connection()
if conn == nil {
return errors.New("not connected")
}
return conn.Publish(subject, payload)
}
func (c *ApceraWrapper) Ping() bool {
conn := c.connection()
if conn == nil {
return false
}
err := conn.FlushTimeout(500 * time.Millisecond)
return err == nil
}
func (c *ApceraWrapper) PublishWithReplyTo(subject, reply string, payload []byte) error {
conn := c.connection()
if conn == nil {
return errors.New("not connected")
}
return conn.PublishRequest(subject, reply, payload)
}
func (c *ApceraWrapper) Subscribe(subject string, handler nats.MsgHandler) (*nats.Subscription, error) {
conn := c.connection()
if conn == nil {
return nil, errors.New("not connected")
}
return conn.Subscribe(subject, handler)
}
func (c *ApceraWrapper) SubscribeWithQueue(subject, queue string, handler nats.MsgHandler) (*nats.Subscription, error) {
conn := c.connection()
if conn == nil {
return nil, errors.New("not connected")
}
return conn.QueueSubscribe(subject, queue, handler)
}
// Unsubscribe cancels the given subscription.
//
// A nil subscription is rejected with an error instead of letting the call
// panic with a nil-pointer dereference.
func (c *ApceraWrapper) Unsubscribe(subscription *nats.Subscription) error {
	if subscription == nil {
		return errors.New("nil subscription")
	}
	return subscription.Unsubscribe()
}
|
package main
import (
"log"
"os"
"path/filepath"
"strings"
"github.com/fsnotify/fsnotify"
)
func scan(path string) ([]string, error) {
var folders []string
folder, err := os.Open(path)
if err != nil {
return nil, err
}
defer folder.Close()
files, err := folder.Readdir(-1)
if err != nil {
panic(err)
}
for _, fi := range files {
// skip all dot files/folders
if fi.Name()[0] == '.' {
continue
}
if fi.IsDir() {
folders = append(folders, path+"/"+fi.Name())
subfolder, err := scan(path + "/" + fi.Name())
if err != nil {
panic(err)
}
folders = append(folders, subfolder...)
}
}
return folders, nil
}
// watchDirs blocks forever, recursively watching the comma-separated
// directory list in dirs (default ".") and signalling on restart whenever a
// watched file receives a Write event. If exts is a non-empty
// comma-separated extension list, only writes to matching file names
// trigger a restart; an empty exts matches everything.
func watchDirs(dirs, exts string, restart chan bool) {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()
	if dirs == "" {
		dirs = "."
	}
	for _, dd := range strings.Split(dirs, ",") {
		path, err := filepath.Abs(dd)
		if err != nil {
			log.Fatal(err)
		}
		if err = watcher.Add(path); err != nil {
			log.Fatal(err)
		}
		folders, err := scan(path)
		if err != nil {
			log.Fatal(err)
		}
		for _, f := range folders {
			if err = watcher.Add(f); err != nil {
				log.Fatal(err)
			}
		}
	}
	// BUG FIX: strings.Split("", ",") yields [""], so the original
	// len(allExts) > 0 check was always true and its else branch was dead
	// (the empty suffix then matched every name). Treat empty exts as
	// "match all" explicitly; observable behaviour is unchanged.
	var allExts []string
	if exts != "" {
		allExts = strings.Split(exts, ",")
	}
	for {
		select {
		case event := <-watcher.Events:
			if event.Op&fsnotify.Write != fsnotify.Write {
				continue
			}
			if len(allExts) == 0 {
				restart <- true
				continue
			}
			for _, ext := range allExts {
				if strings.HasSuffix(event.Name, ext) {
					restart <- true
					break
				}
			}
		case err := <-watcher.Errors:
			log.Println("error:", err)
		}
	}
}
#5 Listen for all events. This will include create, remove, rename, write, and attribute-change events.
package main
import (
"log"
"os"
"path/filepath"
"strings"
"github.com/fsnotify/fsnotify"
)
func scan(path string) ([]string, error) {
var folders []string
folder, err := os.Open(path)
if err != nil {
return nil, err
}
defer folder.Close()
files, err := folder.Readdir(-1)
if err != nil {
panic(err)
}
for _, fi := range files {
// skip all dot files/folders
if fi.Name()[0] == '.' {
continue
}
if fi.IsDir() {
folders = append(folders, path+"/"+fi.Name())
subfolder, err := scan(path + "/" + fi.Name())
if err != nil {
panic(err)
}
folders = append(folders, subfolder...)
}
}
return folders, nil
}
// watchDirs blocks forever, recursively watching the comma-separated
// directory list in dirs (default ".") and signalling on restart for every
// filesystem event (create, remove, rename, write, chmod). If exts is a
// non-empty comma-separated extension list, only events on matching file
// names trigger a restart; an empty exts matches everything.
func watchDirs(dirs, exts string, restart chan bool) {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()
	if dirs == "" {
		dirs = "."
	}
	for _, dd := range strings.Split(dirs, ",") {
		path, err := filepath.Abs(dd)
		if err != nil {
			log.Fatal(err)
		}
		if err = watcher.Add(path); err != nil {
			log.Fatal(err)
		}
		folders, err := scan(path)
		if err != nil {
			log.Fatal(err)
		}
		for _, f := range folders {
			if err = watcher.Add(f); err != nil {
				log.Fatal(err)
			}
		}
	}
	// BUG FIX: strings.Split("", ",") yields [""], so the original
	// len(allExts) > 0 check was always true and its else branch was dead
	// (the empty suffix then matched every name). Treat empty exts as
	// "match all" explicitly; observable behaviour is unchanged.
	var allExts []string
	if exts != "" {
		allExts = strings.Split(exts, ",")
	}
	for {
		select {
		case event := <-watcher.Events:
			if len(allExts) == 0 {
				restart <- true
				continue
			}
			for _, ext := range allExts {
				if strings.HasSuffix(event.Name, ext) {
					restart <- true
					break
				}
			}
		case err := <-watcher.Errors:
			log.Println("error:", err)
		}
	}
}
|
package linux
import (
"context"
"log"
"github.com/go-ble/ble"
"github.com/go-ble/ble/linux/att"
"github.com/go-ble/ble/linux/gatt"
"github.com/go-ble/ble/linux/hci"
"github.com/pkg/errors"
)
// NewDevice returns the default HCI device, advertising the default name.
func NewDevice() (*Device, error) {
	const defaultName = "Gopher"
	return NewDeviceWithName(defaultName)
}
// NewDeviceWithName returns the default HCI device advertising the given
// name and no notification handler.
func NewDeviceWithName(name string) (*Device, error) {
	var handler ble.NotifyHandler // nil: no handler by default
	return NewDeviceWithNameAndHandler(name, handler)
}
// NewDeviceWithNameAndHandler creates and initializes the default HCI
// device, starts a GATT server advertising name with the given notify
// handler, and begins accepting connections in the background.
func NewDeviceWithNameAndHandler(name string, handler ble.NotifyHandler) (*Device, error) {
	dev, err := hci.NewHCI()
	if err != nil {
		return nil, errors.Wrap(err, "can't create hci")
	}
	if err = dev.Init(); err != nil {
		return nil, errors.Wrap(err, "can't init hci")
	}
	srv, err := gatt.NewServerWithNameAndHandler(name, handler)
	if err != nil {
		return nil, errors.Wrap(err, "can't create server")
	}
	// mtu := ble.DefaultMTU
	mtu := ble.MaxMTU // TODO: get this from user using Option.
	if mtu > ble.MaxMTU {
		// BUG FIX: the original used errors.Wrapf(err, ...) here, but err is
		// nil at this point and Wrapf returns nil for a nil error, so the
		// guard silently returned (nil, nil). Use Errorf so an out-of-range
		// MTU actually reports an error.
		return nil, errors.Errorf("maximum ATT_MTU is %d", ble.MaxMTU)
	}
	go loop(dev, srv, mtu)
	return &Device{HCI: dev, Server: srv}, nil
}
// loop accepts incoming connections on dev and attaches a fresh ATT server
// (backed by s's attribute database) to each. It returns when Accept fails.
func loop(dev *hci.HCI, s *gatt.Server, mtu int) {
	for {
		l2c, err := dev.Accept()
		if err != nil {
			// NOTE(review): closing the Device also makes Accept fail, so this
			// logs even on a normal shutdown — confirm whether that is wanted.
			log.Printf("can't accept: %s", err)
			return
		}
		// Initialize the per-connection cccd values.
		l2c.SetContext(context.WithValue(l2c.Context(), ble.ContextKeyCCC, make(map[uint16]uint16)))
		l2c.SetRxMTU(mtu)
		// Hold the server lock while wiring the shared DB to the new server.
		s.Lock()
		as, err := att.NewServer(s.DB(), l2c)
		s.Unlock()
		if err != nil {
			log.Printf("can't create ATT server: %s", err)
			continue
		}
		// Serve this connection on its own goroutine.
		go as.Loop()
	}
}
// Device is a BLE device backed by an HCI transport and a GATT server.
type Device struct {
	HCI    *hci.HCI
	Server *gatt.Server
}
// AddService adds a service to the GATT server's attribute database.
func (d *Device) AddService(svc *ble.Service) error {
	return d.Server.AddService(svc)
}
// RemoveAllServices removes all services that are currently in the GATT
// server's attribute database.
func (d *Device) RemoveAllServices() error {
	return d.Server.RemoveAllServices()
}
// SetServices sets the specified services in the database.
// It removes all currently added services, if any.
func (d *Device) SetServices(svcs []*ble.Service) error {
	return d.Server.SetServices(svcs)
}
// Stop stops the gatt server by closing the underlying HCI device.
func (d *Device) Stop() error {
	return d.HCI.Close()
}
// Advertise broadcasts the given advertisement until ctx is cancelled, then
// stops advertising and returns ctx's error.
func (d *Device) Advertise(ctx context.Context, adv ble.Advertisement) error {
	if err := d.HCI.AdvertiseAdv(adv); err != nil {
		return err
	}
	<-ctx.Done()
	d.HCI.StopAdvertising()
	return ctx.Err()
}
// AdvertiseNameAndServices advertises device name, and specified service UUIDs.
// It tries to fit the UUIDs in the advertising packet as much as possible.
// If name doesn't fit in the advertising packet, it will be put in scan response.
// Blocks until ctx is cancelled, then stops advertising and returns ctx.Err().
func (d *Device) AdvertiseNameAndServices(ctx context.Context, name string, uuids ...ble.UUID) error {
	if err := d.HCI.AdvertiseNameAndServices(name, uuids...); err != nil {
		return err
	}
	<-ctx.Done()
	d.HCI.StopAdvertising()
	return ctx.Err()
}
// AdvertiseMfgData advertises the given manufacturer data until ctx is
// cancelled, then stops advertising and returns ctx.Err().
func (d *Device) AdvertiseMfgData(ctx context.Context, id uint16, b []byte) error {
	if err := d.HCI.AdvertiseMfgData(id, b); err != nil {
		return err
	}
	<-ctx.Done()
	d.HCI.StopAdvertising()
	return ctx.Err()
}
// AdvertiseServiceData16 advertises data associated with a 16bit service
// uuid until ctx is cancelled, then stops advertising and returns ctx.Err().
func (d *Device) AdvertiseServiceData16(ctx context.Context, id uint16, b []byte) error {
	if err := d.HCI.AdvertiseServiceData16(id, b); err != nil {
		return err
	}
	<-ctx.Done()
	d.HCI.StopAdvertising()
	return ctx.Err()
}
// AdvertiseIBeaconData advertises an iBeacon with the given manufacturer
// data until ctx is cancelled, then stops advertising and returns ctx.Err().
func (d *Device) AdvertiseIBeaconData(ctx context.Context, b []byte) error {
	if err := d.HCI.AdvertiseIBeaconData(b); err != nil {
		return err
	}
	<-ctx.Done()
	d.HCI.StopAdvertising()
	return ctx.Err()
}
// AdvertiseIBeacon advertises an iBeacon with the specified parameters until
// ctx is cancelled, then stops advertising and returns ctx.Err().
func (d *Device) AdvertiseIBeacon(ctx context.Context, u ble.UUID, major, minor uint16, pwr int8) error {
	if err := d.HCI.AdvertiseIBeacon(u, major, minor, pwr); err != nil {
		return err
	}
	<-ctx.Done()
	d.HCI.StopAdvertising()
	return ctx.Err()
}
// Scan starts scanning. Duplicated advertisements will be filtered out if
// allowDup is set to false. Discovered advertisements are delivered to h.
// Blocks until ctx is cancelled, then stops scanning and returns ctx.Err().
func (d *Device) Scan(ctx context.Context, allowDup bool, h ble.AdvHandler) error {
	if err := d.HCI.SetAdvHandler(h); err != nil {
		return err
	}
	if err := d.HCI.Scan(allowDup); err != nil {
		return err
	}
	<-ctx.Done()
	d.HCI.StopScanning()
	return ctx.Err()
}
// Dial connects to the remote device at address a as a GATT client.
func (d *Device) Dial(ctx context.Context, a ble.Addr) (ble.Client, error) {
	// d.HCI.Dial is a blocking call, although most of time it should return immediately.
	// But in case passing wrong device address or the device went non-connectable, it blocks.
	cln, err := d.HCI.Dial(ctx, a)
	return cln, errors.Wrap(err, "can't dial")
}
// Address returns the listener's device address.
func (d *Device) Address() ble.Addr {
	return d.HCI.Addr()
}
linux - Don't log "can't accept" error on close. (#8)
This is a hack which silences an extraneous log message. The message
gets reported due to an apparent race condition involving the
`sktLoop()` function in `linux/hci/hci.go`. This function implements a
socket-read loop that only returns on error. It is called as a
Goroutine, and typically continues to run until the process terminates.
If client code closes the owning `Device` at an inopportune time
(presumably during the socket read), the `Read()` call returns an
`io.EOF` error, which causes `NewDevice()` to log the following message:
can't accept: skt: EOF
Since the API permits the user to close the device at any time, this
error message is extraneous.
This commit simply inhibits the logging of the error message in case of
EOF. This is not a long-term solution, but it does fix the visible
issue.
package linux
import (
"context"
"io"
"log"
"github.com/go-ble/ble"
"github.com/go-ble/ble/linux/att"
"github.com/go-ble/ble/linux/gatt"
"github.com/go-ble/ble/linux/hci"
"github.com/pkg/errors"
)
// NewDevice returns the default HCI device.
func NewDevice() (*Device, error) {
return NewDeviceWithName("Gopher")
}
// NewDeviceWithName returns the default HCI device.
func NewDeviceWithName(name string) (*Device, error) {
return NewDeviceWithNameAndHandler(name, nil)
}
// NewDeviceWithNameAndHandler creates and initializes the default HCI
// device, starts a GATT server advertising name with the given notify
// handler, and begins accepting connections in the background.
func NewDeviceWithNameAndHandler(name string, handler ble.NotifyHandler) (*Device, error) {
	dev, err := hci.NewHCI()
	if err != nil {
		return nil, errors.Wrap(err, "can't create hci")
	}
	if err = dev.Init(); err != nil {
		return nil, errors.Wrap(err, "can't init hci")
	}
	srv, err := gatt.NewServerWithNameAndHandler(name, handler)
	if err != nil {
		return nil, errors.Wrap(err, "can't create server")
	}
	// mtu := ble.DefaultMTU
	mtu := ble.MaxMTU // TODO: get this from user using Option.
	if mtu > ble.MaxMTU {
		// BUG FIX: the original used errors.Wrapf(err, ...) here, but err is
		// nil at this point and Wrapf returns nil for a nil error, so the
		// guard silently returned (nil, nil). Use Errorf so an out-of-range
		// MTU actually reports an error.
		return nil, errors.Errorf("maximum ATT_MTU is %d", ble.MaxMTU)
	}
	go loop(dev, srv, mtu)
	return &Device{HCI: dev, Server: srv}, nil
}
// loop accepts incoming connections on dev and attaches a fresh ATT server
// (backed by s's attribute database) to each. It returns when Accept fails,
// e.g. after the owning Device is closed.
func loop(dev *hci.HCI, s *gatt.Server, mtu int) {
	for {
		l2c, err := dev.Accept()
		if err != nil {
			// An EOF error indicates that the HCI socket was closed during
			// the read. Don't report this as an error.
			if err != io.EOF {
				log.Printf("can't accept: %s", err)
			}
			return
		}
		// Initialize the per-connection cccd values.
		l2c.SetContext(context.WithValue(l2c.Context(), ble.ContextKeyCCC, make(map[uint16]uint16)))
		l2c.SetRxMTU(mtu)
		// Hold the server lock while wiring the shared DB to the new server.
		s.Lock()
		as, err := att.NewServer(s.DB(), l2c)
		s.Unlock()
		if err != nil {
			log.Printf("can't create ATT server: %s", err)
			continue
		}
		// Serve this connection on its own goroutine.
		go as.Loop()
	}
}
// Device ...
type Device struct {
HCI *hci.HCI
Server *gatt.Server
}
// AddService adds a service to database.
func (d *Device) AddService(svc *ble.Service) error {
return d.Server.AddService(svc)
}
// RemoveAllServices removes all services that are currently in the database.
func (d *Device) RemoveAllServices() error {
return d.Server.RemoveAllServices()
}
// SetServices set the specified service to the database.
// It removes all currently added services, if any.
func (d *Device) SetServices(svcs []*ble.Service) error {
return d.Server.SetServices(svcs)
}
// Stop stops gatt server.
func (d *Device) Stop() error {
return d.HCI.Close()
}
func (d *Device) Advertise(ctx context.Context, adv ble.Advertisement) error {
if err := d.HCI.AdvertiseAdv(adv); err != nil {
return err
}
<-ctx.Done()
d.HCI.StopAdvertising()
return ctx.Err()
}
// AdvertiseNameAndServices advertises device name, and specified service UUIDs.
// It tries to fit the UUIDs in the advertising packet as much as possible.
// If name doesn't fit in the advertising packet, it will be put in scan response.
// Blocks until ctx is cancelled, then stops advertising and returns ctx.Err().
func (d *Device) AdvertiseNameAndServices(ctx context.Context, name string, uuids ...ble.UUID) error {
	if err := d.HCI.AdvertiseNameAndServices(name, uuids...); err != nil {
		return err
	}
	<-ctx.Done()
	d.HCI.StopAdvertising()
	return ctx.Err()
}
// AdvertiseMfgData advertises the given manufacturer data until ctx is
// cancelled, then stops advertising and returns ctx.Err().
func (d *Device) AdvertiseMfgData(ctx context.Context, id uint16, b []byte) error {
	if err := d.HCI.AdvertiseMfgData(id, b); err != nil {
		return err
	}
	<-ctx.Done()
	d.HCI.StopAdvertising()
	return ctx.Err()
}
// AdvertiseServiceData16 advertises data associated with a 16bit service uuid
func (d *Device) AdvertiseServiceData16(ctx context.Context, id uint16, b []byte) error {
if err := d.HCI.AdvertiseServiceData16(id, b); err != nil {
return err
}
<-ctx.Done()
d.HCI.StopAdvertising()
return ctx.Err()
}
// AdvertiseIBeaconData advertise iBeacon with given manufacturer data.
func (d *Device) AdvertiseIBeaconData(ctx context.Context, b []byte) error {
if err := d.HCI.AdvertiseIBeaconData(b); err != nil {
return err
}
<-ctx.Done()
d.HCI.StopAdvertising()
return ctx.Err()
}
// AdvertiseIBeacon advertises iBeacon with specified parameters.
func (d *Device) AdvertiseIBeacon(ctx context.Context, u ble.UUID, major, minor uint16, pwr int8) error {
if err := d.HCI.AdvertiseIBeacon(u, major, minor, pwr); err != nil {
return err
}
<-ctx.Done()
d.HCI.StopAdvertising()
return ctx.Err()
}
// Scan starts scanning. Duplicated advertisements will be filtered out if allowDup is set to false.
func (d *Device) Scan(ctx context.Context, allowDup bool, h ble.AdvHandler) error {
if err := d.HCI.SetAdvHandler(h); err != nil {
return err
}
if err := d.HCI.Scan(allowDup); err != nil {
return err
}
<-ctx.Done()
d.HCI.StopScanning()
return ctx.Err()
}
// Dial ...
func (d *Device) Dial(ctx context.Context, a ble.Addr) (ble.Client, error) {
// d.HCI.Dial is a blocking call, although most of time it should return immediately.
// But in case passing wrong device address or the device went non-connectable, it blocks.
cln, err := d.HCI.Dial(ctx, a)
return cln, errors.Wrap(err, "can't dial")
}
// Address returns the listener's device address.
func (d *Device) Address() ble.Addr {
return d.HCI.Addr()
}
|
// BaruwaAPI Golang bindings for Baruwa REST API
// Copyright (C) 2019 Andrew Colin Kissa <andrew@topdog.za.net>
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package api
const (
	// APIVersion of Baruwa API
	APIVersion = "v1"
	// Version of this library
	Version = "0.0.1"
	// timeFmt is the reference layout used for API timestamps.
	// NOTE(review): colon-separated date fields are unusual — presumably this
	// matches Baruwa's wire format; confirm against the API docs.
	timeFmt = "2006:01:02:15:04:05"
	// Validation error messages shared across the package.
	endpointError      = "The endpoint param is required"
	userIDError        = "The userID param should be > 0"
	aliasIDError       = "The aliasID param should be > 0"
	domainIDError      = "The domainID param should be > 0"
	serverIDError      = "The serverID param should be > 0"
	settingsIDError    = "The settingsID param should be > 0"
	aliasSIDError      = "The alias.ID param should be > 0"
	serverSIDError     = "The server.ID param should be > 0"
	settingsSIDError   = "The settings.ID param should be > 0"
	userParamError     = "The user param is required"
	aliasParamError    = "The alias param is required"
	serverParamError   = "The server param is required"
	settingsParamError = "The settings param is required"
	clientIDError      = "clientID is required"
	clientSecretError  = "secret is required"
	pwFormError        = "The form param is required"
)
FEAT: Add more constants
// BaruwaAPI Golang bindings for Baruwa REST API
// Copyright (C) 2019 Andrew Colin Kissa <andrew@topdog.za.net>
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package api
const (
	// APIVersion of Baruwa API
	APIVersion = "v1"
	// Version of this library
	Version = "0.0.1"
	// timeFmt is the reference layout used for API timestamps.
	// NOTE(review): colon-separated date fields are unusual — presumably this
	// matches Baruwa's wire format; confirm against the API docs.
	timeFmt = "2006:01:02:15:04:05"
	// Validation error messages shared across the package.
	endpointError        = "The endpoint param is required"
	userIDError          = "The userID param should be > 0"
	aliasIDError         = "The aliasID param should be > 0"
	domainIDError        = "The domainID param should be > 0"
	serverIDError        = "The serverID param should be > 0"
	settingsIDError      = "The settingsID param should be > 0"
	aliasSIDError        = "The alias.ID param should be > 0"
	serverSIDError       = "The server.ID param should be > 0"
	settingsSIDError     = "The settings.ID param should be > 0"
	domainSIDError       = "The domain.ID param should be > 0"
	userParamError       = "The user param is required"
	aliasParamError      = "The alias param is required"
	serverParamError     = "The server param is required"
	settingsParamError   = "The settings param is required"
	domainNameParamError = "The domainName param is required"
	domainParamError     = "The domain param is required"
	clientIDError        = "clientID is required"
	clientSecretError    = "secret is required"
	pwFormError          = "The form param is required"
)
|
// Package local provides a filesystem interface
package local
import (
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"time"
"unicode/utf8"
"golang.org/x/text/unicode/norm"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// Constants
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
// Register with Fs
func init() {
	// Describe the "local" backend so rclone's registry can construct it via
	// NewFs; "nounc" is the only backend-specific option.
	fsi := &fs.RegInfo{
		Name:        "local",
		Description: "Local Disk",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "nounc",
			Help:     "Disable UNC (long path names) conversion on Windows",
			Optional: true,
			Examples: []fs.OptionExample{{
				Value: "true",
				Help:  "Disables long file names",
			}},
		}},
	}
	fs.Register(fsi)
}
// Fs represents a local filesystem rooted at root.
//
// dev is devUnset until the root has been statted; list compares it against
// each subdirectory's device to decide whether to recurse.
type Fs struct {
	name        string              // the name of the remote
	root        string              // The root directory (OS path)
	dev         uint64              // device number of root node
	precisionOk sync.Once           // Whether we need to read the precision
	precision   time.Duration       // precision of local filesystem
	wmu         sync.Mutex          // used for locking access to 'warned'.
	warned      map[string]struct{} // whether we have warned about this string
	nounc       bool                // Skip UNC conversion on Windows
}
// Object represents a local filesystem object.
//
// remote and path may differ: remote is the cleaned UTF-8 slash-separated
// form, path the raw OS path.
type Object struct {
	fs     *Fs                    // The Fs this object is part of
	remote string                 // The remote path - properly UTF-8 encoded - for rclone
	path   string                 // The local path - may not be properly UTF-8 encoded - for OS
	info   os.FileInfo            // Interface for file info (always present)
	hashes map[fs.HashType]string // Hashes
}
// ------------------------------------------------------------
// NewFs constructs an Fs from the path.
//
// If root points at a regular file, the parent directory becomes the root
// and fs.ErrorIsFile is returned alongside the (still usable) Fs.
func NewFs(name, root string) (fs.Fs, error) {
	var err error
	nounc, _ := fs.ConfigFile.GetValue(name, "nounc")
	f := &Fs{
		name:   name,
		warned: make(map[string]struct{}),
		nounc:  nounc == "true",
		dev:    devUnset,
	}
	f.root = f.cleanPath(root)
	// Check to see if this points to a file
	fi, err := os.Lstat(f.root)
	if err == nil {
		// Record the root's device number for one-filesystem traversal.
		f.dev = readDevice(fi)
	}
	if err == nil && fi.Mode().IsRegular() {
		// It is a file, so use the parent as the root
		f.root, _ = getDirFile(f.root)
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
// Name returns the name of the remote, as passed into NewFs.
func (f *Fs) Name() string {
	return f.name
}
// Root returns the root of the remote, as passed into NewFs (cleaned, and
// possibly adjusted to the parent directory if the path was a file).
func (f *Fs) Root() string {
	return f.root
}
// String converts this Fs to a human-readable string.
func (f *Fs) String() string {
	return fmt.Sprintf("Local file system at %s", f.root)
}
// newObject makes a half completed Object: the OS path and cleaned remote
// are filled in, but no file info is attached yet.
func (f *Fs) newObject(remote string) *Object {
	return &Object{
		fs:     f,
		remote: f.cleanRemote(remote),
		path:   f.cleanPath(filepath.Join(f.root, remote)),
	}
}
// newObjectWithInfo returns an Object for remote, attaching info if
// supplied or lstat-ing the file otherwise.
//
// A missing file is reported as fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info os.FileInfo) (fs.Object, error) {
	o := f.newObject(remote)
	if info != nil {
		// Caller already has the file info (e.g. from a directory listing).
		o.info = info
	} else {
		err := o.lstat()
		if err != nil {
			if os.IsNotExist(err) {
				return nil, fs.ErrorObjectNotFound
			}
			return nil, err
		}
	}
	return o, nil
}
// NewObject finds the Object at remote. If it can't be found it returns
// fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
	var info os.FileInfo // nil forces a fresh lstat
	return f.newObjectWithInfo(remote, info)
}
// listArgs is the arguments that a new list takes: the remote (slash)
// form of a directory, its OS path, and the remaining recursion depth.
type listArgs struct {
	remote  string
	dirpath string
	level   int
}
// list traverses the directory passed in, listing to out.
//
// It returns the subdirectories it found so the caller can schedule them
// for traversal; nil means either finished, aborted by out, or error
// (reported via out.SetError). (The old comment claiming a boolean return
// was stale.)
func (f *Fs) list(out fs.ListOpts, remote string, dirpath string, level int) (subdirs []listArgs) {
	fd, err := os.Open(dirpath)
	if err != nil {
		out.SetError(errors.Wrapf(err, "failed to open directory %q", dirpath))
		return nil
	}
	defer func() {
		err := fd.Close()
		if err != nil {
			out.SetError(errors.Wrapf(err, "failed to close directory %q:", dirpath))
		}
	}()
	for {
		// Read directory entries in batches of 1024 to bound memory use.
		fis, err := fd.Readdir(1024)
		if err == io.EOF && len(fis) == 0 {
			break
		}
		if err != nil {
			out.SetError(errors.Wrapf(err, "failed to read directory %q", dirpath))
			return nil
		}
		for _, fi := range fis {
			name := fi.Name()
			newRemote := path.Join(remote, name)
			newPath := filepath.Join(dirpath, name)
			if fi.IsDir() {
				// Ignore directories which are symlinks. These are junction points under windows which
				// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
				if (fi.Mode()&os.ModeSymlink) == 0 && out.IncludeDirectory(newRemote) {
					dir := &fs.Dir{
						Name:  f.cleanRemote(newRemote),
						When:  fi.ModTime(),
						Bytes: 0,
						Count: 0,
					}
					// AddDir returning true means the listing was aborted.
					if out.AddDir(dir) {
						return nil
					}
					// Only recurse while depth remains and the directory is on
					// the same device as the root (don't cross filesystems).
					if level > 0 && f.dev == readDevice(fi) {
						subdirs = append(subdirs, listArgs{remote: newRemote, dirpath: newPath, level: level - 1})
					}
				}
			} else {
				fso, err := f.newObjectWithInfo(newRemote, fi)
				if err != nil {
					out.SetError(err)
					return nil
				}
				// Add returning true means the listing was aborted.
				if fso.Storable() && out.Add(fso) {
					return nil
				}
			}
		}
	}
	return subdirs
}
// List the path into out.
//
// Ignores everything which isn't Storable, eg links etc.
//
// Directories are traversed by a pool of fs.Config.Checkers worker
// goroutines fed through a channel; "traversing" counts outstanding
// directories while "wg" waits for the workers themselves to exit.
func (f *Fs) List(out fs.ListOpts, dir string) {
	defer out.Finished()
	root := f.cleanPath(filepath.Join(f.root, dir))
	dir = f.cleanRemote(dir)
	_, err := os.Stat(root)
	if err != nil {
		out.SetError(fs.ErrorDirNotFound)
		return
	}
	in := make(chan listArgs, out.Buffer())
	var wg sync.WaitGroup         // sync closing of go routines
	var traversing sync.WaitGroup // running directory traversals
	// Start the process
	traversing.Add(1)
	in <- listArgs{remote: dir, dirpath: root, level: out.Level() - 1}
	for i := 0; i < fs.Config.Checkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for job := range in {
				if out.IsFinished() {
					// Listing was aborted: drain remaining jobs without work.
					continue
				}
				newJobs := f.list(out, job.remote, job.dirpath, job.level)
				// Now we have traversed this directory, send
				// these ones off for traversal
				if len(newJobs) != 0 {
					traversing.Add(len(newJobs))
					// Send from a separate goroutine: all workers may be busy,
					// so a direct send on a full channel could deadlock.
					go func() {
						for _, newJob := range newJobs {
							in <- newJob
						}
					}()
				}
				traversing.Done()
			}
		}()
	}
	// Wait for traversal to finish
	traversing.Wait()
	close(in)
	wg.Wait()
}
// cleanRemote makes string a valid UTF-8 string for remote strings.
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError.
// It also normalises the UTF-8 (to NFC) and converts the slashes if
// necessary.
func (f *Fs) cleanRemote(name string) string {
	if !utf8.ValidString(name) {
		// Warn once per distinct name; 'warned' is guarded by wmu.
		f.wmu.Lock()
		if _, ok := f.warned[name]; !ok {
			fs.Debug(f, "Replacing invalid UTF-8 characters in %q", name)
			f.warned[name] = struct{}{}
		}
		f.wmu.Unlock()
		// Round-tripping through []rune replaces each invalid byte with
		// utf8.RuneError.
		name = string([]rune(name))
	}
	name = norm.NFC.String(name)
	name = filepath.ToSlash(name)
	return name
}
// Put the Object to the local filesystem
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
	// Make a skeleton object - Update fills in the info and hashes
	obj := f.newObject(src.Remote())
	if err := obj.Update(in, src); err != nil {
		return nil, err
	}
	return obj, nil
}
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
	// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
	// Build a proper OS path with cleanPath (UNC/long-path form on
	// Windows).  The previous path.Join(f.root, dir) produced a
	// slash-separated, non-UNC path which broke Mkdir with a dir on
	// Windows.
	root := f.cleanPath(filepath.Join(f.root, dir))
	err := os.MkdirAll(root, 0777)
	if err != nil {
		return err
	}
	if dir == "" {
		// Creating the root - remember its device number so list()
		// can avoid descending into other file systems.
		fi, err := os.Lstat(root)
		if err != nil {
			return err
		}
		f.dev = readDevice(fi)
	}
	return nil
}
// Rmdir removes the directory
//
// If it isn't empty it will return an error
func (f *Fs) Rmdir(dir string) error {
	// Use cleanPath to get an OS path (UNC form on Windows); the
	// previous path.Join(f.root, dir) returned a slash path which
	// broke Rmdir with a dir on Windows.
	root := f.cleanPath(filepath.Join(f.root, dir))
	return os.Remove(root)
}
// Precision of the file system
func (f *Fs) Precision() (precision time.Duration) {
	// Probe the filesystem only once and cache the answer
	f.precisionOk.Do(func() {
		f.precision = f.readPrecision()
	})
	return f.precision
}

// Read the precision
//
// Empirically determines the modification-time precision of the
// filesystem holding the temp directory by setting increasingly coarse
// times on a scratch file and reading them back.  Falls back to 1s on
// any error.
func (f *Fs) readPrecision() (precision time.Duration) {
	// Default precision of 1s
	precision = time.Second
	// Create temporary file and test it
	fd, err := ioutil.TempFile("", "rclone")
	if err != nil {
		// If failed return 1s
		// fmt.Println("Failed to create temp file", err)
		return time.Second
	}
	path := fd.Name()
	// fmt.Println("Created temp file", path)
	err = fd.Close()
	if err != nil {
		return time.Second
	}
	// Delete it on return
	defer func() {
		// fmt.Println("Remove temp file")
		_ = os.Remove(path) // ignore error
	}()
	// Find the minimum duration we can detect
	for duration := time.Duration(1); duration < time.Second; duration *= 10 {
		// Current time with delta
		t := time.Unix(time.Now().Unix(), int64(duration))
		err := os.Chtimes(path, t, t)
		if err != nil {
			// fmt.Println("Failed to Chtimes", err)
			break
		}
		// Read the actual time back
		fi, err := os.Stat(path)
		if err != nil {
			// fmt.Println("Failed to Stat", err)
			break
		}
		// If it matches - have found the precision
		// fmt.Println("compare", fi.ModTime(), t)
		if fi.ModTime() == t {
			// fmt.Println("Precision detected as", duration)
			return duration
		}
	}
	return
}
// Purge deletes all the files and directories
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
	// Refuse to purge anything that isn't a directory
	switch fi, err := os.Lstat(f.root); {
	case err != nil:
		return err
	case !fi.Mode().IsDir():
		return errors.Errorf("can't purge non directory: %q", f.root)
	}
	return os.RemoveAll(f.root)
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debug(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}
	// Temporary Object under construction
	dstObj := f.newObject(remote)
	// Check it is a file if it exists
	err := dstObj.lstat()
	if os.IsNotExist(err) {
		// OK - destination doesn't exist yet
	} else if err != nil {
		return nil, err
	} else if !dstObj.info.Mode().IsRegular() {
		// It isn't a file - refuse to overwrite a dir/device/etc
		return nil, errors.New("can't move file onto non-file")
	}
	// Create destination parent directories
	err = dstObj.mkdirAll()
	if err != nil {
		return nil, err
	}
	// Do the move (os.Rename overwrites an existing regular file)
	err = os.Rename(srcObj.path, dstObj.path)
	if err != nil {
		return nil, err
	}
	// Update the info so the returned object has fresh metadata
	err = dstObj.lstat()
	if err != nil {
		return nil, err
	}
	return dstObj, nil
}

// DirMove moves src directory to this remote using server side move
// operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debug(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	// Check if source exists
	sstat, err := os.Lstat(srcFs.root)
	if err != nil {
		return err
	}
	// And is a directory
	if !sstat.IsDir() {
		return fs.ErrorCantDirMove
	}
	// Check if destination exists - we refuse to clobber it
	_, err = os.Lstat(f.root)
	if !os.IsNotExist(err) {
		return fs.ErrorDirExists
	}
	// Do the move with a single rename of the whole tree
	return os.Rename(srcFs.root, f.root)
}
// Hashes returns the supported hash sets.
// The local backend supports every hash rclone knows about.
func (f *Fs) Hashes() fs.HashSet {
	return fs.SupportedHashes
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
// Safe to call on a nil Object (eg from logging code).
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}
// Hash returns the requested hash of a file as a lowercase hex string
//
// Hashes are computed for the whole file in one pass and cached; the
// cache is invalidated if the file's modtime or size changes.
func (o *Object) Hash(r fs.HashType) (string, error) {
	// Check that the underlying file hasn't changed
	oldtime := o.info.ModTime()
	oldsize := o.info.Size()
	err := o.lstat()
	if err != nil {
		return "", errors.Wrap(err, "hash: failed to stat")
	}
	if !o.info.ModTime().Equal(oldtime) || oldsize != o.info.Size() {
		o.hashes = nil
	}
	if o.hashes == nil {
		in, err := os.Open(o.path)
		if err != nil {
			return "", errors.Wrap(err, "hash: failed to open")
		}
		hashes, err := fs.HashStream(in)
		closeErr := in.Close()
		if err != nil {
			return "", errors.Wrap(err, "hash: failed to read")
		}
		if closeErr != nil {
			return "", errors.Wrap(closeErr, "hash: failed to close")
		}
		// Only cache on success.  The old code pre-allocated
		// o.hashes before opening the file, which cached an empty
		// hash set when the open failed and made every later call
		// return "" without retrying.
		o.hashes = hashes
	}
	return o.hashes[r], nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.info.Size()
}

// ModTime returns the modification time of the object
func (o *Object) ModTime() time.Time {
	return o.info.ModTime()
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
	err := os.Chtimes(o.path, modTime, modTime)
	if err != nil {
		return err
	}
	// Re-read metadata so ModTime() reflects the change
	return o.lstat()
}
// Storable returns a boolean showing if this object is storable
func (o *Object) Storable() bool {
	// Check for control characters in the remote name and show non storable
	for _, c := range o.Remote() {
		if c >= 0x00 && c < 0x20 || c == 0x7F {
			fs.Debug(o.fs, "Can't store file with control characters: %q", o.Remote())
			return false
		}
	}
	mode := o.info.Mode()
	// On windows a file with os.ModeSymlink represents a file with reparse points
	if runtime.GOOS == "windows" && (mode&os.ModeSymlink) != 0 {
		fs.Debug(o, "Clearing symlink bit to allow a file with reparse points to be copied")
		mode &^= os.ModeSymlink
	}
	// Symlinks, pipes, sockets and devices can't be transferred
	if mode&(os.ModeSymlink|os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
		fs.Debug(o, "Can't transfer non file/directory")
		return false
	} else if mode&os.ModeDir != 0 {
		// fs.Debug(o, "Skipping directory")
		return false
	}
	return true
}

// localOpenFile wraps an io.ReadCloser and updates the md5sum of the
// object that is read
type localOpenFile struct {
	o    *Object         // object that is open
	in   io.ReadCloser   // handle we are wrapping
	hash *fs.MultiHasher // currently accumulating hashes
}

// Read bytes from the object - see io.Reader
func (file *localOpenFile) Read(p []byte) (n int, err error) {
	n, err = file.in.Read(p)
	if n > 0 {
		// Hash routines never return an error
		_, _ = file.hash.Write(p[:n])
	}
	return
}

// Close the object and update the hashes
// The hashes are only stored if the whole file was read.
func (file *localOpenFile) Close() (err error) {
	err = file.in.Close()
	if err == nil {
		if file.hash.Size() == file.o.Size() {
			file.o.hashes = file.hash.Sums()
		}
	}
	return err
}

// Open an object for read
//
// Supports fs.SeekOption; reads starting from an offset skip hashing
// since the whole file wouldn't be seen.
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
	var offset int64
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		default:
			if option.Mandatory() {
				fs.Log(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	fd, err := os.Open(o.path)
	if err != nil {
		return
	}
	if offset != 0 {
		// seek the object
		_, err = fd.Seek(offset, 0)
		// don't attempt to make checksums
		return fd, err
	}
	// Update the md5sum as we go along
	in = &localOpenFile{
		o:    o,
		in:   fd,
		hash: fs.NewMultiHasher(),
	}
	return in, nil
}
// mkdirAll makes all the directories needed to store the object
func (o *Object) mkdirAll() error {
	parent, _ := getDirFile(o.path)
	return os.MkdirAll(parent, 0777)
}
// Update the object from in with modTime and size
//
// Writes the stream to o.path, hashing as it goes.  On any write or
// close error the partially written file is removed so a bad copy is
// not left behind.
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
	err := o.mkdirAll()
	if err != nil {
		return err
	}
	out, err := os.Create(o.path)
	if err != nil {
		return err
	}
	// Calculate the hash of the object we are reading as we go along
	hash := fs.NewMultiHasher()
	in = io.TeeReader(in, hash)
	_, err = io.Copy(out, in)
	closeErr := out.Close()
	// Prefer the copy error, fall back to the close error
	if err == nil {
		err = closeErr
	}
	if err != nil {
		fs.Debug(o, "Removing partially written file on error: %v", err)
		if removeErr := os.Remove(o.path); removeErr != nil {
			fs.ErrorLog(o, "Failed to remove partially written file: %v", removeErr)
		}
		return err
	}
	// All successful so update the hashes
	o.hashes = hash.Sums()
	// Set the mtime
	err = o.SetModTime(src.ModTime())
	if err != nil {
		return err
	}
	// ReRead info now that we have finished
	return o.lstat()
}

// Stat a Object into info
// Note: on error o.info is set to the nil FileInfo returned by Lstat.
func (o *Object) lstat() error {
	info, err := os.Lstat(o.path)
	o.info = info
	return err
}

// Remove an object
func (o *Object) Remove() error {
	return os.Remove(o.path)
}
// Return the directory and file from an OS path. Assumes
// os.PathSeparator is used.
func getDirFile(s string) (string, string) {
	sep := string(os.PathSeparator)
	i := strings.LastIndex(s, sep)
	dir, file := s[:i], s[i+1:]
	if dir == "" {
		// File directly under the root - keep the separator as the dir
		dir = sep
	}
	return dir, file
}
// cleanPathFragment cleans an OS path fragment which is part of a
// bigger path and not necessarily absolute
func cleanPathFragment(s string) string {
	if s == "" {
		return ""
	}
	cleaned := filepath.Clean(s)
	if runtime.GOOS != "windows" {
		return cleaned
	}
	// Windows paths use backslashes throughout
	return strings.Replace(cleaned, `/`, `\`, -1)
}
// cleanPath cleans and makes absolute the path passed in and returns
// an OS path.
//
// The input might be in OS form or rclone form or a mixture, but the
// output is in OS form.
//
// On windows it makes the path UNC also and replaces any characters
// Windows can't deal with with their replacements.
func (f *Fs) cleanPath(s string) string {
	s = cleanPathFragment(s)
	if runtime.GOOS == "windows" {
		// Don't Abs a path which already starts with "\" (UNC or rooted)
		if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
			s2, err := filepath.Abs(s)
			if err == nil {
				s = s2
			}
		}
		if !f.nounc {
			// Convert to UNC (long path) form unless disabled by config
			s = uncPath(s)
		}
		s = cleanWindowsName(f, s)
	} else {
		if !filepath.IsAbs(s) {
			s2, err := filepath.Abs(s)
			if err == nil {
				s = s2
			}
		}
	}
	return s
}

// Pattern to match a windows absolute path: "c:\" and similar
var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)
// uncPath converts an absolute Windows path
// to a UNC long path.
func uncPath(s string) string {
	// UNC can NOT use "/", so convert all to "\"
	s = strings.Replace(s, `/`, `\`, -1)
	switch {
	case strings.HasPrefix(s, `\\?\`):
		// Already a long path - keep it as is
		return s
	case strings.HasPrefix(s, `\\`):
		// A UNC server path - swap the leading "\\" for the long prefix
		return `\\?\UNC\` + s[2:]
	case isAbsWinDrive.MatchString(s):
		// A drive letter path, eg "c:\..."
		return `\\?\` + s
	}
	return s
}
// cleanWindowsName will clean invalid Windows characters replacing them with _
//
// The long-path prefix (`\\?\` or `//?/`) and a leading drive letter
// with its colon are preserved unfiltered; everything after is mapped.
func cleanWindowsName(f *Fs, name string) string {
	original := name
	var name2 string
	if strings.HasPrefix(name, `\\?\`) {
		name2 = `\\?\`
		name = strings.TrimPrefix(name, `\\?\`)
	}
	if strings.HasPrefix(name, `//?/`) {
		name2 = `//?/`
		name = strings.TrimPrefix(name, `//?/`)
	}
	// Colon is allowed as part of a drive name X:\
	colonAt := strings.Index(name, ":")
	if colonAt > 0 && colonAt < 3 && len(name) > colonAt+1 {
		// Copy to name2, which is unfiltered
		name2 += name[0 : colonAt+1]
		name = name[colonAt+1:]
	}
	// Replace characters Windows forbids in file names with "_"
	name2 += strings.Map(func(r rune) rune {
		switch r {
		case '<', '>', '"', '|', '?', '*', ':':
			return '_'
		}
		return r
	}, name)
	// Warn once per name if anything was substituted (f may be nil in tests)
	if name2 != original && f != nil {
		f.wmu.Lock()
		if _, ok := f.warned[name]; !ok {
			fs.Debug(f, "Replacing invalid characters in %q to %q", name, name2)
			f.warned[name] = struct{}{}
		}
		f.wmu.Unlock()
	}
	return name2
}

// Check the interfaces are satisfied at compile time
var (
	_ fs.Fs       = &Fs{}
	_ fs.Purger   = &Fs{}
	_ fs.Mover    = &Fs{}
	_ fs.DirMover = &Fs{}
	_ fs.Object   = &Object{}
)
local: fix Mkdir/Rmdir with a dir on Windows
// Package local provides a filesystem interface
package local
import (
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"time"
"unicode/utf8"
"golang.org/x/text/unicode/norm"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// Constants
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset

// Register with Fs
// Declares the "local" backend and its single "nounc" option.
func init() {
	fsi := &fs.RegInfo{
		Name:        "local",
		Description: "Local Disk",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "nounc",
			Help:     "Disable UNC (long path names) conversion on Windows",
			Optional: true,
			Examples: []fs.OptionExample{{
				Value: "true",
				Help:  "Disables long file names",
			}},
		}},
	}
	fs.Register(fsi)
}
// Fs represents a local filesystem rooted at root
type Fs struct {
	name        string              // the name of the remote
	root        string              // The root directory (OS path)
	dev         uint64              // device number of root node
	precisionOk sync.Once           // Whether we need to read the precision
	precision   time.Duration       // precision of local filesystem
	wmu         sync.Mutex          // used for locking access to 'warned'.
	warned      map[string]struct{} // whether we have warned about this string
	nounc       bool                // Skip UNC conversion on Windows
}

// Object represents a local filesystem object
type Object struct {
	fs     *Fs                    // The Fs this object is part of
	remote string                 // The remote path - properly UTF-8 encoded - for rclone
	path   string                 // The local path - may not be properly UTF-8 encoded - for OS
	info   os.FileInfo            // Interface for file info (always present)
	hashes map[fs.HashType]string // Hashes
}
// ------------------------------------------------------------
// NewFs constructs an Fs from the path
func NewFs(name, root string) (fs.Fs, error) {
var err error
nounc, _ := fs.ConfigFile.GetValue(name, "nounc")
f := &Fs{
name: name,
warned: make(map[string]struct{}),
nounc: nounc == "true",
dev: devUnset,
}
f.root = f.cleanPath(root)
// Check to see if this points to a file
fi, err := os.Lstat(f.root)
if err == nil {
f.dev = readDevice(fi)
}
if err == nil && fi.Mode().IsRegular() {
// It is a file, so use the parent as the root
f.root, _ = getDirFile(f.root)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("Local file system at %s", f.root)
}
// newObject makes a half completed Object
//
// The info field is left unset - fill it in via lstat() or Update().
func (f *Fs) newObject(remote string) *Object {
	return &Object{
		fs:     f,
		remote: f.cleanRemote(remote),
		path:   f.cleanPath(filepath.Join(f.root, remote)),
	}
}
// Return an Object from a path
//
// May return nil if an error occurred
func (f *Fs) newObjectWithInfo(remote string, info os.FileInfo) (fs.Object, error) {
	o := f.newObject(remote)
	if info == nil {
		// No cached file info supplied - stat the file ourselves
		if err := o.lstat(); err != nil {
			if os.IsNotExist(err) {
				return nil, fs.ErrorObjectNotFound
			}
			return nil, err
		}
	} else {
		o.info = info
	}
	return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
	return f.newObjectWithInfo(remote, nil)
}

// listArgs is the arguments that a new list takes
type listArgs struct {
	remote  string // remote (rclone) path of the directory
	dirpath string // OS path of the directory
	level   int    // levels of recursion remaining
}
// list traverses the directory passed in, listing to out.
// it returns a boolean whether it is finished or not.
//
// Subdirectories that should be descended into are returned as
// listArgs for the caller (List) to schedule; a nil return means
// either an error was set on out or out signalled completion.
func (f *Fs) list(out fs.ListOpts, remote string, dirpath string, level int) (subdirs []listArgs) {
	fd, err := os.Open(dirpath)
	if err != nil {
		out.SetError(errors.Wrapf(err, "failed to open directory %q", dirpath))
		return nil
	}
	defer func() {
		err := fd.Close()
		if err != nil {
			out.SetError(errors.Wrapf(err, "failed to close directory %q:", dirpath))
		}
	}()
	for {
		// Read the directory in batches of 1024 entries
		fis, err := fd.Readdir(1024)
		if err == io.EOF && len(fis) == 0 {
			break
		}
		if err != nil {
			out.SetError(errors.Wrapf(err, "failed to read directory %q", dirpath))
			return nil
		}
		for _, fi := range fis {
			name := fi.Name()
			newRemote := path.Join(remote, name)
			newPath := filepath.Join(dirpath, name)
			if fi.IsDir() {
				// Ignore directories which are symlinks. These are junction points under windows which
				// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
				if (fi.Mode()&os.ModeSymlink) == 0 && out.IncludeDirectory(newRemote) {
					dir := &fs.Dir{
						Name:  f.cleanRemote(newRemote),
						When:  fi.ModTime(),
						Bytes: 0,
						Count: 0,
					}
					if out.AddDir(dir) {
						// out is full/aborted - stop listing
						return nil
					}
					// Only recurse while levels remain and the subdir
					// is on the same device as the root
					if level > 0 && f.dev == readDevice(fi) {
						subdirs = append(subdirs, listArgs{remote: newRemote, dirpath: newPath, level: level - 1})
					}
				}
			} else {
				fso, err := f.newObjectWithInfo(newRemote, fi)
				if err != nil {
					out.SetError(err)
					return nil
				}
				if fso.Storable() && out.Add(fso) {
					// out is full/aborted - stop listing
					return nil
				}
			}
		}
	}
	return subdirs
}
// List the path into out
//
// Ignores everything which isn't Storable, eg links etc
//
// Listing runs on fs.Config.Checkers concurrent workers; each
// traversed directory feeds its subdirectories back into the work
// channel until out.Level() is exhausted.
func (f *Fs) List(out fs.ListOpts, dir string) {
	defer out.Finished()
	root := f.cleanPath(filepath.Join(f.root, dir))
	dir = f.cleanRemote(dir)
	// Fail early if the directory to list doesn't exist
	_, err := os.Stat(root)
	if err != nil {
		out.SetError(fs.ErrorDirNotFound)
		return
	}
	in := make(chan listArgs, out.Buffer())
	var wg sync.WaitGroup         // sync closing of go routines
	var traversing sync.WaitGroup // running directory traversals
	// Start the process
	traversing.Add(1)
	in <- listArgs{remote: dir, dirpath: root, level: out.Level() - 1}
	for i := 0; i < fs.Config.Checkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for job := range in {
				if out.IsFinished() {
					// Listing was aborted - drain remaining jobs
					continue
				}
				newJobs := f.list(out, job.remote, job.dirpath, job.level)
				// Now we have traversed this directory, send
				// these ones off for traversal
				if len(newJobs) != 0 {
					traversing.Add(len(newJobs))
					// Send from a separate goroutine so a full
					// channel buffer can't deadlock the workers
					go func() {
						for _, newJob := range newJobs {
							in <- newJob
						}
					}()
				}
				traversing.Done()
			}
		}()
	}
	// Wait for traversal to finish
	traversing.Wait()
	close(in)
	wg.Wait()
}

// cleanRemote makes string a valid UTF-8 string for remote strings.
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError
// It also normalises the UTF-8 and converts the slashes if necessary.
func (f *Fs) cleanRemote(name string) string {
	if !utf8.ValidString(name) {
		// Warn once per name about the substitution
		f.wmu.Lock()
		if _, ok := f.warned[name]; !ok {
			fs.Debug(f, "Replacing invalid UTF-8 characters in %q", name)
			f.warned[name] = struct{}{}
		}
		f.wmu.Unlock()
		// Round-tripping through []rune replaces invalid bytes with utf8.RuneError
		name = string([]rune(name))
	}
	name = norm.NFC.String(name)
	name = filepath.ToSlash(name)
	return name
}

// Put the Object to the local filesystem
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
	remote := src.Remote()
	// Temporary Object under construction - info filled in by Update()
	o := f.newObject(remote)
	err := o.Update(in, src)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
	// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
	// cleanPath gives a proper OS path (UNC form on Windows)
	root := f.cleanPath(filepath.Join(f.root, dir))
	err := os.MkdirAll(root, 0777)
	if err != nil {
		return err
	}
	if dir == "" {
		// Creating the root - remember its device number for list()
		fi, err := os.Lstat(root)
		if err != nil {
			return err
		}
		f.dev = readDevice(fi)
	}
	return nil
}

// Rmdir removes the directory
//
// If it isn't empty it will return an error
func (f *Fs) Rmdir(dir string) error {
	// cleanPath gives a proper OS path (UNC form on Windows)
	root := f.cleanPath(filepath.Join(f.root, dir))
	return os.Remove(root)
}
// Precision of the file system
func (f *Fs) Precision() (precision time.Duration) {
	// Probe the filesystem only once and cache the answer
	f.precisionOk.Do(func() {
		f.precision = f.readPrecision()
	})
	return f.precision
}

// Read the precision
//
// Empirically determines modtime precision by setting increasingly
// coarse times on a scratch file and reading them back.  Falls back
// to 1s on any error.
func (f *Fs) readPrecision() (precision time.Duration) {
	// Default precision of 1s
	precision = time.Second
	// Create temporary file and test it
	fd, err := ioutil.TempFile("", "rclone")
	if err != nil {
		// If failed return 1s
		// fmt.Println("Failed to create temp file", err)
		return time.Second
	}
	path := fd.Name()
	// fmt.Println("Created temp file", path)
	err = fd.Close()
	if err != nil {
		return time.Second
	}
	// Delete it on return
	defer func() {
		// fmt.Println("Remove temp file")
		_ = os.Remove(path) // ignore error
	}()
	// Find the minimum duration we can detect
	for duration := time.Duration(1); duration < time.Second; duration *= 10 {
		// Current time with delta
		t := time.Unix(time.Now().Unix(), int64(duration))
		err := os.Chtimes(path, t, t)
		if err != nil {
			// fmt.Println("Failed to Chtimes", err)
			break
		}
		// Read the actual time back
		fi, err := os.Stat(path)
		if err != nil {
			// fmt.Println("Failed to Stat", err)
			break
		}
		// If it matches - have found the precision
		// fmt.Println("compare", fi.ModTime(), t)
		if fi.ModTime() == t {
			// fmt.Println("Precision detected as", duration)
			return duration
		}
	}
	return
}

// Purge deletes all the files and directories
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
	fi, err := os.Lstat(f.root)
	if err != nil {
		return err
	}
	// Refuse to purge anything that isn't a directory
	if !fi.Mode().IsDir() {
		return errors.Errorf("can't purge non directory: %q", f.root)
	}
	return os.RemoveAll(f.root)
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debug(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}
	// Temporary Object under construction
	dstObj := f.newObject(remote)
	// Check it is a file if it exists
	err := dstObj.lstat()
	if os.IsNotExist(err) {
		// OK - destination doesn't exist yet
	} else if err != nil {
		return nil, err
	} else if !dstObj.info.Mode().IsRegular() {
		// It isn't a file - refuse to overwrite a dir/device/etc
		return nil, errors.New("can't move file onto non-file")
	}
	// Create destination parent directories
	err = dstObj.mkdirAll()
	if err != nil {
		return nil, err
	}
	// Do the move (os.Rename overwrites an existing regular file)
	err = os.Rename(srcObj.path, dstObj.path)
	if err != nil {
		return nil, err
	}
	// Update the info so the returned object has fresh metadata
	err = dstObj.lstat()
	if err != nil {
		return nil, err
	}
	return dstObj, nil
}

// DirMove moves src directory to this remote using server side move
// operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debug(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	// Check if source exists
	sstat, err := os.Lstat(srcFs.root)
	if err != nil {
		return err
	}
	// And is a directory
	if !sstat.IsDir() {
		return fs.ErrorCantDirMove
	}
	// Check if destination exists - we refuse to clobber it
	_, err = os.Lstat(f.root)
	if !os.IsNotExist(err) {
		return fs.ErrorDirExists
	}
	// Do the move with a single rename of the whole tree
	return os.Rename(srcFs.root, f.root)
}

// Hashes returns the supported hash sets.
// The local backend supports every hash rclone knows about.
func (f *Fs) Hashes() fs.HashSet {
	return fs.SupportedHashes
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
// Safe to call on a nil Object (eg from logging code).
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}
// Hash returns the requested hash of a file as a lowercase hex string
//
// Hashes are computed for the whole file in one pass and cached; the
// cache is invalidated if the file's modtime or size changes.
func (o *Object) Hash(r fs.HashType) (string, error) {
	// Check that the underlying file hasn't changed
	oldtime := o.info.ModTime()
	oldsize := o.info.Size()
	err := o.lstat()
	if err != nil {
		return "", errors.Wrap(err, "hash: failed to stat")
	}
	if !o.info.ModTime().Equal(oldtime) || oldsize != o.info.Size() {
		o.hashes = nil
	}
	if o.hashes == nil {
		in, err := os.Open(o.path)
		if err != nil {
			return "", errors.Wrap(err, "hash: failed to open")
		}
		hashes, err := fs.HashStream(in)
		closeErr := in.Close()
		if err != nil {
			return "", errors.Wrap(err, "hash: failed to read")
		}
		if closeErr != nil {
			return "", errors.Wrap(closeErr, "hash: failed to close")
		}
		// Only cache on success.  The old code pre-allocated
		// o.hashes before opening the file, which cached an empty
		// hash set when the open failed and made every later call
		// return "" without retrying.
		o.hashes = hashes
	}
	return o.hashes[r], nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.info.Size()
}

// ModTime returns the modification time of the object
func (o *Object) ModTime() time.Time {
	return o.info.ModTime()
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
	err := os.Chtimes(o.path, modTime, modTime)
	if err != nil {
		return err
	}
	// Re-read metadata so ModTime() reflects the change
	return o.lstat()
}

// Storable returns a boolean showing if this object is storable
func (o *Object) Storable() bool {
	// Check for control characters in the remote name and show non storable
	for _, c := range o.Remote() {
		if c >= 0x00 && c < 0x20 || c == 0x7F {
			fs.Debug(o.fs, "Can't store file with control characters: %q", o.Remote())
			return false
		}
	}
	mode := o.info.Mode()
	// On windows a file with os.ModeSymlink represents a file with reparse points
	if runtime.GOOS == "windows" && (mode&os.ModeSymlink) != 0 {
		fs.Debug(o, "Clearing symlink bit to allow a file with reparse points to be copied")
		mode &^= os.ModeSymlink
	}
	// Symlinks, pipes, sockets and devices can't be transferred
	if mode&(os.ModeSymlink|os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
		fs.Debug(o, "Can't transfer non file/directory")
		return false
	} else if mode&os.ModeDir != 0 {
		// fs.Debug(o, "Skipping directory")
		return false
	}
	return true
}

// localOpenFile wraps an io.ReadCloser and updates the md5sum of the
// object that is read
type localOpenFile struct {
	o    *Object         // object that is open
	in   io.ReadCloser   // handle we are wrapping
	hash *fs.MultiHasher // currently accumulating hashes
}

// Read bytes from the object - see io.Reader
func (file *localOpenFile) Read(p []byte) (n int, err error) {
	n, err = file.in.Read(p)
	if n > 0 {
		// Hash routines never return an error
		_, _ = file.hash.Write(p[:n])
	}
	return
}

// Close the object and update the hashes
// The hashes are only stored if the whole file was read.
func (file *localOpenFile) Close() (err error) {
	err = file.in.Close()
	if err == nil {
		if file.hash.Size() == file.o.Size() {
			file.o.hashes = file.hash.Sums()
		}
	}
	return err
}

// Open an object for read
//
// Supports fs.SeekOption; reads starting from an offset skip hashing
// since the whole file wouldn't be seen.
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
	var offset int64
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		default:
			if option.Mandatory() {
				fs.Log(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	fd, err := os.Open(o.path)
	if err != nil {
		return
	}
	if offset != 0 {
		// seek the object
		_, err = fd.Seek(offset, 0)
		// don't attempt to make checksums
		return fd, err
	}
	// Update the md5sum as we go along
	in = &localOpenFile{
		o:    o,
		in:   fd,
		hash: fs.NewMultiHasher(),
	}
	return in, nil
}
// mkdirAll makes all the directories needed to store the object
func (o *Object) mkdirAll() error {
	dir, _ := getDirFile(o.path)
	return os.MkdirAll(dir, 0777)
}

// Update the object from in with modTime and size
//
// Writes the stream to o.path, hashing as it goes.  On any write or
// close error the partially written file is removed so a bad copy is
// not left behind.
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
	err := o.mkdirAll()
	if err != nil {
		return err
	}
	out, err := os.Create(o.path)
	if err != nil {
		return err
	}
	// Calculate the hash of the object we are reading as we go along
	hash := fs.NewMultiHasher()
	in = io.TeeReader(in, hash)
	_, err = io.Copy(out, in)
	closeErr := out.Close()
	// Prefer the copy error, fall back to the close error
	if err == nil {
		err = closeErr
	}
	if err != nil {
		fs.Debug(o, "Removing partially written file on error: %v", err)
		if removeErr := os.Remove(o.path); removeErr != nil {
			fs.ErrorLog(o, "Failed to remove partially written file: %v", removeErr)
		}
		return err
	}
	// All successful so update the hashes
	o.hashes = hash.Sums()
	// Set the mtime
	err = o.SetModTime(src.ModTime())
	if err != nil {
		return err
	}
	// ReRead info now that we have finished
	return o.lstat()
}

// Stat a Object into info
// Note: on error o.info is set to the nil FileInfo returned by Lstat.
func (o *Object) lstat() error {
	info, err := os.Lstat(o.path)
	o.info = info
	return err
}

// Remove an object
func (o *Object) Remove() error {
	return os.Remove(o.path)
}
// Return the directory and file from an OS path. Assumes
// os.PathSeparator is used.
func getDirFile(s string) (string, string) {
	sep := string(os.PathSeparator)
	i := strings.LastIndex(s, sep)
	dir, file := s[:i], s[i+1:]
	if dir == "" {
		// File directly under the root - keep the separator as the dir
		dir = sep
	}
	return dir, file
}
// cleanPathFragment cleans an OS path fragment which is part of a
// bigger path and not necessarily absolute
func cleanPathFragment(s string) string {
	if s == "" {
		return ""
	}
	cleaned := filepath.Clean(s)
	if runtime.GOOS != "windows" {
		return cleaned
	}
	// Windows paths use backslashes throughout
	return strings.Replace(cleaned, `/`, `\`, -1)
}
// cleanPath cleans and makes absolute the path passed in and returns
// an OS path.
//
// The input might be in OS form or rclone form or a mixture, but the
// output is in OS form.
//
// On windows it makes the path UNC also and replaces any characters
// Windows can't deal with with their replacements.
func (f *Fs) cleanPath(s string) string {
	s = cleanPathFragment(s)
	if runtime.GOOS == "windows" {
		// Don't Abs a path which already starts with "\" (UNC or rooted)
		if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
			s2, err := filepath.Abs(s)
			if err == nil {
				s = s2
			}
		}
		if !f.nounc {
			// Convert to UNC (long path) form unless disabled by config
			s = uncPath(s)
		}
		s = cleanWindowsName(f, s)
	} else {
		if !filepath.IsAbs(s) {
			s2, err := filepath.Abs(s)
			if err == nil {
				s = s2
			}
		}
	}
	return s
}

// Pattern to match a windows absolute path: "c:\" and similar
var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)

// uncPath converts an absolute Windows path
// to a UNC long path.
func uncPath(s string) string {
	// UNC can NOT use "/", so convert all to "\"
	s = strings.Replace(s, `/`, `\`, -1)
	// If prefix is "\\", we already have a UNC path or server.
	if strings.HasPrefix(s, `\\`) {
		// If already long path, just keep it
		if strings.HasPrefix(s, `\\?\`) {
			return s
		}
		// Trim "\\" from path and add UNC prefix.
		return `\\?\UNC\` + strings.TrimPrefix(s, `\\`)
	}
	if isAbsWinDrive.MatchString(s) {
		return `\\?\` + s
	}
	return s
}

// cleanWindowsName will clean invalid Windows characters replacing them with _
//
// The long-path prefix (`\\?\` or `//?/`) and a leading drive letter
// with its colon are preserved unfiltered; everything after is mapped.
func cleanWindowsName(f *Fs, name string) string {
	original := name
	var name2 string
	if strings.HasPrefix(name, `\\?\`) {
		name2 = `\\?\`
		name = strings.TrimPrefix(name, `\\?\`)
	}
	if strings.HasPrefix(name, `//?/`) {
		name2 = `//?/`
		name = strings.TrimPrefix(name, `//?/`)
	}
	// Colon is allowed as part of a drive name X:\
	colonAt := strings.Index(name, ":")
	if colonAt > 0 && colonAt < 3 && len(name) > colonAt+1 {
		// Copy to name2, which is unfiltered
		name2 += name[0 : colonAt+1]
		name = name[colonAt+1:]
	}
	// Replace characters Windows forbids in file names with "_"
	name2 += strings.Map(func(r rune) rune {
		switch r {
		case '<', '>', '"', '|', '?', '*', ':':
			return '_'
		}
		return r
	}, name)
	// Warn once per name if anything was substituted (f may be nil in tests)
	if name2 != original && f != nil {
		f.wmu.Lock()
		if _, ok := f.warned[name]; !ok {
			fs.Debug(f, "Replacing invalid characters in %q to %q", name, name2)
			f.warned[name] = struct{}{}
		}
		f.wmu.Unlock()
	}
	return name2
}

// Check the interfaces are satisfied at compile time
var (
	_ fs.Fs       = &Fs{}
	_ fs.Purger   = &Fs{}
	_ fs.Mover    = &Fs{}
	_ fs.DirMover = &Fs{}
	_ fs.Object   = &Object{}
)
|
package google
import (
"fmt"
"log"
"time"
"code.google.com/p/google-api-go-client/compute/v1"
"code.google.com/p/google-api-go-client/googleapi"
"github.com/hashicorp/terraform/helper/schema"
)
// resourceComputeDisk defines the schema and CRUD handlers for the
// google_compute_disk Terraform resource.  All attributes force a new
// resource on change (no Update handler is registered).
func resourceComputeDisk() *schema.Resource {
	return &schema.Resource{
		Create: resourceComputeDiskCreate,
		Read:   resourceComputeDiskRead,
		Delete: resourceComputeDiskDelete,

		Schema: map[string]*schema.Schema{
			// Name of the disk
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// Zone the disk is created in
			"zone": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// Optional source image to create the disk from
			"image": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// Size in GB
			"size": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				ForceNew: true,
			},

			// Disk type (eg pd-standard, pd-ssd)
			"type": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
		},
	}
}
// resourceComputeDiskCreate creates a new GCE persistent disk from the
// resource configuration and waits for the zonal operation to finish.
// The resource ID is set to the disk name as soon as the insert call
// returns, so a partially-created disk is still tracked in state.
func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	// Get the zone
	log.Printf("[DEBUG] Loading zone: %s", d.Get("zone").(string))
	zone, err := config.clientCompute.Zones.Get(
		config.Project, d.Get("zone").(string)).Do()
	if err != nil {
		return fmt.Errorf(
			"Error loading zone '%s': %s", d.Get("zone").(string), err)
	}
	// Build the disk parameter
	disk := &compute.Disk{
		Name:   d.Get("name").(string),
		SizeGb: int64(d.Get("size").(int)),
	}
	// If we were given a source image, load that.
	if v, ok := d.GetOk("image"); ok {
		log.Printf("[DEBUG] Loading image: %s", v.(string))
		image, err := readImage(config, v.(string))
		if err != nil {
			return fmt.Errorf(
				"Error loading image '%s': %s",
				v.(string), err)
		}
		disk.SourceImage = image.SelfLink
	}
	// Optional disk type (e.g. pd-ssd); resolved against the zone loaded above.
	if v, ok := d.GetOk("type"); ok {
		log.Printf("[DEBUG] Loading disk type: %s", v.(string))
		diskType, err := readDiskType(config, zone, v.(string))
		if err != nil {
			return fmt.Errorf(
				"Error loading disk type '%s': %s",
				v.(string), err)
		}
		disk.Type = diskType.SelfLink
	}
	op, err := config.clientCompute.Disks.Insert(
		config.Project, d.Get("zone").(string), disk).Do()
	if err != nil {
		return fmt.Errorf("Error creating disk: %s", err)
	}
	// It probably maybe worked, so store the ID now
	d.SetId(disk.Name)
	// Wait for the operation to complete
	w := &OperationWaiter{
		Service: config.clientCompute,
		Op:      op,
		Project: config.Project,
		Zone:    d.Get("zone").(string),
		Type:    OperationWaitZone,
	}
	state := w.Conf()
	state.Timeout = 2 * time.Minute
	state.MinTimeout = 1 * time.Second
	opRaw, err := state.WaitForState()
	if err != nil {
		return fmt.Errorf("Error waiting for disk to create: %s", err)
	}
	// The waiter returns interface{}; recover the concrete operation to
	// inspect any server-side error.
	op = opRaw.(*compute.Operation)
	if op.Error != nil {
		// The resource didn't actually create
		d.SetId("")
		// Return the error
		return OperationError(*op.Error)
	}
	return resourceComputeDiskRead(d, meta)
}
// resourceComputeDiskRead refreshes state for the disk. A 404 from the
// API clears the resource ID so Terraform knows the disk is gone; any
// other error is reported to the caller.
func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	_, err := config.clientCompute.Disks.Get(
		config.Project, d.Get("zone").(string), d.Id()).Do()
	if err == nil {
		return nil
	}
	if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
		// The resource doesn't exist anymore
		d.SetId("")
		return nil
	}
	return fmt.Errorf("Error reading disk: %s", err)
}
// resourceComputeDiskDelete issues the disk delete call and blocks until
// the zonal operation completes, then clears the resource ID.
func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	// Delete the disk
	op, err := config.clientCompute.Disks.Delete(
		config.Project, d.Get("zone").(string), d.Id()).Do()
	if err != nil {
		return fmt.Errorf("Error deleting disk: %s", err)
	}
	// Wait for the operation to complete
	w := &OperationWaiter{
		Service: config.clientCompute,
		Op:      op,
		Project: config.Project,
		Zone:    d.Get("zone").(string),
		Type:    OperationWaitZone,
	}
	state := w.Conf()
	state.Timeout = 2 * time.Minute
	state.MinTimeout = 1 * time.Second
	opRaw, err := state.WaitForState()
	if err != nil {
		return fmt.Errorf("Error waiting for disk to delete: %s", err)
	}
	// The waiter returns interface{}; recover the concrete operation to
	// inspect any server-side error.
	op = opRaw.(*compute.Operation)
	if op.Error != nil {
		// Return the error
		return OperationError(*op.Error)
	}
	d.SetId("")
	return nil
}
Add SelfLink field to GCE disk resource.
package google
import (
"fmt"
"log"
"time"
"code.google.com/p/google-api-go-client/compute/v1"
"code.google.com/p/google-api-go-client/googleapi"
"github.com/hashicorp/terraform/helper/schema"
)
// resourceComputeDisk returns the Terraform resource definition for a GCE
// persistent disk, including the computed self_link attribute.
func resourceComputeDisk() *schema.Resource {
	diskSchema := map[string]*schema.Schema{
		"name": {
			Type:     schema.TypeString,
			Required: true,
			ForceNew: true,
		},
		"zone": {
			Type:     schema.TypeString,
			Required: true,
			ForceNew: true,
		},
		"image": {
			Type:     schema.TypeString,
			Optional: true,
			ForceNew: true,
		},
		"size": {
			Type:     schema.TypeInt,
			Optional: true,
			ForceNew: true,
		},
		"type": {
			Type:     schema.TypeString,
			Optional: true,
			ForceNew: true,
		},
		// Populated from the API after creation; never set by the user.
		"self_link": {
			Type:     schema.TypeString,
			Computed: true,
		},
	}
	return &schema.Resource{
		Create: resourceComputeDiskCreate,
		Read:   resourceComputeDiskRead,
		Delete: resourceComputeDiskDelete,
		Schema: diskSchema,
	}
}
// resourceComputeDiskCreate creates a new GCE persistent disk from the
// resource configuration and waits for the zonal operation to finish.
// The resource ID is set to the disk name as soon as the insert call
// returns, so a partially-created disk is still tracked in state.
func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	// Get the zone
	log.Printf("[DEBUG] Loading zone: %s", d.Get("zone").(string))
	zone, err := config.clientCompute.Zones.Get(
		config.Project, d.Get("zone").(string)).Do()
	if err != nil {
		return fmt.Errorf(
			"Error loading zone '%s': %s", d.Get("zone").(string), err)
	}
	// Build the disk parameter
	disk := &compute.Disk{
		Name:   d.Get("name").(string),
		SizeGb: int64(d.Get("size").(int)),
	}
	// If we were given a source image, load that.
	if v, ok := d.GetOk("image"); ok {
		log.Printf("[DEBUG] Loading image: %s", v.(string))
		image, err := readImage(config, v.(string))
		if err != nil {
			return fmt.Errorf(
				"Error loading image '%s': %s",
				v.(string), err)
		}
		disk.SourceImage = image.SelfLink
	}
	// Optional disk type (e.g. pd-ssd); resolved against the zone loaded above.
	if v, ok := d.GetOk("type"); ok {
		log.Printf("[DEBUG] Loading disk type: %s", v.(string))
		diskType, err := readDiskType(config, zone, v.(string))
		if err != nil {
			return fmt.Errorf(
				"Error loading disk type '%s': %s",
				v.(string), err)
		}
		disk.Type = diskType.SelfLink
	}
	op, err := config.clientCompute.Disks.Insert(
		config.Project, d.Get("zone").(string), disk).Do()
	if err != nil {
		return fmt.Errorf("Error creating disk: %s", err)
	}
	// It probably maybe worked, so store the ID now
	d.SetId(disk.Name)
	// Wait for the operation to complete
	w := &OperationWaiter{
		Service: config.clientCompute,
		Op:      op,
		Project: config.Project,
		Zone:    d.Get("zone").(string),
		Type:    OperationWaitZone,
	}
	state := w.Conf()
	state.Timeout = 2 * time.Minute
	state.MinTimeout = 1 * time.Second
	opRaw, err := state.WaitForState()
	if err != nil {
		return fmt.Errorf("Error waiting for disk to create: %s", err)
	}
	// The waiter returns interface{}; recover the concrete operation to
	// inspect any server-side error.
	op = opRaw.(*compute.Operation)
	if op.Error != nil {
		// The resource didn't actually create
		d.SetId("")
		// Return the error
		return OperationError(*op.Error)
	}
	return resourceComputeDiskRead(d, meta)
}
// resourceComputeDiskRead refreshes state for the disk and stores its
// self_link. A 404 clears the resource ID so Terraform knows the disk is
// gone; any other error is reported to the caller.
func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	disk, err := config.clientCompute.Disks.Get(
		config.Project, d.Get("zone").(string), d.Id()).Do()
	if err != nil {
		gerr, ok := err.(*googleapi.Error)
		if ok && gerr.Code == 404 {
			// The resource doesn't exist anymore
			d.SetId("")
			return nil
		}
		return fmt.Errorf("Error reading disk: %s", err)
	}
	d.Set("self_link", disk.SelfLink)
	return nil
}
// resourceComputeDiskDelete issues the disk delete call and blocks until
// the zonal operation completes, then clears the resource ID.
func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	// Delete the disk
	op, err := config.clientCompute.Disks.Delete(
		config.Project, d.Get("zone").(string), d.Id()).Do()
	if err != nil {
		return fmt.Errorf("Error deleting disk: %s", err)
	}
	// Wait for the operation to complete
	w := &OperationWaiter{
		Service: config.clientCompute,
		Op:      op,
		Project: config.Project,
		Zone:    d.Get("zone").(string),
		Type:    OperationWaitZone,
	}
	state := w.Conf()
	state.Timeout = 2 * time.Minute
	state.MinTimeout = 1 * time.Second
	opRaw, err := state.WaitForState()
	if err != nil {
		return fmt.Errorf("Error waiting for disk to delete: %s", err)
	}
	// The waiter returns interface{}; recover the concrete operation to
	// inspect any server-side error.
	op = opRaw.(*compute.Operation)
	if op.Error != nil {
		// Return the error
		return OperationError(*op.Error)
	}
	d.SetId("")
	return nil
}
|
package google
import (
"fmt"
"log"
"regexp"
"github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
)
// resourceComputeDisk returns the Terraform resource definition for a GCE
// persistent disk, supporting creation from an image or snapshot and
// customer-supplied encryption keys.
func resourceComputeDisk() *schema.Resource {
	diskSchema := map[string]*schema.Schema{
		"name": {
			Type:     schema.TypeString,
			Required: true,
			ForceNew: true,
		},
		"zone": {
			Type:     schema.TypeString,
			Required: true,
			ForceNew: true,
		},
		// Raw key is write-only and marked Sensitive; only its SHA-256
		// fingerprint is ever read back into state.
		"disk_encryption_key_raw": {
			Type:      schema.TypeString,
			Optional:  true,
			ForceNew:  true,
			Sensitive: true,
		},
		"disk_encryption_key_sha256": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"image": {
			Type:     schema.TypeString,
			Optional: true,
			ForceNew: true,
		},
		"project": {
			Type:     schema.TypeString,
			Optional: true,
			ForceNew: true,
		},
		"size": {
			Type:     schema.TypeInt,
			Optional: true,
			ForceNew: true,
		},
		"self_link": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"snapshot": {
			Type:     schema.TypeString,
			Optional: true,
			ForceNew: true,
		},
		"type": {
			Type:     schema.TypeString,
			Optional: true,
			ForceNew: true,
		},
	}
	return &schema.Resource{
		Create: resourceComputeDiskCreate,
		Read:   resourceComputeDiskRead,
		Delete: resourceComputeDiskDelete,
		Schema: diskSchema,
	}
}
// resourceComputeDiskCreate creates the disk (optionally from an image or
// snapshot, optionally customer-encrypted) and waits for the zonal
// operation. The resource ID is set to the disk name right after the
// insert call returns, so a partially-created disk is still tracked.
func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	project, err := getProject(d, config)
	if err != nil {
		return err
	}
	// Get the zone
	log.Printf("[DEBUG] Loading zone: %s", d.Get("zone").(string))
	zone, err := config.clientCompute.Zones.Get(
		project, d.Get("zone").(string)).Do()
	if err != nil {
		return fmt.Errorf(
			"Error loading zone '%s': %s", d.Get("zone").(string), err)
	}
	// Build the disk parameter
	disk := &compute.Disk{
		Name:   d.Get("name").(string),
		SizeGb: int64(d.Get("size").(int)),
	}
	// If we were given a source image, load that.
	if v, ok := d.GetOk("image"); ok {
		log.Printf("[DEBUG] Resolving image name: %s", v.(string))
		imageUrl, err := resolveImage(config, v.(string))
		if err != nil {
			return fmt.Errorf(
				"Error resolving image name '%s': %s",
				v.(string), err)
		}
		disk.SourceImage = imageUrl
		log.Printf("[DEBUG] Image name resolved to: %s", imageUrl)
	}
	// Optional disk type (e.g. pd-ssd); resolved against the zone loaded above.
	if v, ok := d.GetOk("type"); ok {
		log.Printf("[DEBUG] Loading disk type: %s", v.(string))
		diskType, err := readDiskType(config, zone, v.(string))
		if err != nil {
			return fmt.Errorf(
				"Error loading disk type '%s': %s",
				v.(string), err)
		}
		disk.Type = diskType.SelfLink
	}
	if v, ok := d.GetOk("snapshot"); ok {
		snapshotName := v.(string)
		// A value starting with "http" is treated as a full URI/self-link;
		// anything else is looked up by name in this project.
		// MatchString's error is safely ignored: the pattern is a constant.
		match, _ := regexp.MatchString("^http", snapshotName)
		if match {
			disk.SourceSnapshot = snapshotName
		} else {
			log.Printf("[DEBUG] Loading snapshot: %s", snapshotName)
			snapshotData, err := config.clientCompute.Snapshots.Get(
				project, snapshotName).Do()
			if err != nil {
				return fmt.Errorf(
					"Error loading snapshot '%s': %s",
					snapshotName, err)
			}
			disk.SourceSnapshot = snapshotData.SelfLink
		}
	}
	// Customer-supplied encryption key; only its SHA-256 is read back later.
	if v, ok := d.GetOk("disk_encryption_key_raw"); ok {
		disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{}
		disk.DiskEncryptionKey.RawKey = v.(string)
	}
	op, err := config.clientCompute.Disks.Insert(
		project, d.Get("zone").(string), disk).Do()
	if err != nil {
		return fmt.Errorf("Error creating disk: %s", err)
	}
	// It probably maybe worked, so store the ID now
	d.SetId(disk.Name)
	err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating Disk")
	if err != nil {
		return err
	}
	return resourceComputeDiskRead(d, meta)
}
// resourceComputeDiskRead refreshes disk state from the API, storing the
// self_link and, when the disk is customer-encrypted, the key's SHA-256
// fingerprint. Not-found errors are delegated to handleNotFoundError.
func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	project, err := getProject(d, config)
	if err != nil {
		return err
	}
	disk, err := config.clientCompute.Disks.Get(
		project, d.Get("zone").(string), d.Id()).Do()
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("Disk %q", d.Get("name").(string)))
	}
	d.Set("self_link", disk.SelfLink)
	if key := disk.DiskEncryptionKey; key != nil && key.Sha256 != "" {
		d.Set("disk_encryption_key_sha256", key.Sha256)
	}
	return nil
}
// resourceComputeDiskDelete deletes the disk and waits for the zonal
// operation. A 404 on the delete call means the disk is already gone and
// is treated as success (the ID is simply cleared).
func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	project, err := getProject(d, config)
	if err != nil {
		return err
	}
	// Delete the disk
	op, err := config.clientCompute.Disks.Delete(
		project, d.Get("zone").(string), d.Id()).Do()
	if err != nil {
		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
			log.Printf("[WARN] Removing Disk %q because it's gone", d.Get("name").(string))
			// The resource doesn't exist anymore
			d.SetId("")
			return nil
		}
		return fmt.Errorf("Error deleting disk: %s", err)
	}
	zone := d.Get("zone").(string)
	err = computeOperationWaitZone(config, op, project, zone, "Deleting Disk")
	if err != nil {
		return err
	}
	d.SetId("")
	return nil
}
Be more specific on the regexp used to detect URI
package google
import (
"fmt"
"log"
"regexp"
"github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
)
// resourceComputeDisk defines the google_compute_disk resource. All
// user-settable attributes force replacement; self_link and the key
// fingerprint are computed from the API.
func resourceComputeDisk() *schema.Resource {
	return &schema.Resource{
		Create: resourceComputeDiskCreate,
		Read:   resourceComputeDiskRead,
		Delete: resourceComputeDiskDelete,
		Schema: map[string]*schema.Schema{
			"name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"zone": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// Write-only raw key, marked Sensitive; only the SHA-256
			// fingerprint below is ever stored back into state.
			"disk_encryption_key_raw": {
				Type:      schema.TypeString,
				Optional:  true,
				ForceNew:  true,
				Sensitive: true,
			},
			"disk_encryption_key_sha256": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"image": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
			"project": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
			"size": {
				Type:     schema.TypeInt,
				Optional: true,
				ForceNew: true,
			},
			"self_link": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"snapshot": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
			"type": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
		},
	}
}
// resourceComputeDiskCreate creates the disk (optionally from an image or
// snapshot, optionally customer-encrypted) and waits for the zonal
// operation. The resource ID is set to the disk name right after the
// insert call returns, so a partially-created disk is still tracked.
func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	project, err := getProject(d, config)
	if err != nil {
		return err
	}
	// Get the zone
	log.Printf("[DEBUG] Loading zone: %s", d.Get("zone").(string))
	zone, err := config.clientCompute.Zones.Get(
		project, d.Get("zone").(string)).Do()
	if err != nil {
		return fmt.Errorf(
			"Error loading zone '%s': %s", d.Get("zone").(string), err)
	}
	// Build the disk parameter
	disk := &compute.Disk{
		Name:   d.Get("name").(string),
		SizeGb: int64(d.Get("size").(int)),
	}
	// If we were given a source image, load that.
	if v, ok := d.GetOk("image"); ok {
		log.Printf("[DEBUG] Resolving image name: %s", v.(string))
		imageUrl, err := resolveImage(config, v.(string))
		if err != nil {
			return fmt.Errorf(
				"Error resolving image name '%s': %s",
				v.(string), err)
		}
		disk.SourceImage = imageUrl
		log.Printf("[DEBUG] Image name resolved to: %s", imageUrl)
	}
	// Optional disk type (e.g. pd-ssd); resolved against the zone loaded above.
	if v, ok := d.GetOk("type"); ok {
		log.Printf("[DEBUG] Loading disk type: %s", v.(string))
		diskType, err := readDiskType(config, zone, v.(string))
		if err != nil {
			return fmt.Errorf(
				"Error loading disk type '%s': %s",
				v.(string), err)
		}
		disk.Type = diskType.SelfLink
	}
	if v, ok := d.GetOk("snapshot"); ok {
		snapshotName := v.(string)
		// Only a compute-API self-link is passed through verbatim; any
		// other value is looked up as a snapshot name in this project.
		// MatchString's error is safely ignored: the pattern is a constant.
		match, _ := regexp.MatchString("^https://www.googleapis.com/compute", snapshotName)
		if match {
			disk.SourceSnapshot = snapshotName
		} else {
			log.Printf("[DEBUG] Loading snapshot: %s", snapshotName)
			snapshotData, err := config.clientCompute.Snapshots.Get(
				project, snapshotName).Do()
			if err != nil {
				return fmt.Errorf(
					"Error loading snapshot '%s': %s",
					snapshotName, err)
			}
			disk.SourceSnapshot = snapshotData.SelfLink
		}
	}
	// Customer-supplied encryption key; only its SHA-256 is read back later.
	if v, ok := d.GetOk("disk_encryption_key_raw"); ok {
		disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{}
		disk.DiskEncryptionKey.RawKey = v.(string)
	}
	op, err := config.clientCompute.Disks.Insert(
		project, d.Get("zone").(string), disk).Do()
	if err != nil {
		return fmt.Errorf("Error creating disk: %s", err)
	}
	// It probably maybe worked, so store the ID now
	d.SetId(disk.Name)
	err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating Disk")
	if err != nil {
		return err
	}
	return resourceComputeDiskRead(d, meta)
}
// resourceComputeDiskRead refreshes disk state from the API, populating
// self_link and (when the disk is customer-encrypted) the key's SHA-256
// fingerprint. Not-found errors are delegated to handleNotFoundError.
func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	project, err := getProject(d, config)
	if err != nil {
		return err
	}
	zone := d.Get("zone").(string)
	disk, err := config.clientCompute.Disks.Get(project, zone, d.Id()).Do()
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("Disk %q", d.Get("name").(string)))
	}
	d.Set("self_link", disk.SelfLink)
	if encKey := disk.DiskEncryptionKey; encKey != nil && encKey.Sha256 != "" {
		d.Set("disk_encryption_key_sha256", encKey.Sha256)
	}
	return nil
}
// resourceComputeDiskDelete deletes the disk and waits for the zonal
// operation. A 404 on the delete call means the disk is already gone and
// is treated as success (the ID is simply cleared).
func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	project, err := getProject(d, config)
	if err != nil {
		return err
	}
	// Delete the disk
	op, err := config.clientCompute.Disks.Delete(
		project, d.Get("zone").(string), d.Id()).Do()
	if err != nil {
		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
			log.Printf("[WARN] Removing Disk %q because it's gone", d.Get("name").(string))
			// The resource doesn't exist anymore
			d.SetId("")
			return nil
		}
		return fmt.Errorf("Error deleting disk: %s", err)
	}
	zone := d.Get("zone").(string)
	err = computeOperationWaitZone(config, op, project, zone, "Deleting Disk")
	if err != nil {
		return err
	}
	d.SetId("")
	return nil
}
|
// picviewer2.go
package main
/*
REVISION HISTORY
======== =======
7 Apr 20 -- Now called picviewer2.go. I'm going to try the image reading trick I learned from the Qt example imageviewer.
9 Apr 20 -- Will try to handle arrow keys.
*/
import (
"fmt"
"github.com/therecipe/qt/core"
"github.com/therecipe/qt/gui"
"github.com/therecipe/qt/widgets"
"os"
)
// Package-level Qt objects shared between imageViewer and main.
var (
	displayArea *widgets.QWidget        // top-level widget returned by imageViewer
	scene *widgets.QGraphicsScene       // scene holding the pixmap or movie
	view *widgets.QGraphicsView         // view embedded in displayArea's layout
	item *widgets.QGraphicsPixmapItem   // current static-image item in the scene
	mainApp *widgets.QApplication       // application handle, used by Quit()
	imageFileName string                // image path taken from os.Args[1]
)
// imageViewer builds and returns the main display widget: a QGraphicsView
// showing imageFileName (a QMovie for animated GIFs, a QPixmap otherwise),
// a Quit button, and a key handler that announces N/B/forward/back keys.
func imageViewer() *widgets.QWidget {
	displayArea = widgets.NewQWidget(nil, 0)
	scene = widgets.NewQGraphicsScene(nil)
	view = widgets.NewQGraphicsView(nil)

	// BUG FIX: construct the reader before calling methods on it. The
	// original declared a nil *QImageReader and called SetAutoTransform on
	// it first — a nil-pointer dereference — and the setting was lost anyway
	// when the reader was re-created afterwards.
	imageReader := gui.NewQImageReader3(imageFileName, core.NewQByteArray2("", 0))
	imageReader.SetAutoTransform(true)

	// Pop up a message box naming whichever recognized key was pressed.
	arrowEventclosure := func(ev *gui.QKeyEvent) {
		if ev.Key() == int(core.Qt__Key_N) {
			widgets.QMessageBox_Information(nil, "N key", "N key hit", widgets.QMessageBox__Ok, widgets.QMessageBox__Ok)
		} else if ev.Key() == int(core.Qt__Key_B) {
			widgets.QMessageBox_Information(nil, "B key", "B key hit", widgets.QMessageBox__Ok, widgets.QMessageBox__Ok)
		} else if ev.Matches(gui.QKeySequence__Forward) {
			widgets.QMessageBox_Information(nil, "key forward", "forward key hit", widgets.QMessageBox__Ok, widgets.QMessageBox__Ok)
		} else if ev.Matches(gui.QKeySequence__Back) {
			widgets.QMessageBox_Information(nil, "key back", "back key hit", widgets.QMessageBox__Ok, widgets.QMessageBox__Ok)
		}
	}
	displayArea.ConnectKeyPressEvent(arrowEventclosure)

	// Animated GIFs need a QMovie inside a QLabel; static images go into a
	// QGraphicsPixmapItem directly.
	fmt.Println("Animated GIF : ", imageReader.SupportsAnimation())
	if imageReader.SupportsAnimation() {
		// Constructing the movie from imageReader's pointer caused a
		// segmentation violation, so the movie re-reads the file from disk.
		var movie = gui.NewQMovie3(imageFileName, core.NewQByteArray2("", 0), nil)
		// see http://stackoverflow.com/questions/5769766/qt-how-to-show-gifanimated-image-in-qgraphicspixmapitem
		var movieLabel = widgets.NewQLabel(nil, core.Qt__Widget)
		movieLabel.SetMovie(movie)
		movie.Start()
		scene.AddWidget(movieLabel, core.Qt__Widget)
	} else {
		var pixmap = gui.NewQPixmap5(imageFileName, "", core.Qt__AutoColor)
		width := pixmap.Width()
		height := pixmap.Height()
		fmt.Printf(" Image from file %s is %d wide and %d high \n", imageFileName, width, height)
		item = widgets.NewQGraphicsPixmapItem2(pixmap, nil)
		scene.AddItem(item)
	}
	view.SetScene(scene)

	// Quit button: beep, then stop the Qt event loop.
	// https://godoc.org/github.com/therecipe/qt/widgets#QApplication.Quit
	var button = widgets.NewQPushButton2("Quit", nil)
	btnclicked := func(flag bool) {
		widgets.QApplication_Beep()
		mainApp.Quit()
	}
	button.ConnectClicked(btnclicked)

	var layout = widgets.NewQVBoxLayout()
	layout.AddWidget(view, 0, core.Qt__AlignCenter)
	layout.AddWidget(button, 0, core.Qt__AlignCenter)
	displayArea.SetLayout(layout)
	return displayArea
}
// main validates the command line, records the image filename, and runs
// the Qt event loop until the user quits.
func main() {
	if len(os.Args) != 2 {
		// BUG FIX: usage errors now go to stderr and exit non-zero so a
		// calling shell can detect the failure (the original printed to
		// stdout and exited 0).
		fmt.Fprintf(os.Stderr, "Usage : %s <image file>\n", os.Args[0])
		os.Exit(1)
	}
	imageFileName = os.Args[1]
	fmt.Println("Loading image : ", imageFileName)
	mainApp = widgets.NewQApplication(len(os.Args), os.Args)
	imageViewer().Show()
	widgets.QApplication_Exec()
}
04/11/2020 10:00:03 AM
picviewer2/picviewer2.go -- works recognizing keys.
// picviewer2.go
package main
/*
REVISION HISTORY
======== =======
7 Apr 20 -- Now called picviewer2.go. I'm going to try the image reading trick I learned from the Qt example imageviewer.
9 Apr 20 -- Will try to handle arrow keys.
*/
import (
"fmt"
"github.com/therecipe/qt/core"
"github.com/therecipe/qt/gui"
"github.com/therecipe/qt/widgets"
"os"
)
// Package-level Qt objects shared between imageViewer and main.
var (
	displayArea *widgets.QWidget        // top-level widget returned by imageViewer
	scene *widgets.QGraphicsScene       // scene holding the pixmap or movie
	view *widgets.QGraphicsView         // view embedded in displayArea's layout
	item *widgets.QGraphicsPixmapItem   // current static-image item in the scene
	mainApp *widgets.QApplication       // application handle, used by Quit()
	imageFileName string                // image path taken from os.Args[1]
)
// imageViewer builds and returns the main display widget: a QGraphicsView
// showing imageFileName, a Quit button, and a key handler that announces
// Ctrl-N, B, N, Ctrl-O and F1.
func imageViewer() *widgets.QWidget {
	displayArea = widgets.NewQWidget(nil, 0)
	scene = widgets.NewQGraphicsScene(nil)
	view = widgets.NewQGraphicsView(nil)

	// BUG FIX: construct the reader before calling methods on it. The
	// original declared a nil *QImageReader and called SetAutoTransform on
	// it first — a nil-pointer dereference — and the setting was lost anyway
	// when the reader was re-created afterwards.
	imageReader := gui.NewQImageReader3(imageFileName, core.NewQByteArray2("", 0))
	imageReader.SetAutoTransform(true)

	// Must test combo keys before indiv keys, as indiv key tests ignore the
	// modifiers. Testing N before Ctrl-N always found N and never Ctrl-N.
	arrowEventclosure := func(ev *gui.QKeyEvent) {
		if false { // placeholder so every real case below reads as "else if"
			// do nothing, just so I can test this.
		} else if ev.Matches(gui.QKeySequence__New) {
			widgets.QMessageBox_Information(nil, "key New", "Ctrl-N hit", widgets.QMessageBox__Ok, widgets.QMessageBox__Ok)
		} else if ev.Key() == int(core.Qt__Key_B) {
			widgets.QMessageBox_Information(nil, "B key", "B key hit", widgets.QMessageBox__Ok, widgets.QMessageBox__Ok)
		} else if ev.Key() == int(core.Qt__Key_N) {
			widgets.QMessageBox_Information(nil, "N key", "N key hit", widgets.QMessageBox__Ok, widgets.QMessageBox__Ok)
		} else if ev.Matches(gui.QKeySequence__Open) {
			widgets.QMessageBox_Information(nil, "key Open", "Ctrl-O key hit", widgets.QMessageBox__Ok, widgets.QMessageBox__Ok)
		} else if ev.Matches(gui.QKeySequence__HelpContents) {
			widgets.QMessageBox_Information(nil, "key Help", "F1 key hit", widgets.QMessageBox__Ok, widgets.QMessageBox__Ok)
		}
	}
	displayArea.ConnectKeyPressEvent(arrowEventclosure)

	// Animated GIFs need a QMovie inside a QLabel; static images go into a
	// QGraphicsPixmapItem directly.
	fmt.Println("Animated GIF : ", imageReader.SupportsAnimation())
	if imageReader.SupportsAnimation() {
		// Constructing the movie from imageReader's pointer caused a
		// segmentation violation, so the movie re-reads the file from disk.
		var movie = gui.NewQMovie3(imageFileName, core.NewQByteArray2("", 0), nil)
		// see http://stackoverflow.com/questions/5769766/qt-how-to-show-gifanimated-image-in-qgraphicspixmapitem
		var movieLabel = widgets.NewQLabel(nil, core.Qt__Widget)
		movieLabel.SetMovie(movie)
		movie.Start()
		scene.AddWidget(movieLabel, core.Qt__Widget)
	} else {
		var pixmap = gui.NewQPixmap5(imageFileName, "", core.Qt__AutoColor)
		width := pixmap.Width()
		height := pixmap.Height()
		fmt.Printf(" Image from file %s is %d wide and %d high \n", imageFileName, width, height)
		item = widgets.NewQGraphicsPixmapItem2(pixmap, nil)
		scene.AddItem(item)
	}
	view.SetScene(scene)

	// Quit button: beep, then stop the Qt event loop.
	// https://godoc.org/github.com/therecipe/qt/widgets#QApplication.Quit
	var button = widgets.NewQPushButton2("Quit", nil)
	btnclicked := func(flag bool) {
		widgets.QApplication_Beep()
		mainApp.Quit()
	}
	button.ConnectClicked(btnclicked)

	var layout = widgets.NewQVBoxLayout()
	layout.AddWidget(view, 0, core.Qt__AlignCenter)
	layout.AddWidget(button, 0, core.Qt__AlignCenter)
	displayArea.SetLayout(layout)
	return displayArea
}
// main validates the command line, records the image filename, and runs
// the Qt event loop until the user quits.
func main() {
	if len(os.Args) != 2 {
		// BUG FIX: usage errors now go to stderr and exit non-zero so a
		// calling shell can detect the failure (the original printed to
		// stdout and exited 0).
		fmt.Fprintf(os.Stderr, "Usage : %s <image file>\n", os.Args[0])
		os.Exit(1)
	}
	imageFileName = os.Args[1]
	fmt.Println("Loading image : ", imageFileName)
	mainApp = widgets.NewQApplication(len(os.Args), os.Args)
	imageViewer().Show()
	widgets.QApplication_Exec()
}
|
// picviewer2.go
package main
/*
REVISION HISTORY
======== =======
7 Apr 20 -- Now called picviewer2.go. I'm going to try the image reading trick I learned from the Qt example imageviewer.
9 Apr 20 -- Will try to handle arrow keys.
11 Apr 20 -- Won't handle arrow keys that I can get to work. Will use N and B, I think.
12 Apr 20 -- Now that the keys are working, I don't need a pushbutton.
*/
import (
	"fmt"
	"io/ioutil"
	"log"
	"math"
	"os"
	"path/filepath"
	"sort"
	"strings"

	"github.com/therecipe/qt/core"
	"github.com/therecipe/qt/gui"
	"github.com/therecipe/qt/widgets"
)
// Package-level state shared between the viewer widget, the key handler,
// and the picture-paging helpers (nextPic/prevPic/displayImageByNumber).
var (
	displayArea *widgets.QWidget        // top-level widget returned by imageViewer
	scene *widgets.QGraphicsScene       // scene holding the pixmap or movie
	view *widgets.QGraphicsView         // view embedded in displayArea's layout
	item *widgets.QGraphicsPixmapItem   // current static-image item in the scene
	mainApp *widgets.QApplication       // application handle, used by Quit()
	imageFileName string                // currently displayed image filename
	picfiles sort.StringSlice           // sorted picture filenames from the working directory
	currImgIdx int                      // index into picfiles of the displayed image
	origImgIdx int                      // index of the image named on the command line
	prevImgIdx int                      // index before the most recent next/prev move
)
// imageViewer builds the main display widget for the current image and
// installs the key handler: N/B page through pictures, Ctrl-Q/Esc/Q quit,
// Ctrl-O and F1 just announce themselves for now.
func imageViewer() *widgets.QWidget {
	displayArea = widgets.NewQWidget(nil, 0)
	scene = widgets.NewQGraphicsScene(displayArea)
	view = widgets.NewQGraphicsView(displayArea)

	// BUG FIX: construct the reader before calling methods on it. The
	// original declared a nil *QImageReader and called SetAutoTransform on
	// it first — a nil-pointer dereference — and the setting was lost anyway
	// when the reader was re-created afterwards.
	imageReader := gui.NewQImageReader3(imageFileName, core.NewQByteArray2("", 0))
	imageReader.SetAutoTransform(true)

	// test to see if we are dealing with animated GIF
	fmt.Println("Animated GIF : ", imageReader.SupportsAnimation())
	if imageReader.SupportsAnimation() {
		// Constructing the movie from imageReader's pointer caused a
		// segmentation violation, so the movie re-reads the file from disk.
		var movie = gui.NewQMovie3(imageFileName, core.NewQByteArray2("", 0), nil)
		// see http://stackoverflow.com/questions/5769766/qt-how-to-show-gifanimated-image-in-qgraphicspixmapitem
		var movieLabel = widgets.NewQLabel(nil, core.Qt__Widget)
		movieLabel.SetMovie(movie)
		movie.Start()
		scene.AddWidget(movieLabel, core.Qt__Widget)
	} else {
		var pixmap = gui.NewQPixmap5(imageFileName, "", core.Qt__AutoColor)
		width := pixmap.Width()
		height := pixmap.Height()
		fmt.Printf(" Image from file %s is %d wide and %d high \n", imageFileName, width, height)
		item = widgets.NewQGraphicsPixmapItem2(pixmap, nil)
		scene.AddItem(item)
	}
	view.SetScene(scene)

	var layout = widgets.NewQVBoxLayout()
	layout.AddWidget(view, 0, core.Qt__AlignCenter)
	displayArea.SetLayout(layout) // displayArea itself has no AddItem method, hence the layout.

	// Must test combo keys before indiv keys, as indiv key tests ignore the
	// modifiers. Testing N before Ctrl-N always found N and never Ctrl-N.
	arrowEventclosure := func(ev *gui.QKeyEvent) {
		if false { // placeholder so every real case below reads as "else if"
			// do nothing, just so I can test this.
		} else if ev.Matches(gui.QKeySequence__New) { // ctrl-n
			// NOTE(review): unlike the plain N key below, this branch does
			// not call displayImageByNumber() — confirm that is intended.
			nextPic()
		} else if ev.Matches(gui.QKeySequence__Quit) { // ctrl-q
			mainApp.Quit()
		} else if ev.Matches(gui.QKeySequence__Cancel) { // ESC
			mainApp.Quit()
		} else if ev.Matches(gui.QKeySequence__Open) { // ctrl-o
			widgets.QMessageBox_Information(nil, "key Open", "Ctrl-O key hit", widgets.QMessageBox__Ok, widgets.QMessageBox__Ok)
		} else if ev.Matches(gui.QKeySequence__HelpContents) {
			widgets.QMessageBox_Information(nil, "key Help", "F1 key hit", widgets.QMessageBox__Ok, widgets.QMessageBox__Ok)
		} else if ev.Key() == int(core.Qt__Key_B) {
			prevPic()
			displayImageByNumber()
		} else if ev.Key() == int(core.Qt__Key_N) {
			nextPic()
			displayImageByNumber()
		} else if ev.Key() == int(core.Qt__Key_Q) {
			mainApp.Quit()
		}
	}
	displayArea.ConnectKeyPressEvent(arrowEventclosure)
	return displayArea
}
// main loads the image named on the command line, shows the viewer, then
// scans the working directory for picture files so N/B can page through
// them, and finally enters the Qt event loop.
func main() {
	if len(os.Args) != 2 {
		fmt.Printf("Usage : %s <image file>\n", os.Args[0])
		os.Exit(0)
	}
	imageFileName = os.Args[1]
	fmt.Println("Loading image : ", imageFileName)
	mainApp = widgets.NewQApplication(len(os.Args), os.Args)
	imageViewer().Show()
	workingdir, _ := os.Getwd()
	// populate the string slice of all picture filenames, and the index in this slice of the initial displayed image.
	files, err := ioutil.ReadDir(workingdir)
	if err != nil { // It seems that ReadDir itself stops when it gets an error of any kind, and I cannot change that.
		log.Println(err, "so calling my own MyReadDir.")
		files = MyReadDir(workingdir)
	}
	picfiles = make(sort.StringSlice, 0, len(files))
	for _, f := range files {
		if isPicFile(f.Name()) {
			picfiles = append(picfiles, f.Name())
		}
	}
	picfiles.Sort()
	// NOTE(review): StringSlice.Search returns the insertion index even when
	// imageFileName is absent (e.g. when given with a directory prefix), so
	// currImgIdx may name a different file or equal len(picfiles) — confirm
	// downstream code tolerates that.
	currImgIdx = picfiles.Search(imageFileName)
	fmt.Println(" Current image index in the picfiles slice is", currImgIdx, "; there are", len(picfiles), "picture files in", workingdir)
	origImgIdx = currImgIdx
	widgets.QApplication_Exec()
	// mainApp.exec() // also works.}
} // end main
// ------------------------------- MyReadDir -----------------------------------
func MyReadDir(dir string) []os.FileInfo {
dirname, err := os.Open(dir)
// dirname, err := os.OpenFile(dir, os.O_RDONLY,0777)
if err != nil {
return nil
}
defer dirname.Close()
names, err := dirname.Readdirnames(0) // zero means read all names into the returned []string
if err != nil {
return nil
}
fi := make([]os.FileInfo, 0, len(names))
for _, s := range names {
L, err := os.Lstat(s)
if err != nil {
log.Println(" Error from os.Lstat ", err)
continue
}
fi = append(fi, L)
}
return fi
} // MyReadDir
// ---------------------------- isPicFile ------------------------------
// isPicFile reports whether filename has a recognized picture extension.
// BUG FIX: the ".xcf" entry was missing its dot, so any name merely
// ending in "xcf" matched. Matching is now also case-insensitive so
// ".JPG" etc. are recognized.
func isPicFile(filename string) bool {
	picext := []string{".jpg", ".png", ".jpeg", ".gif", ".xcf"}
	lower := strings.ToLower(filename)
	for _, ext := range picext {
		if strings.HasSuffix(lower, ext) {
			return true
		}
	}
	return false
}
// arrowEvent := gui.NewQKeyEvent(core.QEvent__KeyPress, int(core.Qt__Key_Up), core.Qt__NoModifier, "", false, 0)
// displayArea.ConnectKeyPressEvent(func(ev *gui.QKeyEvent) { This doesn't work.
// widgets.QMessageBox_Information(nil, "OK", "Up arrow key kit", widgets.QMessageBox__Ok, widgets.QMessageBox__Ok)
// })(arrowEvent)
// -------------------------- NextPic --------------------------------
// nextPic advances currImgIdx by one, clamped to the last picture, and
// remembers the previous position in prevImgIdx.
func nextPic() {
	prevImgIdx = currImgIdx
	if currImgIdx < len(picfiles)-1 {
		currImgIdx++
	}
	// BUG FIX: the trace message misspelled the function name ("NexPic").
	fmt.Println(" In nextPic. prevImgIdx=", prevImgIdx, ", and currImgIdx=", currImgIdx)
}
// ------------------------- PrevPic -------------------------------
// prevPic steps currImgIdx back to the previous picture, remembering the
// old index in prevImgIdx.  The index saturates at 0.
// Fixes: gofmt-noncompliant `func prevPic()()` and a trace message that
// wrongly named NexPic.
func prevPic() {
	prevImgIdx = currImgIdx
	if currImgIdx > 0 {
		currImgIdx--
	}
	fmt.Println(" In PrevPic. prevImgIdx=", prevImgIdx, ", and currImgIdx=", currImgIdx)
}
// ------------------------- DisplayImageByNumber ----------------------
// displayImageByNumber loads picfiles[currImgIdx] into a QPixmap, swaps it
// into the scene, and resizes the window to roughly fit the image.
// NOTE: statement order matters -- the old item must be removed from the
// scene before `item` is reassigned, and the new item added afterwards.
func displayImageByNumber() {
	imageFileName = picfiles[currImgIdx]
	fmt.Println(" in displayImageByNumber. currImgIdx=", currImgIdx, ", imageFileName=", imageFileName)
	var pic = gui.NewQPixmap5(imageFileName, "", core.Qt__AutoColor)
	scene.RemoveItem(item)
	item = widgets.NewQGraphicsPixmapItem2(pic, nil)
	width := pic.Width()
	height := pic.Height()
	// Window is sized ~10% larger than the image in each dimension.
	var fwidth float64 = math.Trunc(float64(width) * 1.1)
	var fheight float64 = math.Trunc(float64(height) * 1.1)
	fmt.Printf(" displayImageByNumber %s is %d wide and %d high, goes to %g wide and %g high \n",
		imageFileName, width, height, fwidth, fheight)
	width1 := int(fwidth)
	if fwidth < 300 {
		width1 += 100 // widen the window for small images
	}
	height1 := int(fheight)
	if fheight < 300 {
		height1 += 100 // same for height
	}
	scene.AddItem(item)
	//fmt.Printf(" displayImageByNumber %s is %d wide and %d high \n", imageFileName, width, height)
	//displayArea.AdjustSize() didn't do anything
	displayArea.Resize2(width1, height1) // slightly too small.
	// displayArea.SetContentsMargins(0,0,width,height) Doesn't do what I want.
	//displayArea.Scroll(-width/2, -height/2) Doesn't do what I want, at all.
	displayArea.Show()
}
04/12/2020 02:28:59 PM
picviewer2/picviewer2.go -- works as intended, mostly
// picviewer2.go
package main
/*
REVISION HISTORY
======== =======
7 Apr 20 -- Now called picviewer2.go. I'm going to try the image reading trick I learned from the Qt example imageviewer.
9 Apr 20 -- Will try to handle arrow keys.
11 Apr 20 -- Won't handle arrow keys that I can get to work. Will use N and B, I think.
12 Apr 20 -- Now that the keys are working, I don't need a pushbutton.
And will make less dependent on globals.
*/
import (
"fmt"
"github.com/therecipe/qt/core"
"github.com/therecipe/qt/gui"
"github.com/therecipe/qt/widgets"
"io/ioutil"
"log"
"math"
"os"
"sort"
"strings"
)
// Package-level state shared between the Qt widgets and the key handlers.
var (
	displayArea *widgets.QWidget
	scene *widgets.QGraphicsScene
	view *widgets.QGraphicsView
	item *widgets.QGraphicsPixmapItem
	mainApp *widgets.QApplication
	imageFileName string
	picfiles sort.StringSlice // sorted names of all picture files in the working directory
	currImgIdx int // index into picfiles of the image currently shown
	origImgIdx int // index of the image the program started with
	prevImgIdx int // index shown immediately before the current one
)
// imageViewer builds the main display widget: a graphics scene holding the
// current image (animated GIFs get a QMovie in a QLabel, everything else a
// QPixmap item), wired up with the keyboard handler.
func imageViewer() *widgets.QWidget {
	displayArea = widgets.NewQWidget(nil, 0)
	scene = widgets.NewQGraphicsScene(displayArea)
	view = widgets.NewQGraphicsView(displayArea)

	// Create the reader BEFORE configuring it.  The old code called
	// SetAutoTransform on a nil *QImageReader and only then created the
	// reader, so the call could crash and the setting was lost either way.
	imageReader := gui.NewQImageReader3(imageFileName, core.NewQByteArray2("", 0))
	imageReader.SetAutoTransform(true)

	// test to see if we are dealing with animated GIF
	fmt.Println("Animated GIF : ", imageReader.SupportsAnimation())

	if imageReader.SupportsAnimation() {
		// instead of reading from file(disk) again, we take from memory
		// HOWEVER, this will cause segmentation violation error ! :(
		//var movie = gui.NewQMovieFromPointer(imageReader.Pointer())
		var movie = gui.NewQMovie3(imageFileName, core.NewQByteArray2("", 0), nil)
		// see http://stackoverflow.com/questions/5769766/qt-how-to-show-gifanimated-image-in-qgraphicspixmapitem
		var movieLabel = widgets.NewQLabel(nil, core.Qt__Widget)
		movieLabel.SetMovie(movie)
		movie.Start()
		scene.AddWidget(movieLabel, core.Qt__Widget)
	} else {
		var pixmap = gui.NewQPixmap5(imageFileName, "", core.Qt__AutoColor) // this was changed fromNewQPixmap3 in before I had to redo Qt and therecipe.
		width := pixmap.Width()
		height := pixmap.Height()
		fmt.Printf(" Image from file %s is %d wide and %d high \n", imageFileName, width, height)
		item = widgets.NewQGraphicsPixmapItem2(pixmap, nil)
		scene.AddItem(item)
	}
	view.SetScene(scene)

	var layout = widgets.NewQVBoxLayout()
	layout.AddWidget(view, 0, core.Qt__AlignCenter)
	displayArea.SetLayout(layout) // displayArea itself has no AddItem method, so a layout is required.

	// Must test combo keys before indiv keys, as indiv key tests ignore the modifiers.
	// I discovered that testing N before Ctrl-N always found N and never ctrl-N.
	arrowEventclosure := func(ev *gui.QKeyEvent) {
		if false { // only keys without events will still call qmessagebox
			// do nothing, just so I can test this.
		} else if ev.Matches(gui.QKeySequence__New) { // ctrl-n: next picture
			i := nextPic(currImgIdx)
			currImgIdx = i
			displayImageByNumber(i)
		} else if ev.Matches(gui.QKeySequence__Quit) { // ctrl-q
			mainApp.Quit()
		} else if ev.Matches(gui.QKeySequence__Cancel) { // ESC
			mainApp.Quit()
		} else if ev.Matches(gui.QKeySequence__Open) { // ctrl-o: swap back to the original image
			origImgIdx, currImgIdx = currImgIdx, origImgIdx
			displayImageByNumber(currImgIdx)
		} else if ev.Matches(gui.QKeySequence__HelpContents) { // F1
			widgets.QMessageBox_Information(nil, "key Help", "F1 key kit", widgets.QMessageBox__Ok, widgets.QMessageBox__Ok)
		} else if ev.Key() == int(core.Qt__Key_B) { // B: previous picture
			i := prevPic(currImgIdx)
			currImgIdx = i
			displayImageByNumber(i)
		} else if ev.Key() == int(core.Qt__Key_N) { // N: next picture
			i := nextPic(currImgIdx)
			currImgIdx = i
			displayImageByNumber(i)
		} else if ev.Key() == int(core.Qt__Key_Q) { // Q: quit
			mainApp.Quit()
		}
	}
	displayArea.ConnectKeyPressEvent(arrowEventclosure)

	return displayArea
}
// main shows the image named on the command line, then builds the sorted
// list of picture files in the working directory so N/B can step through it.
func main() {
	if len(os.Args) != 2 {
		fmt.Printf("Usage : %s <image file>\n", os.Args[0])
		os.Exit(0)
	}
	imageFileName = os.Args[1]
	fmt.Println("Loading image : ", imageFileName)

	mainApp = widgets.NewQApplication(len(os.Args), os.Args)
	imageViewer().Show()

	workingdir, err := os.Getwd()
	if err != nil { // previously ignored; fall back to "." so ReadDir still works
		log.Println(err, "from os.Getwd; using \".\"")
		workingdir = "."
	}

	// populate the string slice of all picture filenames, and the index in this slice of the initial displayed image.
	files, err := ioutil.ReadDir(workingdir)
	if err != nil { // It seems that ReadDir itself stops when it gets an error of any kind, and I cannot change that.
		log.Println(err, "so calling my own MyReadDir.")
		files = MyReadDir(workingdir)
	}

	picfiles = make(sort.StringSlice, 0, len(files))
	for _, f := range files {
		if isPicFile(f.Name()) {
			picfiles = append(picfiles, f.Name())
		}
	}
	picfiles.Sort()

	currImgIdx = picfiles.Search(imageFileName)
	// Search returns the insertion point, which can be past the end (or name
	// a different file) when imageFileName is not in the list; clamp so later
	// indexing of picfiles cannot panic.
	if currImgIdx >= len(picfiles) {
		log.Println(imageFileName, "not found among picture files; starting at index 0.")
		currImgIdx = 0
	}
	fmt.Println(" Current image index in the picfiles slice is", currImgIdx, "; there are", len(picfiles), "picture files in", workingdir)
	origImgIdx = currImgIdx

	widgets.QApplication_Exec()
} // end main
// ------------------------------- MyReadDir -----------------------------------
// MyReadDir reads directory dir and returns FileInfo entries for its
// contents, skipping entries that cannot be stat'ed.  It returns nil if
// the directory cannot be opened or its names cannot be read.
func MyReadDir(dir string) []os.FileInfo {
	dirname, err := os.Open(dir)
	if err != nil {
		return nil
	}
	defer dirname.Close()

	names, err := dirname.Readdirnames(0) // zero means read all names into the returned []string
	if err != nil {
		return nil
	}

	fi := make([]os.FileInfo, 0, len(names))
	for _, s := range names {
		// Stat relative to dir, not the process working directory; the old
		// os.Lstat(s) only worked when dir happened to be the cwd.
		info, err := os.Lstat(dir + string(os.PathSeparator) + s)
		if err != nil {
			log.Println(" Error from os.Lstat ", err)
			continue
		}
		fi = append(fi, info)
	}
	return fi
} // MyReadDir
// ---------------------------- isPicFile ------------------------------
// isPicFile reports whether filename has a recognized picture extension.
// The match is case-insensitive.  The extension for GIMP files is ".xcf";
// the old bare "xcf" matched any name merely ending in those letters.
func isPicFile(filename string) bool {
	picext := []string{".jpg", ".png", ".jpeg", ".gif", ".xcf"}
	lower := strings.ToLower(filename)
	for _, ext := range picext {
		if strings.HasSuffix(lower, ext) {
			return true
		}
	}
	return false
}
// arrowEvent := gui.NewQKeyEvent(core.QEvent__KeyPress, int(core.Qt__Key_Up), core.Qt__NoModifier, "", false, 0)
// displayArea.ConnectKeyPressEvent(func(ev *gui.QKeyEvent) { This doesn't work.
// widgets.QMessageBox_Information(nil, "OK", "Up arrow key kit", widgets.QMessageBox__Ok, widgets.QMessageBox__Ok)
// })(arrowEvent)
// -------------------------- NextPic --------------------------------
// nextPic returns the index of the picture after i, staying put when i is
// already at (or beyond) the last valid index of picfiles.
func nextPic(i int) int {
	if i+1 < len(picfiles) {
		return i + 1
	}
	return i
}
// ------------------------- PrevPic -------------------------------
// prevPic returns the index of the picture before i, never going below 0.
// Fixes the gofmt-noncompliant parenthesized result list `(int)`.
func prevPic(i int) int {
	if i > 0 {
		return i - 1
	}
	return i
}
// ------------------------- DisplayImageByNumber ----------------------
// displayImageByNumber shows picture i: loads picfiles[i] into a QPixmap,
// swaps it into the scene, and resizes the window to roughly fit the image.
// NOTE: statement order matters -- the old item must be removed from the
// scene before `item` is reassigned, and the new item added afterwards.
func displayImageByNumber(i int) {
	currImgIdx = i
	imageFileName = picfiles[currImgIdx]
	fmt.Println(" in displayImageByNumber. currImgIdx=", currImgIdx, ", imageFileName=", imageFileName)
	var pic = gui.NewQPixmap5(imageFileName, "", core.Qt__AutoColor)
	scene.RemoveItem(item)
	item = widgets.NewQGraphicsPixmapItem2(pic, nil)
	width := pic.Width()
	height := pic.Height()
	// Window is sized ~10% larger than the image in each dimension.
	var fwidth float64 = math.Trunc(float64(width) * 1.1)
	var fheight float64 = math.Trunc(float64(height) * 1.1)
	fmt.Printf(" displayImageByNumber %s is %d wide and %d high, goes to %g wide and %g high \n",
		imageFileName, width, height, fwidth, fheight)
	width1 := int(fwidth)
	if fwidth < 300 {
		width1 += 100 // widen the window for small images
	}
	height1 := int(fheight)
	if fheight < 300 {
		height1 += 100 // same for height
	}
	scene.AddItem(item)
	//fmt.Printf(" displayImageByNumber %s is %d wide and %d high \n", imageFileName, width, height)
	//displayArea.AdjustSize() didn't do anything
	displayArea.Resize2(width1, height1) // slightly too small.
	// displayArea.SetContentsMargins(0,0,width,height) Doesn't do what I want.
	//displayArea.Scroll(-width/2, -height/2) Doesn't do what I want, at all.
	displayArea.Show()
}
package main
// +build -tags netgo -a
import (
"bitbucket.org/strings/via/pkg"
"bytes"
"errors"
"flag"
"fmt"
"github.com/str1ngs/util"
"github.com/str1ngs/util/console/command"
"github.com/str1ngs/util/file"
"github.com/str1ngs/util/json"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
)
// Command-line flags and the shared via configuration.
var (
	root = flag.String("r", "/", "root directory")
	verbose = flag.Bool("v", false, "verbose output")
	finstall = flag.Bool("i", false, "install package after build")
	fdebug = flag.Bool("d", false, "debug output")
	config = via.GetConfig()
	fclean = flag.Bool("c", false, "clean before build")
)
// main wires up flags and subcommands, then dispatches the subcommand named
// on the command line.
func main() {
	flag.Parse()
	via.Verbose(*verbose)
	via.Root(*root)
	util.Verbose = *verbose
	via.Debug(*fdebug)
	command.Add("add", add, "add plan/s to git index")
	command.Add("checkout", checkout, "changes plan branch")
	command.Add("cd", cd, "returns a bash evaluable cd path")
	command.Add("diff", diff, "prints git diff for plan(s)")
	command.Add("branch", branch, "prints plan branch to stdout")
	command.Add("build", build, "build plan")
	command.Add("clean", clean, "clean build dir")
	command.Add("create", create, "create plan from URL")
	command.Add("edit", edit, "calls EDITOR to edit plan")
	command.Add("list", list, "lists files")
	command.Add("install", install, "install package")
	command.Add("lint", lint, "lint plans")
	command.Add("log", plog, "print config log for plan")
	command.Add("repo", repo, "update repo")
	command.Add("owns", owns, "finds which package owns a file")
	command.Add("sync", sync, "fetch remote repo data")
	command.Add("search", search, "search for plans (currently lists all use grep)")
	command.Add("pack", pack, "package plan")
	command.Add("remove", remove, "remove package")
	command.Add("show", fnShow, "prints plan to stdout")
	command.Add("config", fnConfig, "prints config to stdout")
	command.Add("elf", elf, "prints elf information to stdout")
	if *fdebug {
		path, _ := os.LookupEnv("PATH")
		fmt.Println("PATH", path)
		which("GCC", "gcc")
	}
	// Redundant trailing return removed (staticcheck S1023).
	if err := command.Run(); err != nil {
		log.Fatal(err)
	}
}
// which prints label followed by the location of path as reported by the
// `which` utility; lookup failures are logged, not fatal.
func which(label, path string) {
	fmt.Printf("%s ", label) // was hard-coded "GCC ", silently ignoring label
	cmd := exec.Command("which", path)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		log.Println(err)
	}
}
// cd prints a bash-evaluable cd command for a named config path.
func cd() error {
	if len(command.Args()) < 1 {
		return errors.New("you need to specify a config path")
	}
	arg := command.Args()[0]
	switch arg {
	case "plans":
		fmt.Printf("cd %s", config.Plans)
	default:
		// fmt.Errorf replaces the errors.New(fmt.Sprintf(...)) pattern.
		return fmt.Errorf("config path %s not found", arg)
	}
	return nil
}
// add stages each named plan's file(s) in the plans git repository.
func add() error {
	if len(command.Args()) < 1 {
		return errors.New("no plans specified")
	}
	for _, arg := range command.Args() {
		glob := filepath.Join(config.Plans, "*", arg+".json")
		res, err := filepath.Glob(glob)
		if err != nil {
			return err
		}
		// Pass each matched path as its own argv entry.  The old
		// strings.Join(res, " ") handed git ONE space-joined argument,
		// which breaks with multiple matches or paths containing spaces.
		git := exec.Command("git", append([]string{"add"}, res...)...)
		git.Dir = config.Plans
		git.Stdout = os.Stdout
		git.Stderr = os.Stderr
		if err = git.Run(); err != nil {
			return err
		}
	}
	return nil
}
// diff prints the git diff for each named plan's file(s).
// NOTE(review): strings.Join hands git every matched path as ONE argument;
// with multiple matches, or a path containing a space, git sees a bogus
// path.  Should pass res... as separate arguments -- TODO confirm and fix.
func diff() error {
	if len(command.Args()) < 1 {
		return errors.New("no plans specified")
	}
	for _, arg := range command.Args() {
		glob := filepath.Join(config.Plans, "*", arg+".json")
		res, err := filepath.Glob(glob)
		if err != nil {
			return err
		}
		git := exec.Command("git", "diff", strings.Join(res, " "))
		git.Dir = config.Plans
		git.Stdout = os.Stdout
		git.Stderr = os.Stderr
		err = git.Run()
		if err != nil {
			return err
		}
	}
	return nil
}
// checkout switches the plans git repository to the named branch.
func checkout() error {
	args := command.Args()
	if len(args) < 1 {
		return errors.New("git branch needs to be specified")
	}
	cmd := exec.Command("git", "checkout", args[0])
	cmd.Dir = config.Plans
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
// branch prints the branches of the plans git repository.
func branch() error {
	cmd := exec.Command("git", "branch")
	cmd.Dir = config.Plans
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	return cmd.Run()
}
// edit launches $EDITOR on the named plan's file, or on the global config
// file when the argument is "config".
func edit() error {
	if len(command.Args()) < 1 {
		// Guard: command.Args()[0] below would panic without an argument.
		return errors.New("no plan specified")
	}
	var (
		editor = os.Getenv("EDITOR")
		arg0 = command.Args()[0]
		p = path.Join(config.Plans, "config.json")
		err error
	)
	if arg0 != "config" {
		p, err = via.FindPlanPath(arg0)
		if err != nil {
			return err
		}
	}
	cmd := exec.Command(editor, p)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
// create makes a new plan from each URL argument.
func create() error {
	for _, url := range command.Args() {
		if err := via.Create(url); err != nil {
			return err
		}
	}
	return nil
}
// plog prints the config.log from each named plan's build directory.
func plog() error {
	for _, arg := range command.Args() {
		plan, err := via.NewPlan(arg)
		if err != nil {
			return err
		}
		f := path.Join(plan.BuildDir(), "config.log")
		// Return the error instead of log.Fatal so the caller reports it
		// the same way as every other subcommand failure.
		if err := file.Cat(os.Stdout, f); err != nil {
			return err
		}
	}
	return nil
}
// build builds each named plan, optionally cleaning first (-c) and
// installing afterwards (-i).
func build() error {
	for _, arg := range command.Args() {
		if *fclean {
			// Cleaning stays best-effort, but failures are now logged
			// instead of silently discarded.
			if err := via.Clean(arg); err != nil {
				log.Println(err)
			}
		}
		plan, err := via.NewPlan(arg)
		if err != nil {
			return err
		}
		if err := via.BuildSteps(plan); err != nil {
			return err
		}
		if *finstall {
			if err := via.Install(plan.Name); err != nil {
				return err
			}
		}
	}
	return nil
}
// pack packages each named plan.
func pack() error {
	for _, name := range command.Args() {
		plan, err := via.NewPlan(name)
		if err != nil {
			return err
		}
		if err := via.Package("", plan); err != nil {
			return err
		}
	}
	return nil
}
// list prints the files belonging to each named plan.
func list() error {
	for _, name := range command.Args() {
		plan, err := via.NewPlan(name)
		if err != nil {
			return err
		}
		for _, owned := range plan.Files {
			fmt.Println(owned)
		}
	}
	return nil
}
// install installs each package named on the command line.
func install() error {
	return command.ArgsDo(via.Install)
}
// remove uninstalls each package named on the command line.
func remove() error {
	return command.ArgsDo(via.Remove)
}
// lint checks all plans for problems.
func lint() error {
	return via.Lint()
}
// fnShow pretty-prints each named plan, paged through less.
func fnShow() error {
	for _, arg := range command.Args() {
		plan, err := via.NewPlan(arg)
		if err != nil {
			// Was log.Fatal; return so the caller reports it like
			// every other subcommand failure.
			return err
		}
		buf := new(bytes.Buffer)
		less := exec.Command("less")
		less.Stdin = buf
		less.Stdout = os.Stdout
		less.Stderr = os.Stderr
		if err := json.WritePretty(&plan, buf); err != nil {
			return err
		}
		// less.Run's error was previously ignored.
		if err := less.Run(); err != nil {
			return err
		}
	}
	return nil
}
// fnConfig pretty-prints the current configuration to stdout.
func fnConfig() error {
	return json.WritePretty(via.GetConfig(), os.Stdout)
}
// clean removes the build directory of each named plan.
func clean() error {
	return command.ArgsDo(via.Clean)
}
// elf prints ELF information for each file argument.
func elf() error {
	for _, target := range command.Args() {
		if err := via.Readelf(target); err != nil {
			return err
		}
	}
	return nil
}
// sync fetches remote repo data.
func sync() error {
	return via.PlanSync()
}
// owns reports which package owns each file argument.
func owns() error {
	rfiles, err := via.ReadRepoFiles()
	if err != nil {
		return err
	}
	for _, target := range command.Args() {
		owner := rfiles.Owns(target)
		if owner == "" {
			fmt.Println(target+":", "owner not found.")
			continue
		}
		fmt.Println(owner)
	}
	return nil
}
// repo regenerates the repo metadata.
func repo() error {
	return via.RepoCreate()
}
// search lists all plans, sorted by size.
func search() error {
	plans, err := via.GetPlans()
	if err == nil {
		plans.SortSize().Print()
	}
	return err
}
sort commands
package main
// +build -tags netgo -a
import (
"bitbucket.org/strings/via/pkg"
"bytes"
"errors"
"flag"
"fmt"
"github.com/str1ngs/util"
"github.com/str1ngs/util/console/command"
"github.com/str1ngs/util/file"
"github.com/str1ngs/util/json"
"io"
"log"
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
)
// Command-line flags and the shared via configuration.
var (
	root = flag.String("r", "/", "root directory")
	verbose = flag.Bool("v", false, "verbose output")
	finstall = flag.Bool("i", false, "install package after build")
	fdebug = flag.Bool("d", false, "debug output")
	config = via.GetConfig()
	fclean = flag.Bool("c", false, "clean before build")
)
// main wires up flags and subcommands (registered in alphabetical order),
// then dispatches the subcommand named on the command line.
func main() {
	flag.Parse()
	via.Verbose(*verbose)
	via.Root(*root)
	util.Verbose = *verbose
	via.Debug(*fdebug)
	command.Add("add", add, "add plan/s to git index")
	command.Add("branch", branch, "prints plan branch to stdout")
	command.Add("build", build, "build plan")
	command.Add("cd", cd, "returns a bash evaluable cd path")
	command.Add("checkout", checkout, "changes plan branch")
	command.Add("clean", clean, "clean build dir")
	command.Add("config", fnConfig, "prints config to stdout")
	command.Add("create", create, "create plan from URL")
	command.Add("diff", diff, "prints git diff for plan(s)")
	command.Add("edit", edit, "calls EDITOR to edit plan")
	command.Add("elf", elf, "prints elf information to stdout")
	command.Add("install", install, "install package")
	command.Add("lint", lint, "lint plans")
	command.Add("list", list, "lists files")
	command.Add("log", plog, "print config log for plan")
	command.Add("ipfs", ipfs, "test ipfs connection")
	command.Add("owns", owns, "finds which package owns a file")
	command.Add("pack", pack, "package plan")
	command.Add("remove", remove, "remove package")
	command.Add("repo", repo, "update repo")
	command.Add("search", search, "search for plans (currently lists all use grep)")
	command.Add("show", fnShow, "prints plan to stdout")
	command.Add("sync", sync, "fetch remote repo data")
	if *fdebug {
		path, _ := os.LookupEnv("PATH")
		fmt.Println("PATH", path)
		which("GCC", "gcc")
	}
	// Redundant trailing return removed (staticcheck S1023).
	if err := command.Run(); err != nil {
		log.Fatal(err)
	}
}
// which prints label followed by the location of path as reported by the
// `which` utility; lookup failures are logged, not fatal.
func which(label, path string) {
	fmt.Printf("%s ", label) // was hard-coded "GCC ", silently ignoring label
	cmd := exec.Command("which", path)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		log.Println(err)
	}
}
// ipfs fetches config.Binary over HTTP and streams the response body to
// stdout as a connectivity test.
func ipfs() error {
	res, err := http.Get(config.Binary)
	if err != nil {
		// The old code used res.Body before checking err, which panics
		// on a nil response when the request fails.
		return err
	}
	defer res.Body.Close() // was never closed, leaking the connection
	_, err = io.Copy(os.Stdout, res.Body)
	return err
}
// cd prints a bash-evaluable cd command for a named config path.
func cd() error {
	if len(command.Args()) < 1 {
		return errors.New("you need to specify a config path")
	}
	arg := command.Args()[0]
	switch arg {
	case "plans":
		fmt.Printf("cd %s", config.Plans)
	default:
		// fmt.Errorf replaces the errors.New(fmt.Sprintf(...)) pattern.
		return fmt.Errorf("config path %s not found", arg)
	}
	return nil
}
// add stages each named plan's file(s) in the plans git repository.
func add() error {
	if len(command.Args()) < 1 {
		return errors.New("no plans specified")
	}
	for _, arg := range command.Args() {
		glob := filepath.Join(config.Plans, "*", arg+".json")
		res, err := filepath.Glob(glob)
		if err != nil {
			return err
		}
		// Pass each matched path as its own argv entry.  The old
		// strings.Join(res, " ") handed git ONE space-joined argument,
		// which breaks with multiple matches or paths containing spaces.
		git := exec.Command("git", append([]string{"add"}, res...)...)
		git.Dir = config.Plans
		git.Stdout = os.Stdout
		git.Stderr = os.Stderr
		if err = git.Run(); err != nil {
			return err
		}
	}
	return nil
}
// diff prints the git diff for each named plan's file(s).
// NOTE(review): strings.Join hands git every matched path as ONE argument;
// with multiple matches, or a path containing a space, git sees a bogus
// path.  Should pass res... as separate arguments -- TODO confirm and fix.
func diff() error {
	if len(command.Args()) < 1 {
		return errors.New("no plans specified")
	}
	for _, arg := range command.Args() {
		glob := filepath.Join(config.Plans, "*", arg+".json")
		res, err := filepath.Glob(glob)
		if err != nil {
			return err
		}
		git := exec.Command("git", "diff", strings.Join(res, " "))
		git.Dir = config.Plans
		git.Stdout = os.Stdout
		git.Stderr = os.Stderr
		err = git.Run()
		if err != nil {
			return err
		}
	}
	return nil
}
// checkout switches the plans git repository to the named branch.
func checkout() error {
	args := command.Args()
	if len(args) < 1 {
		return errors.New("git branch needs to be specified")
	}
	cmd := exec.Command("git", "checkout", args[0])
	cmd.Dir = config.Plans
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
// branch prints the branches of the plans git repository.
func branch() error {
	cmd := exec.Command("git", "branch")
	cmd.Dir = config.Plans
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	return cmd.Run()
}
// edit launches $EDITOR on the named plan's file, or on the global config
// file when the argument is "config".
func edit() error {
	if len(command.Args()) < 1 {
		// Guard: command.Args()[0] below would panic without an argument.
		return errors.New("no plan specified")
	}
	var (
		editor = os.Getenv("EDITOR")
		arg0 = command.Args()[0]
		p = path.Join(config.Plans, "config.json")
		err error
	)
	if arg0 != "config" {
		p, err = via.FindPlanPath(arg0)
		if err != nil {
			return err
		}
	}
	cmd := exec.Command(editor, p)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
// create makes a new plan from each URL argument.
func create() error {
	for _, url := range command.Args() {
		if err := via.Create(url); err != nil {
			return err
		}
	}
	return nil
}
// plog prints the config.log from each named plan's build directory.
func plog() error {
	for _, arg := range command.Args() {
		plan, err := via.NewPlan(arg)
		if err != nil {
			return err
		}
		f := path.Join(plan.BuildDir(), "config.log")
		// Return the error instead of log.Fatal so the caller reports it
		// the same way as every other subcommand failure.
		if err := file.Cat(os.Stdout, f); err != nil {
			return err
		}
	}
	return nil
}
// build builds each named plan, optionally cleaning first (-c) and
// installing afterwards (-i).
func build() error {
	for _, arg := range command.Args() {
		if *fclean {
			// Cleaning stays best-effort, but failures are now logged
			// instead of silently discarded.
			if err := via.Clean(arg); err != nil {
				log.Println(err)
			}
		}
		plan, err := via.NewPlan(arg)
		if err != nil {
			return err
		}
		if err := via.BuildSteps(plan); err != nil {
			return err
		}
		if *finstall {
			if err := via.Install(plan.Name); err != nil {
				return err
			}
		}
	}
	return nil
}
// pack packages each named plan.
func pack() error {
	for _, name := range command.Args() {
		plan, err := via.NewPlan(name)
		if err != nil {
			return err
		}
		if err := via.Package("", plan); err != nil {
			return err
		}
	}
	return nil
}
// list prints the files belonging to each named plan.
func list() error {
	for _, name := range command.Args() {
		plan, err := via.NewPlan(name)
		if err != nil {
			return err
		}
		for _, owned := range plan.Files {
			fmt.Println(owned)
		}
	}
	return nil
}
// install installs each package named on the command line.
func install() error {
	return command.ArgsDo(via.Install)
}
// remove uninstalls each package named on the command line.
func remove() error {
	return command.ArgsDo(via.Remove)
}
// lint checks all plans for problems.
func lint() error {
	return via.Lint()
}
// fnShow pretty-prints each named plan, paged through less.
func fnShow() error {
	for _, arg := range command.Args() {
		plan, err := via.NewPlan(arg)
		if err != nil {
			// Was log.Fatal; return so the caller reports it like
			// every other subcommand failure.
			return err
		}
		buf := new(bytes.Buffer)
		less := exec.Command("less")
		less.Stdin = buf
		less.Stdout = os.Stdout
		less.Stderr = os.Stderr
		if err := json.WritePretty(&plan, buf); err != nil {
			return err
		}
		// less.Run's error was previously ignored.
		if err := less.Run(); err != nil {
			return err
		}
	}
	return nil
}
// fnConfig pretty-prints the current configuration to stdout.
func fnConfig() error {
	return json.WritePretty(via.GetConfig(), os.Stdout)
}
// clean removes the build directory of each named plan.
func clean() error {
	return command.ArgsDo(via.Clean)
}
// elf prints ELF information for each file argument.
func elf() error {
	for _, target := range command.Args() {
		if err := via.Readelf(target); err != nil {
			return err
		}
	}
	return nil
}
// sync fetches remote repo data.
func sync() error {
	return via.PlanSync()
}
// owns reports which package owns each file argument.
func owns() error {
	rfiles, err := via.ReadRepoFiles()
	if err != nil {
		return err
	}
	for _, target := range command.Args() {
		owner := rfiles.Owns(target)
		if owner == "" {
			fmt.Println(target+":", "owner not found.")
			continue
		}
		fmt.Println(owner)
	}
	return nil
}
// repo regenerates the repo metadata.
func repo() error {
	return via.RepoCreate()
}
// search lists all plans, sorted by size.
func search() error {
	plans, err := via.GetPlans()
	if err == nil {
		plans.SortSize().Print()
	}
	return err
}
|
package util
import (
"fmt"
suggestionsv1alpha3 "github.com/kubeflow/katib/pkg/apis/controller/suggestions/v1alpha3"
"github.com/kubeflow/katib/pkg/controller.v1alpha3/consts"
)
// GetAlgorithmDeploymentName returns the deployment name derived from the
// suggestion name and its algorithm name.
func GetAlgorithmDeploymentName(s *suggestionsv1alpha3.Suggestion) string {
	return fmt.Sprintf("%s-%s", s.Name, s.Spec.AlgorithmName)
}
// GetAlgorithmServiceName returns the service name derived from the
// suggestion name and its algorithm name.
func GetAlgorithmServiceName(s *suggestionsv1alpha3.Suggestion) string {
	return fmt.Sprintf("%s-%s", s.Name, s.Spec.AlgorithmName)
}
// GetAlgorithmEndpoint returns the endpoint of the algorithm service.
// The namespace is included (<service>.<namespace>:<port>) so the address
// resolves from other namespaces; a bare service name only resolves from
// within the suggestion's own namespace.
func GetAlgorithmEndpoint(s *suggestionsv1alpha3.Suggestion) string {
	serviceName := GetAlgorithmServiceName(s)
	return fmt.Sprintf("%s.%s:%d",
		serviceName,
		s.Namespace,
		consts.DefaultSuggestionPort)
}
fix: Support multiple namespaces (#826)
Signed-off-by: Ce Gao <1d82c7e569a19de560a5cb3a123e2745e610ad0f@caicloud.io>
package util
import (
"fmt"
suggestionsv1alpha3 "github.com/kubeflow/katib/pkg/apis/controller/suggestions/v1alpha3"
"github.com/kubeflow/katib/pkg/controller.v1alpha3/consts"
)
// GetAlgorithmDeploymentName returns the deployment name derived from the
// suggestion name and its algorithm name.
func GetAlgorithmDeploymentName(s *suggestionsv1alpha3.Suggestion) string {
	return fmt.Sprintf("%s-%s", s.Name, s.Spec.AlgorithmName)
}
// GetAlgorithmServiceName returns the service name derived from the
// suggestion name and its algorithm name.
func GetAlgorithmServiceName(s *suggestionsv1alpha3.Suggestion) string {
	return fmt.Sprintf("%s-%s", s.Name, s.Spec.AlgorithmName)
}
// GetAlgorithmEndpoint returns the namespaced endpoint
// (<service>.<namespace>:<port>) of the suggestion's algorithm service.
func GetAlgorithmEndpoint(s *suggestionsv1alpha3.Suggestion) string {
	name := GetAlgorithmServiceName(s)
	return fmt.Sprintf("%s.%s:%d", name, s.Namespace, consts.DefaultSuggestionPort)
}
|
package reconciler
import (
"context"
"fmt"
"time"
controllerclient "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/controller-runtime/client"
"github.com/operator-framework/api/pkg/operators/v1alpha1"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorclient"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorlister"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
)
const (
	// CatalogSourceUpdateKey labels pods created to check a polled catalog
	// source for updates.
	CatalogSourceUpdateKey = "catalogsource.operators.coreos.com/update"
	// CatalogPollingRequeuePeriod is presumably the delay before a polled
	// catalog sync is retried -- confirm at the usage sites.
	CatalogPollingRequeuePeriod = 30 * time.Second
)
// grpcCatalogSourceDecorator wraps CatalogSource to add additional methods
// (selectors, labels, and builders for the backing Service and Pod).
type grpcCatalogSourceDecorator struct {
	*v1alpha1.CatalogSource
}
// UpdateNotReadyErr signals that a catalog's update pod exists but has not
// yet reported ready; callers pass it through so the sync can be requeued.
type UpdateNotReadyErr struct {
	catalogName string // name of the catalog source being polled
	podName string // name of the not-yet-ready update pod
}
// Error implements the error interface, naming the catalog whose update pod
// has not yet reported ready.
func (e UpdateNotReadyErr) Error() string {
	return fmt.Sprintf("catalog polling: %s not ready for update: update pod %s has not yet reported ready", e.catalogName, e.podName)
}
// Selector matches the serving registry pods labeled for this catalog source.
func (s *grpcCatalogSourceDecorator) Selector() labels.Selector {
	set := map[string]string{CatalogSourceLabelKey: s.GetName()}
	return labels.SelectorFromValidatedSet(set)
}
// SelectorForUpdate matches the update pods labeled for this catalog source.
func (s *grpcCatalogSourceDecorator) SelectorForUpdate() labels.Selector {
	set := map[string]string{CatalogSourceUpdateKey: s.GetName()}
	return labels.SelectorFromValidatedSet(set)
}
// Labels returns the label set applied to this catalog source's serving pods.
func (s *grpcCatalogSourceDecorator) Labels() map[string]string {
	return map[string]string{
		CatalogSourceLabelKey: s.GetName(),
	}
}
// Service builds the gRPC Service (port 50051) selecting this catalog
// source's serving pods, with the CatalogSource recorded as owner.
func (s *grpcCatalogSourceDecorator) Service() *v1.Service {
	grpcPort := v1.ServicePort{
		Name: "grpc",
		Port: 50051,
		TargetPort: intstr.FromInt(50051),
	}
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: s.GetName(),
			Namespace: s.GetNamespace(),
		},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{grpcPort},
			Selector: s.Labels(),
		},
	}
	ownerutil.AddOwner(svc, s.CatalogSource, false, false)
	return svc
}
// Pod builds the registry-server pod for this catalog source's image, with
// the CatalogSource recorded as owner.
func (s *grpcCatalogSourceDecorator) Pod() *v1.Pod {
	p := Pod(s.CatalogSource, "registry-server", s.Spec.Image, s.Labels(), 5, 10)
	ownerutil.AddOwner(p, s.CatalogSource, false, false)
	return p
}
// GrpcRegistryReconciler reconciles the registry-server resources (serving
// pod, update pod, and service) backing a gRPC CatalogSource.
type GrpcRegistryReconciler struct {
	now nowFunc // supplies timestamps; presumably injectable for tests -- confirm
	Lister operatorlister.OperatorLister
	OpClient operatorclient.ClientInterface
	SSAClient *controllerclient.ServerSideApplier
}

// Compile-time check that GrpcRegistryReconciler satisfies RegistryReconciler.
var _ RegistryReconciler = &GrpcRegistryReconciler{}
// currentService returns the catalog's Service from the lister cache, or
// nil when it cannot be found there.
func (c *GrpcRegistryReconciler) currentService(source grpcCatalogSourceDecorator) *v1.Service {
	name := source.Service().GetName()
	svc, err := c.Lister.CoreV1().ServiceLister().Services(source.GetNamespace()).Get(name)
	if err != nil {
		logrus.WithField("service", name).Warn("couldn't find service in cache")
		return nil
	}
	return svc
}
// currentPods returns the catalog's serving pods from the lister cache, or
// nil on a lister error.  Multiple matches only produce a warning.
func (c *GrpcRegistryReconciler) currentPods(source grpcCatalogSourceDecorator) []*v1.Pod {
	sel := source.Selector()
	pods, err := c.Lister.CoreV1().PodLister().Pods(source.GetNamespace()).List(sel)
	if err != nil {
		logrus.WithError(err).Warn("couldn't find pod in cache")
		return nil
	}
	if len(pods) > 1 {
		logrus.WithField("selector", sel).Warn("multiple pods found for selector")
	}
	return pods
}
// currentUpdatePods returns the catalog's update pods (those labeled with
// the update key) from the lister cache, or nil on a lister error.
func (c *GrpcRegistryReconciler) currentUpdatePods(source grpcCatalogSourceDecorator) []*v1.Pod {
	pods, err := c.Lister.CoreV1().PodLister().Pods(source.GetNamespace()).List(source.SelectorForUpdate())
	if err != nil {
		logrus.WithError(err).Warn("couldn't find pod in cache")
		return nil
	}
	if len(pods) > 1 {
		// Log the selector actually used for the List above; the old code
		// logged source.Selector(), the serving-pod selector.
		logrus.WithField("selector", source.SelectorForUpdate()).Warn("multiple pods found for selector")
	}
	return pods
}
// currentPodsWithCorrectImage returns the cached pods labeled for this
// catalog source whose first container runs the currently-configured image.
func (c *GrpcRegistryReconciler) currentPodsWithCorrectImage(source grpcCatalogSourceDecorator) []*v1.Pod {
	pods, err := c.Lister.CoreV1().PodLister().Pods(source.GetNamespace()).List(labels.SelectorFromValidatedSet(source.Labels()))
	if err != nil {
		logrus.WithError(err).Warn("couldn't find pod in cache")
		return nil
	}
	found := []*v1.Pod{}
	for _, p := range pods {
		// Guard the index so a pod with no containers cannot panic the sync.
		if len(p.Spec.Containers) > 0 && p.Spec.Containers[0].Image == source.Spec.Image {
			found = append(found, p)
		}
	}
	return found
}
// EnsureRegistryServer ensures that all components of registry server are up to date.
// It (re)creates the serving pod and service as needed, drives the polled
// update-pod flow, and records the registry service status on the
// CatalogSource whenever the serving pod was (re)created.
func (c *GrpcRegistryReconciler) EnsureRegistryServer(catalogSource *v1alpha1.CatalogSource) error {
	source := grpcCatalogSourceDecorator{catalogSource}
	// if service status is nil, we force create every object to ensure they're created the first time
	overwrite := source.Status.RegistryServiceStatus == nil
	// recreate the pod if no existing pod is serving the latest image
	overwritePod := overwrite || len(c.currentPodsWithCorrectImage(source)) == 0
	//TODO: if any of these error out, we should write a status back (possibly set RegistryServiceStatus to nil so they get recreated)
	if err := c.ensurePod(source, overwritePod); err != nil {
		return errors.Wrapf(err, "error ensuring pod: %s", source.Pod().GetName())
	}
	if err := c.ensureUpdatePod(source); err != nil {
		// UpdateNotReadyErr is returned unwrapped so callers can recognize
		// it and requeue instead of treating the sync as failed.
		if _, ok := err.(UpdateNotReadyErr); ok {
			return err
		}
		return errors.Wrapf(err, "error ensuring updated catalog source pod: %s", source.Pod().GetName())
	}
	if err := c.ensureService(source, overwrite); err != nil {
		return errors.Wrapf(err, "error ensuring service: %s", source.Service().GetName())
	}
	if overwritePod {
		// Record where the (new) registry server is being served from.
		now := c.now()
		catalogSource.Status.RegistryServiceStatus = &v1alpha1.RegistryServiceStatus{
			CreatedAt: now,
			Protocol: "grpc",
			ServiceName: source.Service().GetName(),
			ServiceNamespace: source.GetNamespace(),
			Port: fmt.Sprintf("%d", source.Service().Spec.Ports[0].Port),
		}
	}
	return nil
}
// ensurePod makes sure a serving registry pod exists.  When overwrite is
// set, any live pods are deleted first (grace period 0) and a fresh pod is
// created from the current spec; otherwise existing pods are left alone.
func (c *GrpcRegistryReconciler) ensurePod(source grpcCatalogSourceDecorator, overwrite bool) error {
	// currentLivePods refers to the currently live instances of the catalog source
	currentLivePods := c.currentPods(source)
	if len(currentLivePods) > 0 {
		if !overwrite {
			return nil
		}
		for _, p := range currentLivePods {
			if err := c.OpClient.KubernetesInterface().CoreV1().Pods(source.GetNamespace()).Delete(context.TODO(), p.GetName(), *metav1.NewDeleteOptions(0)); err != nil {
				return errors.Wrapf(err, "error deleting old pod: %s", p.GetName())
			}
		}
	}
	_, err := c.OpClient.KubernetesInterface().CoreV1().Pods(source.GetNamespace()).Create(context.TODO(), source.Pod(), metav1.CreateOptions{})
	if err != nil {
		return errors.Wrapf(err, "error creating new pod: %s", source.Pod().GetGenerateName())
	}
	return nil
}
// ensureUpdatePod checks that for the same catalog source version the same container imageID is running
// For polled catalogs it creates an update pod when one is due, waits for it
// to become ready (returning UpdateNotReadyErr so the sync is requeued), and
// then either promotes the update pod (image changed) or deletes it (no
// change).
func (c *GrpcRegistryReconciler) ensureUpdatePod(source grpcCatalogSourceDecorator) error {
	if !source.Poll() {
		// Not a polled catalog; nothing to do.
		return nil
	}
	currentLivePods := c.currentPods(source)
	currentUpdatePods := c.currentUpdatePods(source)
	if source.Update() && len(currentUpdatePods) == 0 {
		logrus.WithField("CatalogSource", source.GetName()).Infof("catalog update required at %s", time.Now().String())
		pod, err := c.createUpdatePod(source)
		if err != nil {
			return errors.Wrapf(err, "creating update catalog source pod")
		}
		source.SetLastUpdateTime()
		// Signal the caller to requeue until the new update pod is ready.
		return UpdateNotReadyErr{catalogName: source.GetName(), podName: pod.GetName()}
	}
	// check if update pod is ready - if not requeue the sync
	for _, p := range currentUpdatePods {
		if !podReady(p) {
			return UpdateNotReadyErr{catalogName: source.GetName(), podName: p.GetName()}
		}
	}
	for _, updatePod := range currentUpdatePods {
		// if container imageID IDs are different, switch the serving pods
		if imageChanged(updatePod, currentLivePods) {
			err := c.promoteCatalog(updatePod, source.GetName())
			if err != nil {
				return fmt.Errorf("detected imageID change: error during update: %s", err)
			}
			// remove old catalog source pod
			err = c.removePods(currentLivePods, source.GetNamespace())
			if err != nil {
				return errors.Wrapf(err, "detected imageID change: error deleting old catalog source pod")
			}
			// done syncing
			logrus.WithField("CatalogSource", source.GetName()).Infof("detected imageID change: catalogsource pod updated at %s", time.Now().String())
			return nil
		}
		// delete update pod right away, since the digest match, to prevent long-lived duplicate catalog pods
		logrus.WithField("CatalogSource", source.GetName()).Info("catalog polling result: no update")
		err := c.removePods([]*corev1.Pod{updatePod}, source.GetNamespace())
		if err != nil {
			return errors.Wrapf(err, "error deleting duplicate catalog polling pod: %s", updatePod.GetName())
		}
	}
	return nil
}
// ensureService makes sure the registry Service for the catalog source exists,
// deleting and recreating it when overwrite is requested.
func (c *GrpcRegistryReconciler) ensureService(source grpcCatalogSourceDecorator, overwrite bool) error {
	svc := source.Service()
	if existing := c.currentService(source); existing != nil {
		if !overwrite {
			return nil
		}
		if err := c.OpClient.DeleteService(svc.GetNamespace(), svc.GetName(), metav1.NewDeleteOptions(0)); err != nil {
			return err
		}
	}
	_, err := c.OpClient.CreateService(svc)
	return err
}
// createUpdatePod is an internal method that creates a pod using the latest catalog source.
// The serving label is cleared first so the catalog service never routes traffic
// to the not-yet-promoted pod.
func (c *GrpcRegistryReconciler) createUpdatePod(source grpcCatalogSourceDecorator) (*corev1.Pod, error) {
	// remove label from pod to ensure service does not accidentally route traffic to the pod
	updatePod := swapLabels(source.Pod(), "", source.Name)

	created, err := c.OpClient.KubernetesInterface().CoreV1().Pods(source.GetNamespace()).Create(context.TODO(), updatePod, metav1.CreateOptions{})
	if err != nil {
		logrus.WithField("pod", source.Pod().GetName()).Warn("couldn't create new catalogsource pod")
		return nil, err
	}
	return created, nil
}
// imageChanged reports whether the update pod is running a different image
// (compared by container ImageID) than any of the serving pods.
func imageChanged(updatePod *corev1.Pod, servingPods []*corev1.Pod) bool {
	updateID := imageID(updatePod)
	for _, serving := range servingPods {
		servingID := imageID(serving)
		if servingID == updateID {
			continue
		}
		logrus.WithField("CatalogSource", serving.GetName()).Infof("catalog image changed: serving pod %s update pod %s", servingID, updateID)
		return true
	}
	return false
}
// imageID returns the ImageID of the primary catalog source container, or the
// empty string when no container status has been reported yet.
// Note: the pod must be running and the container in a ready status to return a valid ImageID.
func imageID(pod *corev1.Pod) string {
	// Guard against a panic: a pending pod has no container statuses yet.
	if len(pod.Status.ContainerStatuses) == 0 {
		return ""
	}
	return pod.Status.ContainerStatuses[0].ImageID
}
// removePods deletes every given pod in the namespace, stopping at the first failure.
func (c *GrpcRegistryReconciler) removePods(pods []*corev1.Pod, namespace string) error {
	podClient := c.OpClient.KubernetesInterface().CoreV1().Pods(namespace)
	for _, pod := range pods {
		if err := podClient.Delete(context.TODO(), pod.GetName(), *metav1.NewDeleteOptions(0)); err != nil {
			return errors.Wrapf(err, "error deleting pod: %s", pod.GetName())
		}
	}
	return nil
}
// CheckRegistryServer returns true if the given CatalogSource is considered healthy; false otherwise.
// Healthy means at least one pod runs the spec'd image and the registry Service exists.
func (c *GrpcRegistryReconciler) CheckRegistryServer(catalogSource *v1alpha1.CatalogSource) (healthy bool, err error) {
	source := grpcCatalogSourceDecorator{catalogSource}

	// Check on registry resources
	// TODO: add gRPC health check
	if len(c.currentPodsWithCorrectImage(source)) < 1 || c.currentService(source) == nil {
		// Direct returns instead of naked returns on named results: clearer
		// and less error-prone, same observable behavior.
		return false, nil
	}
	return true, nil
}
// promoteCatalog swaps the labels on the update pod so that the update pod is now reachable by the catalog service.
// By updating the catalog on cluster it promotes the update pod to act as the new version of the catalog on-cluster.
// key is the catalog source name, which is the value the service selector matches on.
func (c *GrpcRegistryReconciler) promoteCatalog(updatePod *corev1.Pod, key string) error {
	// Update the update pod to promote it to serving pod via the SSA client.
	// Setting the serving label routes service traffic to this pod; clearing
	// the update label removes it from the update-pod pool.
	// NOTE(review): Apply returns a function that must itself be invoked —
	// hence the trailing () — confirm against the ServerSideApplier API.
	err := c.SSAClient.Apply(context.TODO(), updatePod, func(p *v1.Pod) error {
		p.Labels[CatalogSourceLabelKey] = key
		p.Labels[CatalogSourceUpdateKey] = ""
		return nil
	})()
	return err
}
// podReady returns true if the given Pod has a PodReady condition with status True.
func podReady(pod *corev1.Pod) bool {
	// Ranging over a nil Conditions slice is a no-op, so the explicit nil
	// check the original carried is redundant and has been removed.
	for _, cond := range pod.Status.Conditions {
		if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}
// swapLabels sets the serving label and the update label on the pod to the
// given values and returns the same pod (mutated in place).
func swapLabels(pod *corev1.Pod, labelKey, updateKey string) *corev1.Pod {
	pod.Labels[CatalogSourceLabelKey], pod.Labels[CatalogSourceUpdateKey] = labelKey, updateKey
	return pod
}
fix: remove catalog update pod if it's in a failed state to prevent
cycles in the catalog reconciler.
package reconciler
import (
"context"
"fmt"
"time"
controllerclient "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/controller-runtime/client"
"github.com/operator-framework/api/pkg/operators/v1alpha1"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorclient"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorlister"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
)
const (
	// CatalogSourceUpdateKey is the pod label that marks not-yet-promoted
	// update (polling) pods for a catalog source.
	CatalogSourceUpdateKey = "catalogsource.operators.coreos.com/update"
	// CatalogPollingRequeuePeriod is the interval used when requeueing polled
	// catalog sources for an update check.
	CatalogPollingRequeuePeriod = 30 * time.Second
)
// grpcCatalogSourceDecorator wraps CatalogSource to add additional methods.
// Embedding promotes the CatalogSource accessors (GetName, GetNamespace, Spec, ...)
// onto the decorator.
type grpcCatalogSourceDecorator struct {
	*v1alpha1.CatalogSource
}
// UpdateNotReadyErr signals that a catalog update pod exists but has not yet
// reported a ready status, so the sync should be requeued.
type UpdateNotReadyErr struct {
	catalogName string
	podName     string
}

// Error implements the error interface.
func (u UpdateNotReadyErr) Error() string {
	return "catalog polling: " + u.catalogName + " not ready for update: update pod " + u.podName + " has not yet reported ready"
}
// Selector returns the label selector matching the serving registry pods for
// this catalog source.
func (s *grpcCatalogSourceDecorator) Selector() labels.Selector {
	return labels.SelectorFromValidatedSet(map[string]string{
		CatalogSourceLabelKey: s.GetName(),
	})
}
// SelectorForUpdate returns the label selector matching the not-yet-promoted
// update (polling) pods for this catalog source.
func (s *grpcCatalogSourceDecorator) SelectorForUpdate() labels.Selector {
	return labels.SelectorFromValidatedSet(map[string]string{
		CatalogSourceUpdateKey: s.GetName(),
	})
}
// Labels returns the serving-pod label set for this catalog source; it is
// applied to registry pods and used as the Service selector.
func (s *grpcCatalogSourceDecorator) Labels() map[string]string {
	return map[string]string{
		CatalogSourceLabelKey: s.GetName(),
	}
}
// Service builds the Service that exposes the registry pod's gRPC endpoint
// (port 50051) for this catalog source, selecting pods by the serving label
// and owned by the CatalogSource.
func (s *grpcCatalogSourceDecorator) Service() *v1.Service {
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      s.GetName(),
			Namespace: s.GetNamespace(),
		},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{
				{
					Name:       "grpc",
					Port:       50051,
					TargetPort: intstr.FromInt(50051),
				},
			},
			Selector: s.Labels(),
		},
	}
	// NOTE(review): the two boolean args likely control controller/blocking
	// owner-reference flags — confirm against ownerutil.AddOwner.
	ownerutil.AddOwner(svc, s.CatalogSource, false, false)
	return svc
}
// Pod builds the registry-server pod for this catalog source, labeled as a
// serving pod and owned by the CatalogSource.
func (s *grpcCatalogSourceDecorator) Pod() *v1.Pod {
	// NOTE(review): 5 and 10 are presumably probe timing parameters of the
	// shared Pod helper — confirm against its signature.
	pod := Pod(s.CatalogSource, "registry-server", s.Spec.Image, s.Labels(), 5, 10)
	ownerutil.AddOwner(pod, s.CatalogSource, false, false)
	return pod
}
// GrpcRegistryReconciler reconciles the registry resources (pod and service)
// backing grpc-type catalog sources.
type GrpcRegistryReconciler struct {
	now       nowFunc                         // injected clock, used when stamping RegistryServiceStatus
	Lister    operatorlister.OperatorLister   // read-side cache lookups
	OpClient  operatorclient.ClientInterface  // write-side Kubernetes client
	SSAClient *controllerclient.ServerSideApplier // server-side-apply client used for pod promotion
}

// Compile-time check that GrpcRegistryReconciler satisfies RegistryReconciler.
var _ RegistryReconciler = &GrpcRegistryReconciler{}
// currentService returns the cached registry Service for the source, or nil
// when it is not found in the lister cache.
func (c *GrpcRegistryReconciler) currentService(source grpcCatalogSourceDecorator) *v1.Service {
	name := source.Service().GetName()
	svc, err := c.Lister.CoreV1().ServiceLister().Services(source.GetNamespace()).Get(name)
	if err != nil {
		logrus.WithField("service", name).Warn("couldn't find service in cache")
		return nil
	}
	return svc
}
// currentPods returns the cached serving pods for the source, or nil on a
// lister error. More than one matching pod is unexpected and only warned about.
func (c *GrpcRegistryReconciler) currentPods(source grpcCatalogSourceDecorator) []*v1.Pod {
	selector := source.Selector()
	pods, err := c.Lister.CoreV1().PodLister().Pods(source.GetNamespace()).List(selector)
	if err != nil {
		logrus.WithError(err).Warn("couldn't find pod in cache")
		return nil
	}
	if len(pods) > 1 {
		logrus.WithField("selector", selector).Warn("multiple pods found for selector")
	}
	return pods
}
// currentUpdatePods returns the cached update (polling) pods for the source,
// or nil on a lister error. More than one matching pod is unexpected and only
// warned about.
func (c *GrpcRegistryReconciler) currentUpdatePods(source grpcCatalogSourceDecorator) []*v1.Pod {
	pods, err := c.Lister.CoreV1().PodLister().Pods(source.GetNamespace()).List(source.SelectorForUpdate())
	if err != nil {
		logrus.WithError(err).Warn("couldn't find pod in cache")
		return nil
	}
	if len(pods) > 1 {
		// Bug fix: log the update selector that was actually used for the
		// lookup, not the serving-pod selector.
		logrus.WithField("selector", source.SelectorForUpdate()).Warn("multiple pods found for selector")
	}
	return pods
}
// currentPodsWithCorrectImage returns the serving pods whose primary container
// image matches the image declared in the catalog source spec.
func (c *GrpcRegistryReconciler) currentPodsWithCorrectImage(source grpcCatalogSourceDecorator) []*v1.Pod {
	pods, err := c.Lister.CoreV1().PodLister().Pods(source.GetNamespace()).List(labels.SelectorFromValidatedSet(source.Labels()))
	if err != nil {
		logrus.WithError(err).Warn("couldn't find pod in cache")
		return nil
	}
	matched := make([]*v1.Pod, 0, len(pods))
	for _, pod := range pods {
		if pod.Spec.Containers[0].Image == source.Spec.Image {
			matched = append(matched, pod)
		}
	}
	return matched
}
// EnsureRegistryServer ensures that all components of registry server are up to date.
// Order matters: serving pod first, then the update-pod lifecycle, then the
// service; the registry status is stamped only after a pod was (re)created.
func (c *GrpcRegistryReconciler) EnsureRegistryServer(catalogSource *v1alpha1.CatalogSource) error {
	source := grpcCatalogSourceDecorator{catalogSource}

	// if service status is nil, we force create every object to ensure they're created the first time
	overwrite := source.Status.RegistryServiceStatus == nil
	// recreate the pod if no existing pod is serving the latest image
	overwritePod := overwrite || len(c.currentPodsWithCorrectImage(source)) == 0

	//TODO: if any of these error out, we should write a status back (possibly set RegistryServiceStatus to nil so they get recreated)
	if err := c.ensurePod(source, overwritePod); err != nil {
		return errors.Wrapf(err, "error ensuring pod: %s", source.Pod().GetName())
	}
	if err := c.ensureUpdatePod(source); err != nil {
		// UpdateNotReadyErr is returned unwrapped so the caller can detect it
		// and requeue the sync rather than treat it as a hard failure.
		if _, ok := err.(UpdateNotReadyErr); ok {
			return err
		}
		return errors.Wrapf(err, "error ensuring updated catalog source pod: %s", source.Pod().GetName())
	}
	if err := c.ensureService(source, overwrite); err != nil {
		return errors.Wrapf(err, "error ensuring service: %s", source.Service().GetName())
	}

	if overwritePod {
		// Record the freshly (re)created registry endpoint in the status.
		now := c.now()
		catalogSource.Status.RegistryServiceStatus = &v1alpha1.RegistryServiceStatus{
			CreatedAt:        now,
			Protocol:         "grpc",
			ServiceName:      source.Service().GetName(),
			ServiceNamespace: source.GetNamespace(),
			Port:             fmt.Sprintf("%d", source.Service().Spec.Ports[0].Port),
		}
	}
	return nil
}
// ensurePod guarantees that a registry pod exists for the given catalog source.
// Existing live pods are left alone unless overwrite is set, in which case they
// are deleted immediately before a replacement pod is created.
func (c *GrpcRegistryReconciler) ensurePod(source grpcCatalogSourceDecorator, overwrite bool) error {
	podClient := c.OpClient.KubernetesInterface().CoreV1().Pods(source.GetNamespace())

	livePods := c.currentPods(source)
	if len(livePods) > 0 && !overwrite {
		return nil
	}
	for _, live := range livePods {
		if err := podClient.Delete(context.TODO(), live.GetName(), *metav1.NewDeleteOptions(0)); err != nil {
			return errors.Wrapf(err, "error deleting old pod: %s", live.GetName())
		}
	}
	if _, err := podClient.Create(context.TODO(), source.Pod(), metav1.CreateOptions{}); err != nil {
		return errors.Wrapf(err, "error creating new pod: %s", source.Pod().GetGenerateName())
	}
	return nil
}
// ensureUpdatePod checks that for the same catalog source version the same container imageID is running.
// For polled catalog sources it drives the update-pod lifecycle: spawn an update
// pod when one is due, delete failed update pods, wait for readiness, then either
// promote the update pod (imageID changed) or delete it as a duplicate.
// Returns UpdateNotReadyErr when the sync should simply be requeued.
func (c *GrpcRegistryReconciler) ensureUpdatePod(source grpcCatalogSourceDecorator) error {
	if !source.Poll() {
		return nil
	}

	currentLivePods := c.currentPods(source)
	currentUpdatePods := c.currentUpdatePods(source)

	if source.Update() && len(currentUpdatePods) == 0 {
		logrus.WithField("CatalogSource", source.GetName()).Infof("catalog update required at %s", time.Now().String())
		pod, err := c.createUpdatePod(source)
		if err != nil {
			return errors.Wrapf(err, "creating update catalog source pod")
		}
		source.SetLastUpdateTime()
		return UpdateNotReadyErr{catalogName: source.GetName(), podName: pod.GetName()}
	}

	// check if update pod is ready - if not requeue the sync
	// if update pod failed (potentially due to a bad catalog image) delete it
	for _, p := range currentUpdatePods {
		fail, err := c.podFailed(p)
		if err != nil {
			return err
		}
		if fail {
			// podFailed already deleted the pod; return an error so the sync
			// retries instead of cycling on a permanently broken image.
			return fmt.Errorf("update pod %s in a %s state: deleted update pod", p.GetName(), p.Status.Phase)
		}
		if !podReady(p) {
			return UpdateNotReadyErr{catalogName: source.GetName(), podName: p.GetName()}
		}
	}

	for _, updatePod := range currentUpdatePods {
		// if container imageID IDs are different, switch the serving pods
		if imageChanged(updatePod, currentLivePods) {
			err := c.promoteCatalog(updatePod, source.GetName())
			if err != nil {
				return fmt.Errorf("detected imageID change: error during update: %s", err)
			}
			// remove old catalog source pod
			err = c.removePods(currentLivePods, source.GetNamespace())
			if err != nil {
				return errors.Wrapf(err, "detected imageID change: error deleting old catalog source pod")
			}
			// done syncing
			logrus.WithField("CatalogSource", source.GetName()).Infof("detected imageID change: catalogsource pod updated at %s", time.Now().String())
			return nil
		}
		// delete update pod right away, since the digest match, to prevent long-lived duplicate catalog pods
		logrus.WithField("CatalogSource", source.GetName()).Info("catalog polling result: no update")
		err := c.removePods([]*corev1.Pod{updatePod}, source.GetNamespace())
		if err != nil {
			return errors.Wrapf(err, "error deleting duplicate catalog polling pod: %s", updatePod.GetName())
		}
	}
	return nil
}
// ensureService makes sure the registry Service for the catalog source exists,
// deleting and recreating it when overwrite is requested.
func (c *GrpcRegistryReconciler) ensureService(source grpcCatalogSourceDecorator, overwrite bool) error {
	svc := source.Service()
	if existing := c.currentService(source); existing != nil {
		if !overwrite {
			return nil
		}
		if err := c.OpClient.DeleteService(svc.GetNamespace(), svc.GetName(), metav1.NewDeleteOptions(0)); err != nil {
			return err
		}
	}
	_, err := c.OpClient.CreateService(svc)
	return err
}
// createUpdatePod is an internal method that creates a pod using the latest catalog source.
// The serving label is cleared first so the catalog service never routes traffic
// to the not-yet-promoted pod.
func (c *GrpcRegistryReconciler) createUpdatePod(source grpcCatalogSourceDecorator) (*corev1.Pod, error) {
	// remove label from pod to ensure service does not accidentally route traffic to the pod
	updatePod := swapLabels(source.Pod(), "", source.Name)

	created, err := c.OpClient.KubernetesInterface().CoreV1().Pods(source.GetNamespace()).Create(context.TODO(), updatePod, metav1.CreateOptions{})
	if err != nil {
		logrus.WithField("pod", source.Pod().GetName()).Warn("couldn't create new catalogsource pod")
		return nil, err
	}
	return created, nil
}
// imageChanged reports whether the update pod is running a different image
// (compared by container ImageID) than any of the serving pods.
func imageChanged(updatePod *corev1.Pod, servingPods []*corev1.Pod) bool {
	updateID := imageID(updatePod)
	for _, serving := range servingPods {
		servingID := imageID(serving)
		if servingID == updateID {
			continue
		}
		logrus.WithField("CatalogSource", serving.GetName()).Infof("catalog image changed: serving pod %s update pod %s", servingID, updateID)
		return true
	}
	return false
}
// imageID returns the ImageID of the primary catalog source container, or the
// empty string when no container status has been reported yet.
// Note: the pod must be running and the container in a ready status to return a valid ImageID.
func imageID(pod *corev1.Pod) string {
	// Guard against a panic: a pending pod has no container statuses yet.
	if len(pod.Status.ContainerStatuses) == 0 {
		return ""
	}
	return pod.Status.ContainerStatuses[0].ImageID
}
// removePods deletes every given pod in the namespace, stopping at the first failure.
func (c *GrpcRegistryReconciler) removePods(pods []*corev1.Pod, namespace string) error {
	podClient := c.OpClient.KubernetesInterface().CoreV1().Pods(namespace)
	for _, pod := range pods {
		if err := podClient.Delete(context.TODO(), pod.GetName(), *metav1.NewDeleteOptions(0)); err != nil {
			return errors.Wrapf(err, "error deleting pod: %s", pod.GetName())
		}
	}
	return nil
}
// CheckRegistryServer returns true if the given CatalogSource is considered healthy; false otherwise.
// Healthy means at least one pod runs the spec'd image and the registry Service exists.
func (c *GrpcRegistryReconciler) CheckRegistryServer(catalogSource *v1alpha1.CatalogSource) (healthy bool, err error) {
	source := grpcCatalogSourceDecorator{catalogSource}

	// Check on registry resources
	// TODO: add gRPC health check
	if len(c.currentPodsWithCorrectImage(source)) < 1 || c.currentService(source) == nil {
		// Direct returns instead of naked returns on named results: clearer
		// and less error-prone, same observable behavior.
		return false, nil
	}
	return true, nil
}
// promoteCatalog swaps the labels on the update pod so that the update pod is now reachable by the catalog service.
// By updating the catalog on cluster it promotes the update pod to act as the new version of the catalog on-cluster.
// key is the catalog source name, which is the value the service selector matches on.
func (c *GrpcRegistryReconciler) promoteCatalog(updatePod *corev1.Pod, key string) error {
	// Update the update pod to promote it to serving pod via the SSA client.
	// Setting the serving label routes service traffic to this pod; clearing
	// the update label removes it from the update-pod pool.
	// NOTE(review): Apply returns a function that must itself be invoked —
	// hence the trailing () — confirm against the ServerSideApplier API.
	err := c.SSAClient.Apply(context.TODO(), updatePod, func(p *v1.Pod) error {
		p.Labels[CatalogSourceLabelKey] = key
		p.Labels[CatalogSourceUpdateKey] = ""
		return nil
	})()
	return err
}
// podReady returns true if the given Pod has a PodReady condition with status True.
func podReady(pod *corev1.Pod) bool {
	// Ranging over a nil Conditions slice is a no-op, so the explicit nil
	// check the original carried is redundant and has been removed.
	for _, cond := range pod.Status.Conditions {
		if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}
// swapLabels sets the serving label and the update label on the pod to the
// given values and returns the same pod (mutated in place).
func swapLabels(pod *corev1.Pod, labelKey, updateKey string) *corev1.Pod {
	pod.Labels[CatalogSourceLabelKey], pod.Labels[CatalogSourceUpdateKey] = labelKey, updateKey
	return pod
}
// podFailed checks whether the pod status is in a failed or unknown state, and deletes the pod if so.
// Returns (true, nil) when the pod was failed/unknown and successfully deleted,
// (true, wrapped err) when the deletion itself failed, and (false, nil) otherwise.
func (c *GrpcRegistryReconciler) podFailed(pod *corev1.Pod) (bool, error) {
	if pod.Status.Phase == corev1.PodFailed || pod.Status.Phase == corev1.PodUnknown {
		logrus.WithField("UpdatePod", pod.GetName()).Infof("catalog polling result: update pod %s failed to start", pod.GetName())
		// Remove the broken pod so the next sync can create a fresh one instead
		// of waiting forever on a pod that will never become ready.
		err := c.removePods([]*corev1.Pod{pod}, pod.GetNamespace())
		if err != nil {
			return true, errors.Wrapf(err, "error deleting failed catalog polling pod: %s", pod.GetName())
		}
		return true, nil
	}
	return false, nil
}
|
package test_test
import (
. "k8s.io/kubectl/pkg/framework/test"
"fmt"
"net"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Tests for DefaultAddressManager. Two fixes over the original:
//  1. the listen error is checked BEFORE deferring l.Close(): on failure l is
//     nil and the deferred Close would panic instead of reporting the error;
//  2. the bind-failure assertion accepts both the Linux and Darwin wordings of
//     the error, since the exact message is OS-specific.
var _ = Describe("DefaultAddressManager", func() {
	var defaultAddressManager *DefaultAddressManager
	BeforeEach(func() {
		defaultAddressManager = &DefaultAddressManager{}
	})

	Describe("Initialize", func() {
		It("returns a free port and an address to bind to", func() {
			port, host, err := defaultAddressManager.Initialize("localhost")
			Expect(err).NotTo(HaveOccurred())
			Expect(host).To(Equal("127.0.0.1"))
			Expect(port).NotTo(Equal(0))

			addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("%s:%d", host, port))
			Expect(err).NotTo(HaveOccurred())
			l, err := net.ListenTCP("tcp", addr)
			Expect(err).NotTo(HaveOccurred())
			defer func() {
				Expect(l.Close()).To(Succeed())
			}()
		})

		Context("when given an invalid hostname", func() {
			It("propagates the error", func() {
				_, _, err := defaultAddressManager.Initialize("this is not a hostname")
				Expect(err).To(MatchError(ContainSubstring("no such host")))
			})
		})

		Context("when given a hostname that we don't have permission to listen on", func() {
			It("propagates the error", func() {
				_, _, err := defaultAddressManager.Initialize("example.com")
				Expect(err).To(SatisfyAny(
					// Linux
					MatchError(ContainSubstring("bind: cannot assign requested address")),
					// Darwin
					MatchError(ContainSubstring("bind: can't assign requested address")),
				))
			})
		})

		Context("initialized multiple times", func() {
			It("fails", func() {
				_, _, err := defaultAddressManager.Initialize("localhost")
				Expect(err).NotTo(HaveOccurred())
				_, _, err = defaultAddressManager.Initialize("localhost")
				Expect(err).To(MatchError(ContainSubstring("already initialized")))
			})
		})
	})

	Describe("Port", func() {
		It("returns an error if Initialize has not been called yet", func() {
			_, err := defaultAddressManager.Port()
			Expect(err).To(MatchError(ContainSubstring("not initialized yet")))
		})
		It("returns the same port as previously allocated by Initialize", func() {
			expectedPort, _, err := defaultAddressManager.Initialize("localhost")
			Expect(err).NotTo(HaveOccurred())
			actualPort, err := defaultAddressManager.Port()
			Expect(err).NotTo(HaveOccurred())
			Expect(actualPort).To(Equal(expectedPort))
		})
	})

	Describe("Host", func() {
		It("returns an error if Initialize has not been called yet", func() {
			_, err := defaultAddressManager.Host()
			Expect(err).To(MatchError(ContainSubstring("not initialized yet")))
		})
		It("returns the same host as previously allocated by Initialize", func() {
			_, expectedHost, err := defaultAddressManager.Initialize("localhost")
			Expect(err).NotTo(HaveOccurred())
			actualHost, err := defaultAddressManager.Host()
			Expect(err).NotTo(HaveOccurred())
			Expect(actualHost).To(Equal(expectedHost))
		})
	})
})
Update test to work on multiple systems
Our integration-style unit test now accepts multiple, slightly different
error messages, since different operating systems report the bind failure with slightly different wording.
package test_test
import (
. "k8s.io/kubectl/pkg/framework/test"
"fmt"
"net"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Tests for DefaultAddressManager. Fix over the original: the listen error is
// checked BEFORE deferring l.Close() — on failure l is nil and the deferred
// Close would panic instead of reporting the error.
var _ = Describe("DefaultAddressManager", func() {
	var defaultAddressManager *DefaultAddressManager
	BeforeEach(func() {
		defaultAddressManager = &DefaultAddressManager{}
	})

	Describe("Initialize", func() {
		It("returns a free port and an address to bind to", func() {
			port, host, err := defaultAddressManager.Initialize("localhost")
			Expect(err).NotTo(HaveOccurred())
			Expect(host).To(Equal("127.0.0.1"))
			Expect(port).NotTo(Equal(0))

			addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("%s:%d", host, port))
			Expect(err).NotTo(HaveOccurred())
			l, err := net.ListenTCP("tcp", addr)
			Expect(err).NotTo(HaveOccurred())
			defer func() {
				Expect(l.Close()).To(Succeed())
			}()
		})

		Context("when given an invalid hostname", func() {
			It("propagates the error", func() {
				_, _, err := defaultAddressManager.Initialize("this is not a hostname")
				Expect(err).To(MatchError(ContainSubstring("no such host")))
			})
		})

		Context("when given a hostname that we don't have permission to listen on", func() {
			It("propagates the error", func() {
				_, _, err := defaultAddressManager.Initialize("example.com")
				// The exact wording of the bind error is OS-specific.
				Expect(err).To(SatisfyAny(
					// Linux
					MatchError(ContainSubstring("bind: cannot assign requested address")),
					// Darwin
					MatchError(ContainSubstring("bind: can't assign requested address")),
				))
			})
		})

		Context("initialized multiple times", func() {
			It("fails", func() {
				_, _, err := defaultAddressManager.Initialize("localhost")
				Expect(err).NotTo(HaveOccurred())
				_, _, err = defaultAddressManager.Initialize("localhost")
				Expect(err).To(MatchError(ContainSubstring("already initialized")))
			})
		})
	})

	Describe("Port", func() {
		It("returns an error if Initialize has not been called yet", func() {
			_, err := defaultAddressManager.Port()
			Expect(err).To(MatchError(ContainSubstring("not initialized yet")))
		})
		It("returns the same port as previously allocated by Initialize", func() {
			expectedPort, _, err := defaultAddressManager.Initialize("localhost")
			Expect(err).NotTo(HaveOccurred())
			actualPort, err := defaultAddressManager.Port()
			Expect(err).NotTo(HaveOccurred())
			Expect(actualPort).To(Equal(expectedPort))
		})
	})

	Describe("Host", func() {
		It("returns an error if Initialize has not been called yet", func() {
			_, err := defaultAddressManager.Host()
			Expect(err).To(MatchError(ContainSubstring("not initialized yet")))
		})
		It("returns the same host as previously allocated by Initialize", func() {
			_, expectedHost, err := defaultAddressManager.Initialize("localhost")
			Expect(err).NotTo(HaveOccurred())
			actualHost, err := defaultAddressManager.Host()
			Expect(err).NotTo(HaveOccurred())
			Expect(actualHost).To(Equal(expectedHost))
		})
	})
})
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"time"
"k8s.io/api/core/v1"
schedulerapi "k8s.io/api/scheduling/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
_ "github.com/stretchr/testify/assert"
)
var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
var cs clientset.Interface
var nodeList *v1.NodeList
var ns string
f := framework.NewDefaultFramework("sched-preemption")
lowPriority, mediumPriority, highPriority := int32(1), int32(100), int32(1000)
lowPriorityClassName := f.BaseName + "-low-priority"
mediumPriorityClassName := f.BaseName + "-medium-priority"
highPriorityClassName := f.BaseName + "-high-priority"
AfterEach(func() {
})
BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
nodeList = &v1.NodeList{}
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
_, err = f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: mediumPriorityClassName}, Value: mediumPriority})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
_, err = f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: lowPriorityClassName}, Value: lowPriority})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
framework.WaitForAllNodesHealthy(cs, time.Minute)
masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
err = framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
})
// This test verifies that when a higher priority pod is created and no node with
// enough resources is found, scheduler preempts a lower priority pod to schedule
// the high priority pod.
It("validates basic preemption works", func() {
var podRes v1.ResourceList
// Create one pod per node that uses a lot of the node's resources.
By("Create pods that use 60% of node resources.")
pods := make([]*v1.Pod, len(nodeList.Items))
for i, node := range nodeList.Items {
cpuAllocatable, found := node.Status.Allocatable["cpu"]
Expect(found).To(Equal(true))
milliCPU := cpuAllocatable.MilliValue() * 40 / 100
memAllocatable, found := node.Status.Allocatable["memory"]
Expect(found).To(Equal(true))
memory := memAllocatable.Value() * 60 / 100
podRes = v1.ResourceList{}
podRes[v1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[v1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)
// make the first pod low priority and the rest medium priority.
priorityName := mediumPriorityClassName
if i == 0 {
priorityName = lowPriorityClassName
}
pods[i] = createPausePod(f, pausePodConfig{
Name: fmt.Sprintf("pod%d-%v", i, priorityName),
PriorityClassName: priorityName,
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
})
framework.Logf("Created pod: %v", pods[i].Name)
}
By("Wait for pods to be scheduled.")
for _, pod := range pods {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
}
By("Run a high priority pod that use 60% of a node resources.")
// Create a high priority pod and make sure it is scheduled.
runPausePod(f, pausePodConfig{
Name: "preemptor-pod",
PriorityClassName: highPriorityClassName,
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
})
// Make sure that the lowest priority pod is deleted.
preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
podDeleted := (err != nil && errors.IsNotFound(err)) ||
(err == nil && preemptedPod.DeletionTimestamp != nil)
Expect(podDeleted).To(BeTrue())
// Other pods (mid priority ones) should be present.
for i := 1; i < len(pods); i++ {
livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(livePod.DeletionTimestamp).To(BeNil())
}
})
// This test verifies that when a critical pod is created and no node with
// enough resources is found, scheduler preempts a lower priority pod to schedule
// this critical pod.
It("validates lower priority pod preemption by critical pod", func() {
var podRes v1.ResourceList
// Create one pod per node that uses a lot of the node's resources.
By("Create pods that use 60% of node resources.")
pods := make([]*v1.Pod, len(nodeList.Items))
for i, node := range nodeList.Items {
cpuAllocatable, found := node.Status.Allocatable["cpu"]
Expect(found).To(Equal(true))
milliCPU := cpuAllocatable.MilliValue() * 40 / 100
memAllocatable, found := node.Status.Allocatable["memory"]
Expect(found).To(Equal(true))
memory := memAllocatable.Value() * 60 / 100
podRes = v1.ResourceList{}
podRes[v1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[v1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)
// make the first pod low priority and the rest medium priority.
priorityName := mediumPriorityClassName
if i == 0 {
priorityName = lowPriorityClassName
}
pods[i] = createPausePod(f, pausePodConfig{
Name: fmt.Sprintf("pod%d-%v", i, priorityName),
PriorityClassName: priorityName,
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
})
framework.Logf("Created pod: %v", pods[i].Name)
}
By("Wait for pods to be scheduled.")
for _, pod := range pods {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
}
By("Run a critical pod that use 60% of a node resources.")
// Create a critical pod and make sure it is scheduled.
runPausePod(f, pausePodConfig{
Name: "critical-pod",
Namespace: metav1.NamespaceSystem,
PriorityClassName: scheduling.SystemClusterCritical,
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
})
// Make sure that the lowest priority pod is deleted.
preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
defer func() {
// Clean-up the critical pod
err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete("critical-pod", metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
}()
podDeleted := (err != nil && errors.IsNotFound(err)) ||
(err == nil && preemptedPod.DeletionTimestamp != nil)
Expect(podDeleted).To(BeTrue())
// Other pods (mid priority ones) should be present.
for i := 1; i < len(pods); i++ {
livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(livePod.DeletionTimestamp).To(BeNil())
}
})
// This test verifies that when a high priority pod is pending and its
// scheduling violates a medium priority pod anti-affinity, the medium priority
// pod is preempted to allow the higher priority pod schedule.
// It also verifies that existing low priority pods are not preempted as their
// preemption wouldn't help.
It("validates pod anti-affinity works in preemption", func() {
	var podRes v1.ResourceList
	// Create a few pods that uses a small amount of resources.
	By("Create pods that use 10% of node resources.")
	// Cap the number of filler pods at the number of schedulable nodes;
	// each filler is pinned to its own node via a node-affinity term below.
	numPods := 4
	if len(nodeList.Items) < numPods {
		numPods = len(nodeList.Items)
	}
	pods := make([]*v1.Pod, numPods)
	for i := 0; i < numPods; i++ {
		node := nodeList.Items[i]
		// Size requests at 10% of the node's allocatable CPU/memory.
		// NOTE(review): this ignores resources already requested by other
		// pods on the node — on heavily utilized nodes the fillers may not
		// schedule; confirm against the follow-up fix.
		cpuAllocatable, found := node.Status.Allocatable["cpu"]
		Expect(found).To(BeTrue())
		milliCPU := cpuAllocatable.MilliValue() * 10 / 100
		memAllocatable, found := node.Status.Allocatable["memory"]
		Expect(found).To(BeTrue())
		memory := memAllocatable.Value() * 10 / 100
		podRes = v1.ResourceList{}
		podRes[v1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
		podRes[v1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)
		// Apply node label to each node
		framework.AddOrUpdateLabelOnNode(cs, node.Name, "node", node.Name)
		framework.ExpectNodeHasLabel(cs, node.Name, "node", node.Name)
		// make the first pod medium priority and the rest low priority.
		priorityName := lowPriorityClassName
		if i == 0 {
			priorityName = mediumPriorityClassName
		}
		// Each filler pod repels "service" pods on the same labeled node
		// (anti-affinity) and is pinned to exactly one node (node affinity).
		pods[i] = createPausePod(f, pausePodConfig{
			Name:              fmt.Sprintf("pod%d-%v", i, priorityName),
			PriorityClassName: priorityName,
			Resources: &v1.ResourceRequirements{
				Requests: podRes,
			},
			Affinity: &v1.Affinity{
				PodAntiAffinity: &v1.PodAntiAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
						{
							LabelSelector: &metav1.LabelSelector{
								MatchExpressions: []metav1.LabelSelectorRequirement{
									{
										Key:      "service",
										Operator: metav1.LabelSelectorOpIn,
										Values:   []string{"blah", "foo"},
									},
								},
							},
							TopologyKey: "node",
						},
					},
				},
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchExpressions: []v1.NodeSelectorRequirement{
									{
										Key:      "node",
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{node.Name},
									},
								},
							},
						},
					},
				},
			},
		})
		framework.Logf("Created pod: %v", pods[i].Name)
	}
	defer func() { // Remove added labels
		for i := 0; i < numPods; i++ {
			framework.RemoveLabelOffNode(cs, nodeList.Items[i].Name, "node")
		}
	}()
	By("Wait for pods to be scheduled.")
	for _, pod := range pods {
		framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
	}
	By("Run a high priority pod with node affinity to the first node.")
	// Create a high priority pod and make sure it is scheduled.
	// Its "service" label violates the medium priority pod's anti-affinity
	// on the first node, forcing that pod (and only that pod) to be preempted.
	runPausePod(f, pausePodConfig{
		Name:              "preemptor-pod",
		PriorityClassName: highPriorityClassName,
		Labels:            map[string]string{"service": "blah"},
		Affinity: &v1.Affinity{
			NodeAffinity: &v1.NodeAffinity{
				RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
					NodeSelectorTerms: []v1.NodeSelectorTerm{
						{
							MatchExpressions: []v1.NodeSelectorRequirement{
								{
									Key:      "node",
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{nodeList.Items[0].Name},
								},
							},
						},
					},
				},
			},
		},
	})
	// Make sure that the medium priority pod on the first node is preempted.
	// Either the pod is already gone (NotFound) or it is being deleted.
	preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
	podDeleted := (err != nil && errors.IsNotFound(err)) ||
		(err == nil && preemptedPod.DeletionTimestamp != nil)
	Expect(podDeleted).To(BeTrue())
	// Other pods (low priority ones) should be present.
	for i := 1; i < len(pods); i++ {
		livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		Expect(livePod.DeletionTimestamp).To(BeNil())
	}
})
})
var _ = SIGDescribe("PodPriorityResolution [Serial]", func() {
	var cs clientset.Interface
	var ns string
	f := framework.NewDefaultFramework("sched-pod-priority")
	BeforeEach(func() {
		cs = f.ClientSet
		ns = f.Namespace.Name
		// Fail fast if testing namespaces from previous runs are still terminating.
		err := framework.CheckTestingNSDeletedExcept(cs, ns)
		framework.ExpectNoError(err)
	})
	// This test verifies that system critical priorities are created automatically and resolved properly.
	It("validates critical system priorities are created and resolved", func() {
		// Create pods that use system critical priorities and
		By("Create pods that use critical system priorities.")
		systemPriorityClasses := []string{
			scheduling.SystemNodeCritical, scheduling.SystemClusterCritical,
		}
		for i, spc := range systemPriorityClasses {
			pod := createPausePod(f, pausePodConfig{
				Name:              fmt.Sprintf("pod%d-%v", i, spc),
				Namespace:         metav1.NamespaceSystem,
				PriorityClassName: spc,
			})
			// Deferred inside the loop on purpose: cleanups run when the It
			// body returns, and `pod` is freshly declared each iteration, so
			// each closure deletes its own pod.
			defer func() {
				// Clean-up the pod.
				err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
				framework.ExpectNoError(err)
			}()
			// A nil Priority means the class name was never resolved to a value.
			Expect(pod.Spec.Priority).NotTo(BeNil())
			framework.Logf("Created pod: %v", pod.Name)
		}
	})
})
UPSTREAM: 76663: Fix preemption race conditions on heavily utilized nodes in e2e tests
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
_ "github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
schedulerapi "k8s.io/api/scheduling/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/test/e2e/framework"
)
var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
var cs clientset.Interface
var nodeList *v1.NodeList
var ns string
f := framework.NewDefaultFramework("sched-preemption")
lowPriority, mediumPriority, highPriority := int32(1), int32(100), int32(1000)
lowPriorityClassName := f.BaseName + "-low-priority"
mediumPriorityClassName := f.BaseName + "-medium-priority"
highPriorityClassName := f.BaseName + "-high-priority"
// Intentionally empty: no per-test teardown is required here.
AfterEach(func() {
})
BeforeEach(func() {
	cs = f.ClientSet
	ns = f.Namespace.Name
	nodeList = &v1.NodeList{}
	// Create the three priority classes used by these tests. AlreadyExists is
	// tolerated so repeated runs against the same cluster still pass.
	_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
	Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
	_, err = f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: mediumPriorityClassName}, Value: mediumPriority})
	Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
	_, err = f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: lowPriorityClassName}, Value: lowPriority})
	Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
	framework.WaitForAllNodesHealthy(cs, time.Minute)
	// NOTE(review): masterNodes is assigned but not declared here — presumably
	// a package-level variable shared with sibling test files; confirm.
	masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
	err = framework.CheckTestingNSDeletedExcept(cs, ns)
	framework.ExpectNoError(err)
})
// This test verifies that when a higher priority pod is created and no node with
// enough resources is found, scheduler preempts a lower priority pod to schedule
// the high priority pod.
It("validates basic preemption works", func() {
	var podRes v1.ResourceList
	// Create one pod per node that uses a lot of the node's resources.
	By("Create pods that use 60% of node resources.")
	pods := make([]*v1.Pod, len(nodeList.Items))
	allPods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
	// Previously this error was ignored; a failed List would have caused a
	// nil-pointer panic on allPods.Items below.
	framework.ExpectNoError(err)
	for i, node := range nodeList.Items {
		// Log the resources already requested on this node — useful when
		// diagnosing preemption flakes on heavily utilized nodes.
		currentCpuUsage, currentMemUsage := getCurrentPodUsageOnTheNode(node.Name, allPods.Items, podRequestedResource)
		framework.Logf("Current cpu and memory usage %v, %v", currentCpuUsage, currentMemUsage)
		currentNode, err := cs.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		cpuAllocatable, found := currentNode.Status.Allocatable["cpu"]
		Expect(found).To(BeTrue())
		milliCPU := cpuAllocatable.MilliValue()
		milliCPU = milliCPU * 40 / 100
		memAllocatable, found := currentNode.Status.Allocatable["memory"]
		Expect(found).To(BeTrue())
		memory := memAllocatable.Value()
		memory = memory * 60 / 100
		podRes = v1.ResourceList{}
		podRes[v1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
		podRes[v1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)
		// make the first pod low priority and the rest medium priority.
		priorityName := mediumPriorityClassName
		if i == 0 {
			priorityName = lowPriorityClassName
		}
		pods[i] = createPausePod(f, pausePodConfig{
			Name:              fmt.Sprintf("pod%d-%v", i, priorityName),
			PriorityClassName: priorityName,
			Resources: &v1.ResourceRequirements{
				Requests: podRes,
			},
		})
		framework.Logf("Created pod: %v", pods[i].Name)
	}
	By("Wait for pods to be scheduled.")
	for _, pod := range pods {
		framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
	}
	By("Run a high priority pod that use 60% of a node resources.")
	// Create a high priority pod and make sure it is scheduled.
	runPausePod(f, pausePodConfig{
		Name:              "preemptor-pod",
		PriorityClassName: highPriorityClassName,
		Resources: &v1.ResourceRequirements{
			Requests: podRes,
		},
	})
	// Make sure that the lowest priority pod is deleted: either it is already
	// gone (NotFound) or its deletion is in progress.
	preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
	podDeleted := (err != nil && errors.IsNotFound(err)) ||
		(err == nil && preemptedPod.DeletionTimestamp != nil)
	Expect(podDeleted).To(BeTrue())
	// Other pods (mid priority ones) should be present.
	for i := 1; i < len(pods); i++ {
		livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		Expect(livePod.DeletionTimestamp).To(BeNil())
	}
})
// This test verifies that when a critical pod is created and no node with
// enough resources is found, scheduler preempts a lower priority pod to schedule
// this critical pod.
It("validates lower priority pod preemption by critical pod", func() {
	var podRes v1.ResourceList
	// Create one pod per node that uses a lot of the node's resources.
	By("Create pods that use 60% of node resources.")
	pods := make([]*v1.Pod, len(nodeList.Items))
	allPods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
	// Previously this error was ignored; a failed List would have caused a
	// nil-pointer panic on allPods.Items below.
	framework.ExpectNoError(err)
	for i, node := range nodeList.Items {
		// Log the resources already requested on this node — useful when
		// diagnosing preemption flakes on heavily utilized nodes.
		currentCpuUsage, currentMemUsage := getCurrentPodUsageOnTheNode(node.Name, allPods.Items, podRequestedResource)
		framework.Logf("Current cpu usage and memory usage is %v, %v", currentCpuUsage, currentMemUsage)
		currentNode, err := cs.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		cpuAllocatable, found := currentNode.Status.Allocatable["cpu"]
		Expect(found).To(BeTrue())
		milliCPU := cpuAllocatable.MilliValue()
		milliCPU = milliCPU * 40 / 100
		memAllocatable, found := currentNode.Status.Allocatable["memory"]
		Expect(found).To(BeTrue())
		memory := memAllocatable.Value()
		memory = memory * 60 / 100
		podRes = v1.ResourceList{}
		podRes[v1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
		podRes[v1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)
		// make the first pod low priority and the rest medium priority.
		priorityName := mediumPriorityClassName
		if i == 0 {
			priorityName = lowPriorityClassName
		}
		pods[i] = createPausePod(f, pausePodConfig{
			Name:              fmt.Sprintf("pod%d-%v", i, priorityName),
			PriorityClassName: priorityName,
			Resources: &v1.ResourceRequirements{
				Requests: podRes,
			},
		})
		framework.Logf("Created pod: %v", pods[i].Name)
	}
	By("Wait for pods to be scheduled.")
	for _, pod := range pods {
		framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
	}
	By("Run a critical pod that use 60% of a node resources.")
	// Create a critical pod and make sure it is scheduled.
	runPausePod(f, pausePodConfig{
		Name:              "critical-pod",
		Namespace:         metav1.NamespaceSystem,
		PriorityClassName: scheduling.SystemClusterCritical,
		Resources: &v1.ResourceRequirements{
			Requests: podRes,
		},
	})
	// Register cleanup immediately after creation so the critical pod is
	// removed even if a later assertion fails.
	defer func() {
		// Clean-up the critical pod
		err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete("critical-pod", metav1.NewDeleteOptions(0))
		framework.ExpectNoError(err)
	}()
	// Make sure that the lowest priority pod is deleted: either it is already
	// gone (NotFound) or its deletion is in progress.
	preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
	podDeleted := (err != nil && errors.IsNotFound(err)) ||
		(err == nil && preemptedPod.DeletionTimestamp != nil)
	Expect(podDeleted).To(BeTrue())
	// Other pods (mid priority ones) should be present.
	for i := 1; i < len(pods); i++ {
		livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		Expect(livePod.DeletionTimestamp).To(BeNil())
	}
})
// This test verifies that when a high priority pod is pending and its
// scheduling violates a medium priority pod anti-affinity, the medium priority
// pod is preempted to allow the higher priority pod schedule.
// It also verifies that existing low priority pods are not preempted as their
// preemption wouldn't help.
It("validates pod anti-affinity works in preemption", func() {
	var podRes v1.ResourceList
	// Create a few pods that uses a small amount of resources.
	By("Create pods that use 10% of node resources.")
	// Cap the number of filler pods at the number of schedulable nodes; each
	// filler is pinned to its own node via the node-affinity term below.
	numPods := 4
	if len(nodeList.Items) < numPods {
		numPods = len(nodeList.Items)
	}
	pods := make([]*v1.Pod, numPods)
	allPods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
	// Previously this error was ignored; a failed List would have caused a
	// nil-pointer panic on allPods.Items below.
	framework.ExpectNoError(err)
	for i := 0; i < numPods; i++ {
		node := nodeList.Items[i]
		// Log the resources already requested on this node — useful when
		// diagnosing preemption flakes on heavily utilized nodes.
		currentCpuUsage, currentMemUsage := getCurrentPodUsageOnTheNode(node.Name, allPods.Items, podRequestedResource)
		framework.Logf("Current cpu usage and memory usage is %v, %v", currentCpuUsage, currentMemUsage)
		currentNode, err := cs.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		cpuAllocatable, found := currentNode.Status.Allocatable["cpu"]
		Expect(found).To(BeTrue())
		milliCPU := cpuAllocatable.MilliValue()
		milliCPU = milliCPU * 10 / 100
		memAllocatable, found := currentNode.Status.Allocatable["memory"]
		Expect(found).To(BeTrue())
		memory := memAllocatable.Value()
		memory = memory * 10 / 100
		podRes = v1.ResourceList{}
		podRes[v1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
		podRes[v1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)
		// Apply node label to each node
		framework.AddOrUpdateLabelOnNode(cs, node.Name, "node", node.Name)
		framework.ExpectNodeHasLabel(cs, node.Name, "node", node.Name)
		// make the first pod medium priority and the rest low priority.
		priorityName := lowPriorityClassName
		if i == 0 {
			priorityName = mediumPriorityClassName
		}
		// Each filler pod repels "service" pods on the same labeled node
		// (anti-affinity) and is pinned to exactly one node (node affinity).
		pods[i] = createPausePod(f, pausePodConfig{
			Name:              fmt.Sprintf("pod%d-%v", i, priorityName),
			PriorityClassName: priorityName,
			Resources: &v1.ResourceRequirements{
				Requests: podRes,
			},
			Affinity: &v1.Affinity{
				PodAntiAffinity: &v1.PodAntiAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
						{
							LabelSelector: &metav1.LabelSelector{
								MatchExpressions: []metav1.LabelSelectorRequirement{
									{
										Key:      "service",
										Operator: metav1.LabelSelectorOpIn,
										Values:   []string{"blah", "foo"},
									},
								},
							},
							TopologyKey: "node",
						},
					},
				},
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchExpressions: []v1.NodeSelectorRequirement{
									{
										Key:      "node",
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{node.Name},
									},
								},
							},
						},
					},
				},
			},
		})
		framework.Logf("Created pod: %v", pods[i].Name)
	}
	defer func() { // Remove added labels
		for i := 0; i < numPods; i++ {
			framework.RemoveLabelOffNode(cs, nodeList.Items[i].Name, "node")
		}
	}()
	By("Wait for pods to be scheduled.")
	for _, pod := range pods {
		framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
	}
	By("Run a high priority pod with node affinity to the first node.")
	// Create a high priority pod and make sure it is scheduled. Its "service"
	// label violates the medium priority pod's anti-affinity on the first
	// node, forcing that pod (and only that pod) to be preempted.
	runPausePod(f, pausePodConfig{
		Name:              "preemptor-pod",
		PriorityClassName: highPriorityClassName,
		Labels:            map[string]string{"service": "blah"},
		Affinity: &v1.Affinity{
			NodeAffinity: &v1.NodeAffinity{
				RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
					NodeSelectorTerms: []v1.NodeSelectorTerm{
						{
							MatchExpressions: []v1.NodeSelectorRequirement{
								{
									Key:      "node",
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{nodeList.Items[0].Name},
								},
							},
						},
					},
				},
			},
		},
	})
	// Make sure that the medium priority pod on the first node is preempted:
	// either it is already gone (NotFound) or its deletion is in progress.
	preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
	podDeleted := (err != nil && errors.IsNotFound(err)) ||
		(err == nil && preemptedPod.DeletionTimestamp != nil)
	Expect(podDeleted).To(BeTrue())
	// Other pods (low priority ones) should be present.
	for i := 1; i < len(pods); i++ {
		livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		Expect(livePod.DeletionTimestamp).To(BeNil())
	}
})
})
var _ = SIGDescribe("PodPriorityResolution [Serial]", func() {
	var cs clientset.Interface
	var ns string
	f := framework.NewDefaultFramework("sched-pod-priority")
	BeforeEach(func() {
		cs = f.ClientSet
		ns = f.Namespace.Name
		// Fail fast if testing namespaces from previous runs are still terminating.
		err := framework.CheckTestingNSDeletedExcept(cs, ns)
		framework.ExpectNoError(err)
	})
	// This test verifies that system critical priorities are created automatically and resolved properly.
	It("validates critical system priorities are created and resolved", func() {
		// Create pods that use system critical priorities and
		By("Create pods that use critical system priorities.")
		systemPriorityClasses := []string{
			scheduling.SystemNodeCritical, scheduling.SystemClusterCritical,
		}
		for i, spc := range systemPriorityClasses {
			pod := createPausePod(f, pausePodConfig{
				Name:              fmt.Sprintf("pod%d-%v", i, spc),
				Namespace:         metav1.NamespaceSystem,
				PriorityClassName: spc,
			})
			// Deferred inside the loop on purpose: cleanups run when the It
			// body returns, and `pod` is freshly declared each iteration, so
			// each closure deletes its own pod.
			defer func() {
				// Clean-up the pod.
				err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
				framework.ExpectNoError(err)
			}()
			// A nil Priority means the class name was never resolved to a value.
			Expect(pod.Spec.Priority).NotTo(BeNil())
			framework.Logf("Created pod: %v", pod.Name)
		}
	})
})
// getCurrentPodUsageOnTheNode sums the CPU (in milli-units) and memory (in
// bytes) requests of all non-best-effort pods currently assigned to nodeName,
// seeded with the requests from the given resource requirements.
//
// Bug fix: the original only used the node-name comparison to gate the
// best-effort skip, so requests of pods scheduled on *other* nodes were also
// added to the totals. Pods not on nodeName are now skipped entirely.
func getCurrentPodUsageOnTheNode(nodeName string, pods []v1.Pod, resource *v1.ResourceRequirements) (int64, int64) {
	totalRequestedCpuResource := resource.Requests.Cpu().MilliValue()
	totalRequestedMemResource := resource.Requests.Memory().Value()
	for _, pod := range pods {
		// Only pods scheduled onto this node contribute to its usage;
		// best-effort pods carry no requests and are skipped as well.
		if pod.Spec.NodeName != nodeName || v1qos.GetPodQOS(&pod) == v1.PodQOSBestEffort {
			continue
		}
		result := getNonZeroRequests(&pod)
		totalRequestedCpuResource += result.MilliCPU
		totalRequestedMemResource += result.Memory
	}
	return totalRequestedCpuResource, totalRequestedMemResource
}
|
// Package client provides the primitives for a command-line client for music_player
package client
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
)
// Client struct holds the host on which music_player is running
type Client struct {
	Host string // base URL of the music_player service; formUrl appends actions directly, so it presumably ends with "/" — confirm with callers
}
// getAlive checks if music_player is running by issuing a GET request to the
// service host. It prints and returns the response body on success, or prints
// a diagnostic and returns "" if the service cannot be reached or its
// response cannot be read.
func (client *Client) getAlive() string {
	response, err := http.Get(client.Host)
	if err != nil {
		fmt.Println("The service is not alive")
		return ""
	}
	defer response.Body.Close()
	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		// Previously this error was silently ignored, which could return a
		// truncated body as if the service were healthy.
		fmt.Println("The service is not alive")
		return ""
	}
	text := string(body)
	fmt.Println(text)
	return text
}
// PerformAction uses the entered action and name to construct an HTTP request,
// sends it to music_player, and returns a display message built from the
// service's JSON response.
//
// For local calls (except "save") the name is resolved to an absolute path so
// the player can open the file directly; if resolution fails or the path does
// not exist, the raw name is used instead, since it may refer to a saved
// playlist.
func (client *Client) PerformAction(action string, name string) string {
	path := name
	if action != "save" && client.isLocalhostCall() {
		abs, err := filepath.Abs(name)
		if err == nil {
			path = abs
		}
		// This can be a saved playlist rather than a file, so fall back to
		// the plain name when the resolved path does not exist.
		if _, err := os.Stat(path); os.IsNotExist(err) {
			path = name
		}
	}
	response, err := performCall(determineHttpMethod(action), client.formUrl(action, path))
	return getDisplayMessage(response, err)
}
// isLocalhostCall reports whether music_player's host points at the local
// machine: a localhost or 127.* address, over either http or https.
func (client *Client) isLocalhostCall() bool {
	localPrefixes := []string{
		"http://localhost",
		"https://localhost",
		"http://127.",
		"https://127.",
	}
	for _, prefix := range localPrefixes {
		if strings.HasPrefix(client.Host, prefix) {
			return true
		}
	}
	return false
}
// determineHttpMethod determines which method (GET, POST or PUT) is going to
// be used for the HTTP request that corresponds to the given action.
// Unknown actions yield the empty string.
func determineHttpMethod(action string) (method string) {
	switch action {
	case "songinfo", "queueinfo", "playlists":
		return "GET"
	case "next", "previous", "pause", "resume", "add":
		return "POST"
	case "play", "save", "stop":
		return "PUT"
	}
	return ""
}
// formUrl uses the entered action and name to construct the URL that is going
// to call music_player. Item-targeting actions ("add", "play", "save") get
// the escaped name appended as an extra path segment; unknown actions yield
// the empty string.
func (client *Client) formUrl(action string, name string) (requestUrl string) {
	switch action {
	case "add", "play", "save":
		requestUrl = client.Host + action + "/" + escape(name)
	case "songinfo", "queueinfo", "playlists", "next", "previous", "pause", "resume", "stop":
		requestUrl = client.Host + action
	}
	return requestUrl
}
// ResponseContainer struct is used to hold the unmarshalled json response of music_player
// Contains code (0 for success, 1 for failure), message and a list of file names
type ResponseContainer struct {
	Code    int      // 0 on success, non-zero on failure
	Message string   // human-readable status or error text
	Data    []string // optional list of file/playlist names
}
// performCall sends an HTTP request to music_player using the given method and
// url, reads the JSON response body and unmarshals it into a
// ResponseContainer.
//
// Supported methods are GET, POST and PUT; any other method now returns an
// error instead of dereferencing a nil response (the original left res nil
// and panicked at the body read).
func performCall(method string, url string) (ResponseContainer, error) {
	var res *http.Response
	var err error
	container := ResponseContainer{}
	switch method {
	case "GET":
		res, err = http.Get(url)
	case "POST":
		res, err = http.Post(url, "text/plain", nil)
	case "PUT":
		client := &http.Client{}
		request, err1 := http.NewRequest("PUT", url, nil)
		if err1 != nil {
			return container, err1
		}
		res, err = client.Do(request)
	default:
		return container, fmt.Errorf("unsupported HTTP method %q", method)
	}
	if err != nil {
		return container, err
	}
	// defer guarantees the body is closed on every return path below.
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return container, err
	}
	err = json.Unmarshal(body, &container)
	if err != nil {
		return container, err
	}
	return container, nil
}
// getDisplayMessage creates a string message based on the response of
// music_player. A transport/decoding error takes precedence; otherwise a
// failure response yields its message, and a success response yields the
// message followed by any returned names, one per line.
func getDisplayMessage(response ResponseContainer, err error) string {
	if err != nil {
		return err.Error()
	}
	if response.Code != 0 {
		return response.Message
	}
	buffer := bytes.NewBufferString(response.Message)
	// Ranging over a nil slice is a no-op, so the original's explicit
	// nil/len check was redundant.
	for _, element := range response.Data {
		buffer.WriteString("\n")
		buffer.WriteString(element)
	}
	return buffer.String()
}
// escape does the escaping of query string, encoding spaces as "%20" rather
// than the "+" produced by url.QueryEscape.
func escape(urlPath string) string {
	escaped := url.QueryEscape(urlPath)
	return strings.NewReplacer("+", "%20").Replace(escaped)
}
better documentation
// Package client provides the primitives for a command-line client for music_player
package client
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
)
// Client struct holds the host on which music_player is running
type Client struct {
	Host string // base URL of the music_player service; formUrl appends actions directly, so it presumably ends with "/" — confirm with callers
}
// getAlive checks if music_player is running by issuing a GET request to the
// service host. It prints and returns the response body on success, or prints
// a diagnostic and returns "" if the service cannot be reached or its
// response cannot be read.
func (client *Client) getAlive() string {
	response, err := http.Get(client.Host)
	if err != nil {
		fmt.Println("The service is not alive")
		return ""
	}
	defer response.Body.Close()
	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		// Previously this error was silently ignored, which could return a
		// truncated body as if the service were healthy.
		fmt.Println("The service is not alive")
		return ""
	}
	text := string(body)
	fmt.Println(text)
	return text
}
// PerformAction uses the entered action and name to construct an HTTP request,
// sends it to music_player, and returns a display message built from the
// service's JSON response.
//
// For local calls (except "save") the name is resolved to an absolute path so
// the player can open the file directly; if resolution fails or the path does
// not exist, the raw name is used instead, since it may refer to a saved
// playlist.
func (client *Client) PerformAction(action string, name string) string {
	path := name
	if action != "save" && client.isLocalhostCall() {
		abs, err := filepath.Abs(name)
		if err == nil {
			path = abs
		}
		// This can be a saved playlist rather than a file, so fall back to
		// the plain name when the resolved path does not exist.
		if _, err := os.Stat(path); os.IsNotExist(err) {
			path = name
		}
	}
	response, err := performCall(determineHttpMethod(action), client.formUrl(action, path))
	return getDisplayMessage(response, err)
}
// isLocalhostCall reports whether music_player's host points at the local
// machine: a localhost or 127.* address, over either http or https.
func (client *Client) isLocalhostCall() bool {
	localPrefixes := []string{
		"http://localhost",
		"https://localhost",
		"http://127.",
		"https://127.",
	}
	for _, prefix := range localPrefixes {
		if strings.HasPrefix(client.Host, prefix) {
			return true
		}
	}
	return false
}
// determineHttpMethod determines which method (GET, POST or PUT) is going to
// be used for the HTTP request that corresponds to the given action.
// Unknown actions yield the empty string.
func determineHttpMethod(action string) (method string) {
	switch action {
	case "songinfo", "queueinfo", "playlists":
		return "GET"
	case "next", "previous", "pause", "resume", "add":
		return "POST"
	case "play", "save", "stop":
		return "PUT"
	}
	return ""
}
// formUrl uses the entered action and name to construct the URL that is going
// to call music_player. Item-targeting actions ("add", "play", "save") get
// the escaped name appended as an extra path segment; unknown actions yield
// the empty string.
func (client *Client) formUrl(action string, name string) (requestUrl string) {
	switch action {
	case "add", "play", "save":
		requestUrl = client.Host + action + "/" + escape(name)
	case "songinfo", "queueinfo", "playlists", "next", "previous", "pause", "resume", "stop":
		requestUrl = client.Host + action
	}
	return requestUrl
}
// ResponseContainer struct is used to hold the unmarshalled json response of music_player
// Contains code (0 for success, 1 for failure), message and a list of file names
type ResponseContainer struct {
	Code    int      // 0 on success, non-zero on failure
	Message string   // human-readable status or error text
	Data    []string // optional list of file/playlist names
}
// performCall sends an HTTP request to music_player using the given method and
// url, reads the JSON response body and unmarshals it into a
// ResponseContainer.
//
// Supported methods are GET, POST and PUT; any other method now returns an
// error instead of dereferencing a nil response (the original left res nil
// and panicked at the body read).
func performCall(method string, url string) (ResponseContainer, error) {
	var res *http.Response
	var err error
	container := ResponseContainer{}
	switch method {
	case "GET":
		res, err = http.Get(url)
	case "POST":
		res, err = http.Post(url, "text/plain", nil)
	case "PUT":
		client := &http.Client{}
		request, err1 := http.NewRequest("PUT", url, nil)
		if err1 != nil {
			return container, err1
		}
		res, err = client.Do(request)
	default:
		return container, fmt.Errorf("unsupported HTTP method %q", method)
	}
	if err != nil {
		return container, err
	}
	// defer guarantees the body is closed on every return path below.
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return container, err
	}
	err = json.Unmarshal(body, &container)
	if err != nil {
		return container, err
	}
	return container, nil
}
// getDisplayMessage creates a string message based on the response of
// music_player. A transport/decoding error takes precedence; otherwise a
// failure response yields its message, and a success response yields the
// message followed by any returned names, one per line.
func getDisplayMessage(response ResponseContainer, err error) string {
	if err != nil {
		return err.Error()
	}
	if response.Code != 0 {
		return response.Message
	}
	buffer := bytes.NewBufferString(response.Message)
	// Ranging over a nil slice is a no-op, so the original's explicit
	// nil/len check was redundant.
	for _, element := range response.Data {
		buffer.WriteString("\n")
		buffer.WriteString(element)
	}
	return buffer.String()
}
// escape does the escaping of query string, encoding spaces as "%20" rather
// than the "+" produced by url.QueryEscape.
func escape(urlPath string) string {
	escaped := url.QueryEscape(urlPath)
	return strings.NewReplacer("+", "%20").Replace(escaped)
}
|
package topgun_test
import (
"fmt"
"time"
gclient "code.cloudfoundry.org/garden/client"
gconn "code.cloudfoundry.org/garden/client/connection"
_ "github.com/lib/pq"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
)
var _ = Describe(":life [#129726125] Hijacked containers", func() {
var (
gClient gclient.Client
containerHandle string
)
BeforeEach(func() {
	// Deploy a single-VM Concourse, then point a Garden client at the ATC VM
	// so tests can look up container handles directly.
	Deploy("deployments/single-vm.yml")
	gClient = gclient.New(gconn.New("tcp", fmt.Sprintf("%s:7777", atcIP)))
})
// getContainer returns a poll function reporting whether a container whose
// fly-table column `condition` equals `value` exists according to
// `fly containers`, and whether its handle resolves in Garden.
getContainer := func(condition, value string) func() hijackedContainerResult {
	return func() (h hijackedContainerResult) {
		containers := flyTable("containers")
		for _, c := range containers {
			if c[condition] == value {
				containerHandle = c["handle"]
				h.flyContainerExists = true
				break
			}
		}
		// NOTE(review): containerHandle is captured from the enclosing
		// Describe scope; when no row matches, the Garden lookup below reuses
		// the handle saved by a previous poll — confirm this is intended.
		_, err := gClient.Lookup(containerHandle)
		if err == nil {
			h.gardenContainerExists = true
		}
		return
	}
}
It("does not delete hijacked build containers from the database, and sets a 5 minute TTL on the container in garden", func() {
	By("setting the pipeline that has a build")
	fly("set-pipeline", "-n", "-c", "pipelines/task-waiting.yml", "-p", "hijacked-containers-test")
	By("triggering the build")
	fly("unpause-pipeline", "-p", "hijacked-containers-test")
	buildSession := spawnFly("trigger-job", "-w", "-j", "hijacked-containers-test/simple-job")
	Eventually(buildSession).Should(gbytes.Say("waiting for /tmp/stop-waiting"))
	By("hijacking into the build container")
	// Keep a long-running hijack session open so the container counts as
	// hijacked while the build finishes.
	hijackSession := spawnFly(
		"hijack",
		"-j", "hijacked-containers-test/simple-job",
		"-b", "1",
		"-s", "simple-task",
		"sleep", "120",
	)
	By("finishing the build")
	// Unblock the waiting task, then wait for the build session to exit.
	<-spawnFly(
		"hijack",
		"-j", "hijacked-containers-test/simple-job",
		"-b", "1",
		"-s", "simple-task",
		"touch", "/tmp/stop-waiting",
	).Exited
	<-buildSession.Exited
	By("triggering a new build")
	buildSession = spawnFly("trigger-job", "-w", "-j", "hijacked-containers-test/simple-job")
	Eventually(buildSession).Should(gbytes.Say("waiting for /tmp/stop-waiting"))
	<-spawnFly(
		"hijack",
		"-j", "hijacked-containers-test/simple-job",
		"-b", "2",
		"-s", "simple-task",
		"touch", "/tmp/stop-waiting",
	).Exited
	<-buildSession.Exited
	By("verifying the hijacked container exists via fly and Garden")
	// The build #1 container must survive even though a newer build has
	// since completed.
	Consistently(getContainer("build #", "1"), 2*time.Minute, 30*time.Second).Should(Equal(hijackedContainerResult{true, true}))
	By("unhijacking and seeing the container removed via fly/Garden after 5 minutes")
	hijackSession.Interrupt()
	<-hijackSession.Exited
	Eventually(getContainer("build #", "1"), 10*time.Minute, 30*time.Second).Should(Equal(hijackedContainerResult{false, false}))
})
It("does not delete hijacked one-off build containers from the database, and sets a 5 minute TTL on the container in garden", func() {
	By("triggering a one-off build")
	buildSession := spawnFly("execute", "-c", "tasks/wait.yml")
	Eventually(buildSession).Should(gbytes.Say("waiting for /tmp/stop-waiting"))
	By("hijacking into the build container")
	// Keep a long-running hijack session open so the container counts as
	// hijacked while the build finishes.
	hijackSession := spawnFly(
		"hijack",
		"-b", "1",
		"--",
		"while true; do sleep 1; done",
	)
	By("waiting for build to finish")
	// Unblock the waiting task, then wait for the build session to exit.
	<-spawnFly(
		"hijack",
		"-b", "1",
		"touch", "/tmp/stop-waiting",
	).Exited
	<-buildSession.Exited
	By("verifying the hijacked container exists via fly and Garden")
	Consistently(getContainer("build #", "1"), 2*time.Minute, 30*time.Second).Should(Equal(hijackedContainerResult{true, true}))
	By("unhijacking and seeing the container removed via fly/Garden after 5 minutes")
	hijackSession.Interrupt()
	<-hijackSession.Exited
	Eventually(getContainer("build #", "1"), 10*time.Minute, 30*time.Second).Should(Equal(hijackedContainerResult{false, false}))
})
It("does not delete hijacked resource containers from the database, and sets a 5 minute TTL on the container in garden", func() {
	By("setting the pipeline that has a build")
	fly("set-pipeline", "-n", "-c", "pipelines/get-task.yml", "-p", "hijacked-resource-test")
	fly("unpause-pipeline", "-p", "hijacked-resource-test")
	By("checking resource")
	fly("check-resource", "-r", "hijacked-resource-test/tick-tock")
	By("hijacking into the resource container")
	// Keep a long-running hijack session open so the check container counts
	// as hijacked while the pipeline is reconfigured without the resource.
	hijackSession := spawnFly(
		"hijack",
		"-c", "hijacked-resource-test/tick-tock",
		"sleep", "120",
	)
	By("reconfiguring pipeline without resource")
	fly("set-pipeline", "-n", "-c", "pipelines/task-waiting.yml", "-p", "hijacked-resource-test")
	By("verifying the hijacked container exists via fly and Garden")
	Consistently(getContainer("type", "check"), 2*time.Minute, 30*time.Second).Should(Equal(hijackedContainerResult{true, true}))
	By("unhijacking and seeing the container removed via fly/Garden after 5 minutes")
	hijackSession.Interrupt()
	<-hijackSession.Exited
	Eventually(getContainer("type", "check"), 40*time.Minute, 30*time.Second).Should(Equal(hijackedContainerResult{false, false}))
})
})
// hijackedContainerResult captures one poll of container existence: as seen
// by `fly containers` and by the Garden API, respectively.
type hijackedContainerResult struct {
	flyContainerExists    bool
	gardenContainerExists bool
}
Revert "unpend hijack check container test"
don't want this after all; later story will re-add the feature properly
This reverts commit d0b6125612e67a0802fcd21dc42595f6ea5a0b7e.
package topgun_test
import (
"fmt"
"time"
gclient "code.cloudfoundry.org/garden/client"
gconn "code.cloudfoundry.org/garden/client/connection"
_ "github.com/lib/pq"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
)
// This suite verifies that containers kept alive by an active `fly hijack`
// session are not garbage-collected: they stay visible via fly (database) and
// Garden while hijacked, and are removed from both after the session ends.
var _ = Describe(":life [#129726125] Hijacked containers", func() {
	var (
		gClient gclient.Client
		// containerHandle is the last handle matched by getContainer; it is
		// deliberately retained across poll calls (see NOTE below).
		containerHandle string
	)

	BeforeEach(func() {
		Deploy("deployments/single-vm.yml")
		// Garden listens on the ATC VM in the single-vm deployment.
		gClient = gclient.New(gconn.New("tcp", fmt.Sprintf("%s:7777", atcIP)))
	})

	// getContainer returns a poll function that reports whether a container
	// whose fly-table column `condition` equals `value` is listed by fly, and
	// whether the most recently matched handle still exists in Garden.
	getContainer := func(condition, value string) func() hijackedContainerResult {
		return func() (h hijackedContainerResult) {
			containers := flyTable("containers")
			for _, c := range containers {
				if c[condition] == value {
					containerHandle = c["handle"]
					h.flyContainerExists = true
					break
				}
			}

			// NOTE(review): containerHandle is intentionally not reset when fly
			// no longer lists the container, so we can still probe Garden for
			// the previously seen handle while waiting for it to be reaped.
			_, err := gClient.Lookup(containerHandle)
			if err == nil {
				h.gardenContainerExists = true
			}

			return
		}
	}

	It("does not delete hijacked build containers from the database, and sets a 5 minute TTL on the container in garden", func() {
		By("setting the pipeline that has a build")
		fly("set-pipeline", "-n", "-c", "pipelines/task-waiting.yml", "-p", "hijacked-containers-test")

		By("triggering the build")
		fly("unpause-pipeline", "-p", "hijacked-containers-test")
		buildSession := spawnFly("trigger-job", "-w", "-j", "hijacked-containers-test/simple-job")
		Eventually(buildSession).Should(gbytes.Say("waiting for /tmp/stop-waiting"))

		By("hijacking into the build container")
		hijackSession := spawnFly(
			"hijack",
			"-j", "hijacked-containers-test/simple-job",
			"-b", "1",
			"-s", "simple-task",
			"sleep", "120",
		)

		By("finishing the build")
		<-spawnFly(
			"hijack",
			"-j", "hijacked-containers-test/simple-job",
			"-b", "1",
			"-s", "simple-task",
			"touch", "/tmp/stop-waiting",
		).Exited
		<-buildSession.Exited

		// A second build makes build #1's container eligible for GC — except
		// that it is still hijacked.
		By("triggering a new build")
		buildSession = spawnFly("trigger-job", "-w", "-j", "hijacked-containers-test/simple-job")
		Eventually(buildSession).Should(gbytes.Say("waiting for /tmp/stop-waiting"))
		<-spawnFly(
			"hijack",
			"-j", "hijacked-containers-test/simple-job",
			"-b", "2",
			"-s", "simple-task",
			"touch", "/tmp/stop-waiting",
		).Exited
		<-buildSession.Exited

		By("verifying the hijacked container exists via fly and Garden")
		Consistently(getContainer("build #", "1"), 2*time.Minute, 30*time.Second).Should(Equal(hijackedContainerResult{true, true}))

		By("unhijacking and seeing the container removed via fly/Garden after 5 minutes")
		hijackSession.Interrupt()
		<-hijackSession.Exited

		Eventually(getContainer("build #", "1"), 10*time.Minute, 30*time.Second).Should(Equal(hijackedContainerResult{false, false}))
	})

	It("does not delete hijacked one-off build containers from the database, and sets a 5 minute TTL on the container in garden", func() {
		By("triggering a one-off build")
		buildSession := spawnFly("execute", "-c", "tasks/wait.yml")
		Eventually(buildSession).Should(gbytes.Say("waiting for /tmp/stop-waiting"))

		By("hijacking into the build container")
		hijackSession := spawnFly(
			"hijack",
			"-b", "1",
			"--",
			"while true; do sleep 1; done",
		)

		By("waiting for build to finish")
		<-spawnFly(
			"hijack",
			"-b", "1",
			"touch", "/tmp/stop-waiting",
		).Exited
		<-buildSession.Exited

		By("verifying the hijacked container exists via fly and Garden")
		Consistently(getContainer("build #", "1"), 2*time.Minute, 30*time.Second).Should(Equal(hijackedContainerResult{true, true}))

		By("unhijacking and seeing the container removed via fly/Garden after 5 minutes")
		hijackSession.Interrupt()
		<-hijackSession.Exited

		Eventually(getContainer("build #", "1"), 10*time.Minute, 30*time.Second).Should(Equal(hijackedContainerResult{false, false}))
	})

	It("does not delete hijacked resource containers from the database, and sets a 5 minute TTL on the container in garden", func() {
		Skip("skipping until hijacking to check containers is fixed #139984521")
		By("setting the pipeline that has a build")
		fly("set-pipeline", "-n", "-c", "pipelines/get-task.yml", "-p", "hijacked-resource-test")
		fly("unpause-pipeline", "-p", "hijacked-resource-test")

		By("checking resource")
		fly("check-resource", "-r", "hijacked-resource-test/tick-tock")

		By("hijacking into the resource container")
		hijackSession := spawnFly(
			"hijack",
			"-c", "hijacked-resource-test/tick-tock",
			"sleep", "120",
		)

		// Removing the resource from the pipeline makes its check container
		// eligible for GC — except that it is still hijacked.
		By("reconfiguring pipeline without resource")
		fly("set-pipeline", "-n", "-c", "pipelines/task-waiting.yml", "-p", "hijacked-resource-test")

		By("verifying the hijacked container exists via fly and Garden")
		Consistently(getContainer("type", "check"), 2*time.Minute, 30*time.Second).Should(Equal(hijackedContainerResult{true, true}))

		By("unhijacking and seeing the container removed via fly/Garden after 5 minutes")
		hijackSession.Interrupt()
		<-hijackSession.Exited

		Eventually(getContainer("type", "check"), 40*time.Minute, 30*time.Second).Should(Equal(hijackedContainerResult{false, false}))
	})
})
// hijackedContainerResult captures whether a container is still visible via
// fly (i.e. present in the ATC database) and via the Garden API after a
// hijack session.
type hijackedContainerResult struct {
	flyContainerExists    bool
	gardenContainerExists bool
}
|
package master
import (
"crypto/rand"
"encoding/base64"
"fmt"
. "github.com/KIT-MAMID/mamid/model"
"github.com/KIT-MAMID/mamid/msp"
"github.com/Sirupsen/logrus"
"github.com/jinzhu/gorm"
"time"
)
// caLog is the package-level logger for the cluster allocator module.
var caLog = logrus.WithField("module", "cluster_allocator")

// ClusterAllocator periodically recomputes the desired Mongod layout for all
// replica sets and publishes status messages on the bus.
type ClusterAllocator struct {
	// BusWriteChannel, if non-nil, receives status messages (e.g.
	// DesiredReplicaSetConstraintStatus) emitted during allocator runs.
	BusWriteChannel *chan<- interface{}
}

// persistence classifies a slave's storage as persistent or volatile.
type persistence uint

const (
	Persistent persistence = 0
	Volatile   persistence = 1
)

// memberCountTuple maps a persistence class to a member count.
type memberCountTuple map[persistence]uint
// Run starts a background goroutine that recompiles the Mongod layout every
// 11 seconds, each run in its own transaction. The goroutine runs until a
// value is received on its (currently unexported) quit channel.
func (c *ClusterAllocator) Run(db *DB) {
	ticker := time.NewTicker(11 * time.Second)
	quit := make(chan struct{})
	go func() {
		for {
			select {
			case <-ticker.C:
				caLog.Info("Periodic cluster allocator run")
				tx := db.Begin()
				compileErr := c.CompileMongodLayout(tx)
				if compileErr != nil {
					caLog.WithError(compileErr).Error("Periodic cluster allocator run failed")
					// BUG FIX: the failed transaction was previously left
					// open, leaking a connection every failing run.
					tx.Rollback()
					continue
				}
				if commitErr := tx.Commit().Error; commitErr != nil {
					caLog.WithError(commitErr).Error("Periodic cluster allocator commit failed")
					continue
				}
			case <-quit:
				ticker.Stop()
				return
			}
		}
	}()
}
// MamidManagementUsername is the username of the MongoDB root/management user
// created by MAMID.
const MamidManagementUsername = "mamid"
// InitializeGlobalSecrets ensures the cluster-wide secrets exist in the
// database: the MongoDB keyfile (internal cluster auth) and the root
// management user credential. Each is created only if missing; existing
// records are left untouched. Must be called inside transaction tx.
func (c *ClusterAllocator) InitializeGlobalSecrets(tx *gorm.DB) (err error) {
	var keyfile MongodKeyfile
	res := tx.First(&keyfile)
	switch { // Assume there is at most one
	case res.Error != nil && !res.RecordNotFound():
		// Real database error: abort.
		return res.Error
	case res.Error == nil:
		// Do nothing, assume already created
	case res.Error != nil && res.RecordNotFound():
		// Create keyfile.
		// MongoDB documentation indicates contents of the keyfile must be base64 with max 1024 characters
		content, err := randomBase64(1024)
		if err != nil {
			return fmt.Errorf("could not generate keyfile contents: %s", err)
		}
		keyfile = MongodKeyfile{
			Content: content,
		}
		if err := tx.Create(&keyfile).Error; err != nil {
			return fmt.Errorf("could not create keyfile contents: error inserting into database: %s", err)
		}
	}

	var rootCredential MongodbCredential
	res = tx.Table("mongodb_root_credentials").First(&rootCredential)
	switch { // Assume there is at most one
	case res.Error != nil && !res.RecordNotFound():
		// Real database error: abort.
		return res.Error
	case res.Error == nil:
		// Do nothing, assume already created
	case res.Error != nil && res.RecordNotFound():
		// Create root credential.
		password, err := randomBase64(40)
		if err != nil {
			return fmt.Errorf("could not generate management user passphrase: %s", err)
		}
		rootCredential = MongodbCredential{
			Username: MamidManagementUsername,
			Password: password,
		}
		if err := tx.Table("mongodb_root_credentials").Create(&rootCredential).Error; err != nil {
			return fmt.Errorf("could not create MongoDB root credential: %s", err)
		}
	}
	return nil
}
// randomBase64 returns a cryptographically random string of exactly `length`
// base64 characters.
//
// FIX: the parameter was named `len`, shadowing the builtin inside the body.
func randomBase64(length int) (str string, err error) {
	randBytes := make([]byte, length)
	_, err = rand.Read(randBytes)
	if err != nil {
		return "", fmt.Errorf("error reading random bytes: %s", err)
	}
	// Encoding `length` bytes yields ceil(length/3)*4 >= length characters,
	// so truncating to `length` is always in range.
	return base64.StdEncoding.EncodeToString(randBytes)[:length], nil
}
// CompileMongodLayout recomputes the desired Mongod layout inside transaction
// tx: it force-destroys orphaned Mongods, purges destroyed ones, removes
// excess replica set members, spawns new members for degraded replica sets,
// and publishes a constraint status message per replica set on the bus.
// Database errors are raised via panic and handled in the deferred recover.
func (c *ClusterAllocator) CompileMongodLayout(tx *gorm.DB) (err error) {
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		// BUG FIX: this was `switch r { case r == nil: ... }`, which compared
		// the recovered value against *booleans*; every panic therefore hit
		// `default` and re-panicked, making the ErrInvalidTransaction
		// recovery unreachable. A tagless switch restores the intent.
		// NOTE(review): panics carrying other error values still propagate —
		// confirm that is intended before widening this recovery.
		switch {
		case r == gorm.ErrInvalidTransaction:
			err = r.(error)
		default:
			panic(r)
		}
	}()

	// mark orphaned Mongod.DesiredState as force_destroyed
	// orphaned = Mongods whose parent Replica Set has been destroyed
	// NOTE: slaves do not cause orphaned Mongods as only slaves without Monogds can be deleted from the DB
	caLog.Debug("updating desired state of orphaned Mongods")
	markOrphanedMongodsDestroyedRes := tx.Exec(`
		UPDATE mongod_states SET execution_state=?
		WHERE id IN (
			SELECT desired_state_id FROM mongods m WHERE replica_set_id IS NULL
		)`, MongodExecutionStateForceDestroyed)
	if markOrphanedMongodsDestroyedRes.Error != nil {
		caLog.Errorf("error updating desired state of orphaned Mongods: %s", markOrphanedMongodsDestroyedRes.Error)
		panic(markOrphanedMongodsDestroyedRes.Error)
	} else {
		caLog.Debugf("marked `%d` Mongod.DesiredState of orphaned Mongods as `force_destroyed`", markOrphanedMongodsDestroyedRes.RowsAffected)
	}

	// remove destroyed Mongods from the database
	// destroyed: desired & observed state is `destroyed` OR no observed state
	caLog.Debug("removing destroyed Mongods from the database")
	removeDestroyedMongodsRes := tx.Exec(`
	DELETE FROM mongods WHERE id IN ( -- we use cascadation to also delete the mongod states
		-- all mongod id's whose desired and observed state are in ExecutionState destroyed
		SELECT m.id
		FROM mongods m
		LEFT OUTER JOIN mongod_states desired_state ON m.desired_state_id = desired_state.id
		LEFT OUTER JOIN mongod_states observed_state ON m.observed_state_id = observed_state.id
		WHERE
			(desired_state.execution_state = ? OR desired_state.execution_state = ?)
			AND
			(
				observed_state.execution_state = ?
				OR
				(m.observed_state_id IS NULL --we know mongod does not exist on slave
				 AND
				 m.observation_error_id IS NULL)
			)
	)
	`, MongodExecutionStateDestroyed, MongodExecutionStateForceDestroyed, MongodExecutionStateDestroyed)
	if removeDestroyedMongodsRes.Error != nil {
		caLog.Errorf("error removing destroyed Mongods from the database: %s", removeDestroyedMongodsRes.Error)
		panic(removeDestroyedMongodsRes.Error)
	} else {
		caLog.Debugf("removed `%d` destroyed Mongods from the database", removeDestroyedMongodsRes.RowsAffected)
	}

	// list of replica sets with number of excess mongods
	replicaSets, err := tx.Raw(`SELECT
		r.id,
		(SELECT COUNT(*) FROM replica_set_effective_members WHERE replica_set_id = r.id AND persistent_storage = ?)
			- r.persistent_member_count AS deletable_persistent,
		(SELECT COUNT(*) FROM replica_set_effective_members WHERE replica_set_id = r.id AND persistent_storage = ?)
			- r.volatile_member_count AS deletable_volatile
		FROM replica_sets r`, true, false,
	).Rows()
	if err != nil {
		panic(err)
	}

	type excessMongodsRow struct {
		replicaSetID                             uint
		deletable_persistent, deletable_volatile int
	}
	// Materialize the rows before issuing further statements on the same
	// connection.
	excessMongodsRows := make([]excessMongodsRow, 0)
	for replicaSets.Next() {
		var row excessMongodsRow
		err := replicaSets.Scan(&row.replicaSetID, &row.deletable_persistent, &row.deletable_volatile)
		if err != nil {
			panic(err)
		}
		excessMongodsRows = append(excessMongodsRows, row)
	}
	replicaSets.Close()

	for _, r := range excessMongodsRows {
		for _, p := range []persistence{Persistent, Volatile} {
			var deletable_count int
			if p.PersistentStorage() {
				deletable_count = r.deletable_persistent
			} else {
				deletable_count = r.deletable_volatile
			}

			// Assert that deletable_count > 0
			// SQLite will not LIMIT if deletable_count is negative!
			if deletable_count <= 0 {
				continue
			}

			caLog.Infof("removing excess mongods for replica set `%#v`: up to `%d` `%s` mongods", r.replicaSetID, deletable_count, p)

			var deletableMongds []*Mongod
			err := tx.Raw(`SELECT m.*
				FROM replica_sets r
				JOIN mongods m ON m.replica_set_id = r.id
				JOIN slaves s ON s.id = m.parent_slave_id
				JOIN slave_utilization su ON s.id = su.id
				WHERE
					r.id = ?
					AND s.persistent_storage = ?
					AND s.configured_state != ?
				ORDER BY (CASE WHEN s.configured_state = ? THEN 1 ELSE 2 END) ASC,
					 (CASE WHEN s.observation_error_id IS NULL THEN 0 ELSE 1 END) DESC, -- prioritize slaves with observation error
					 su.utilization DESC
				LIMIT ?`, r.replicaSetID, p.PersistentStorage(), SlaveStateMaintenance, SlaveStateDisabled, deletable_count,
			).Find(&deletableMongds).Error
			if err != nil {
				panic(err)
			}

			caLog.Infof("setting %d mongods for replica set `%#v` to desired state `destroyed`", len(deletableMongds), r.replicaSetID)
			for _, m := range deletableMongds {
				caLog.Debugf("setting desired mongod_state of mongod `%#v` to `destroyed`", m)
				res := tx.Exec("UPDATE mongod_states SET execution_state=? WHERE id=?", MongodExecutionStateDestroyed, m.DesiredStateID)
				if res.Error != nil {
					panic(res.Error)
				}
				if res.RowsAffected < 1 {
					caLog.Errorf("setting desired mongod_state of mongod `%#v` to `destroyed` did not affect any row", m)
				}
				if res.RowsAffected > 1 {
					caLog.Errorf("internal inconsistency: setting desired mongod_state of mongod `%#v` to `destroyed` affected more than one row", m)
				}
			}
		}
	}

	//All unsatisfiable replica sets (independent of persistence)
	unsatisfiable_replica_set_ids := []int64{}

	// Now add new members
	for _, p := range []persistence{Persistent, Volatile} {
		var memberCountColumnName string
		if p.PersistentStorage() {
			memberCountColumnName = "persistent_member_count"
		} else {
			memberCountColumnName = "volatile_member_count"
		}

		//Unsatisfiable replica sets for the current persistence
		unsatisfiable_replica_set_ids_by_persistance := []int64{0} // we always start at 1, this is a workaround for the statement generator producing (NULL) in case of an empty set otherwise

		for {
			replicaSet := struct {
				ReplicaSet
				ConfiguredMemberCount int
			}{}

			// HEAD of degraded replica sets PQ
			res := tx.Raw(`SELECT r.*, COUNT(DISTINCT members.mongod_id) as "configured_member_count"
				FROM replica_sets r
				LEFT OUTER JOIN replica_set_configured_members members
					ON r.id = members.replica_set_id
					AND members.persistent_storage = ?
				WHERE
					r.`+memberCountColumnName+` != 0
					AND
					r.id NOT IN (?)
				GROUP BY r.id
				HAVING COUNT(DISTINCT members.mongod_id) < r.`+memberCountColumnName+`
				ORDER BY COUNT(DISTINCT members.mongod_id) / r.`+memberCountColumnName+`
				LIMIT 1`, p.PersistentStorage(), unsatisfiable_replica_set_ids_by_persistance,
			).Scan(&replicaSet)
			if res.RecordNotFound() {
				caLog.Infof("finished repairing degraded replica sets in need of `%s` members", p)
				break
			} else if res.Error != nil {
				panic(res.Error)
			}

			caLog.Debugf("looking for least busy `%s` slave suitable as mongod host for replica set `%s`", p, replicaSet.Name)
			var leastBusySuitableSlave Slave
			res = tx.Raw(`SELECT s.*
				FROM slave_utilization s
				WHERE
					s.persistent_storage = ?
					AND
					s.free_mongods > 0
					AND
					s.configured_state = ?
					AND (
						s.risk_group_id NOT IN (
							SELECT DISTINCT s.risk_group_id
							FROM mongods m
							JOIN slaves s ON m.parent_slave_id = s.id
							WHERE m.replica_set_id = ?
						)
						-- 0 is the default risk group that is not a risk group,
						-- i.e from which multiple slaves can be allocated for the same replica set
						OR s.risk_group_id IS NULL
					)
					AND
					s.id NOT IN ( -- Slaves already hosting a Mongod of the Replica Set
						SELECT DISTINCT m.parent_slave_id
						FROM mongods m
						WHERE m.replica_set_id = ?
					)
				ORDER BY
					(CASE WHEN s.observation_error_id IS NULL THEN 0 ELSE 1 END) ASC, -- prioritize slaves without observation error
					s.utilization ASC
				LIMIT 1`, p.PersistentStorage(), SlaveStateActive, replicaSet.ID, replicaSet.ID,
			).Scan(&leastBusySuitableSlave)
			if res.RecordNotFound() {
				// BUG FIX: logrus.Warn does not interpret format verbs; this
				// previously logged the raw format string plus stray args.
				caLog.Warnf("unsatisfiable replica set `%s`: not enough suitable `%s` slaves", replicaSet.Name, p)
				unsatisfiable_replica_set_ids_by_persistance = append(unsatisfiable_replica_set_ids_by_persistance, replicaSet.ID)
				unsatisfiable_replica_set_ids = append(unsatisfiable_replica_set_ids, replicaSet.ID)
				continue
			} else if res.Error != nil {
				panic(res.Error)
			}

			caLog.Debugf("found slave `%s` as host for new mongod for replica set `%s`", leastBusySuitableSlave.Hostname, replicaSet.Name)
			m, err := c.spawnMongodOnSlave(tx, &leastBusySuitableSlave, &replicaSet.ReplicaSet)
			if err != nil {
				caLog.Errorf("could not spawn mongod on slave `%s`: %s", leastBusySuitableSlave.Hostname, err.Error())
				// the queries should have not returned a slave without free ports
				panic(err)
			} else {
				caLog.Debugf("spawned mongod `%d` for replica set `%s` on slave `%s`", m.ID, replicaSet.Name, leastBusySuitableSlave.Hostname)
			}
		}
	}

	// Send replica set constraint status messages on bus for every replica set
	if c.BusWriteChannel != nil {
		// Get replica sets and the count of their actually configured members from the database
		replicaSetsWithMemberCounts, err := tx.Raw(`SELECT
			r.*,
			(SELECT COUNT(*) FROM replica_set_configured_members WHERE replica_set_id = r.id AND persistent_storage = ?)
				AS configured_persistent_members,
			(SELECT COUNT(*) FROM replica_set_configured_members WHERE replica_set_id = r.id AND persistent_storage = ?)
				AS configured_volatile_members
			FROM replica_sets r
		`, true, false).Rows()
		if err != nil {
			panic(err)
		}
		for replicaSetsWithMemberCounts.Next() {
			var replicaSet ReplicaSet
			tx.ScanRows(replicaSetsWithMemberCounts, &replicaSet)
			configuredMemberCounts := struct {
				ConfiguredPersistentMembers uint
				ConfiguredVolatileMembers   uint
			}{}
			tx.ScanRows(replicaSetsWithMemberCounts, &configuredMemberCounts)

			unsatisfied := false
			//Check if replica set is in unsatisfiable list
			for _, id := range unsatisfiable_replica_set_ids {
				unsatisfied = unsatisfied || (id == replicaSet.ID)
			}

			*c.BusWriteChannel <- DesiredReplicaSetConstraintStatus{
				Unsatisfied:               unsatisfied,
				ReplicaSet:                replicaSet,
				ConfiguredPersistentCount: configuredMemberCounts.ConfiguredPersistentMembers,
				ConfiguredVolatileCount:   configuredMemberCounts.ConfiguredVolatileMembers,
			}
		}
	}

	if err == nil {
		caLog.Info("Cluster allocator done successfully")
	} else {
		caLog.WithError(err).Error("Cluster allocator done with error")
	}
	return err
}
// replicaSets loads all ReplicaSets with their Mongods and, for each Mongod,
// its ObservedState, DesiredState and ParentSlave relations. Panics on
// database errors, consistent with the panic-based error handling of this
// package; a missing Observed/DesiredState record is tolerated.
func (c *ClusterAllocator) replicaSets(tx *gorm.DB) (replicaSets []*ReplicaSet) {
	if err := tx.Where(ReplicaSet{}).Find(&replicaSets).Error; err != nil {
		panic(err)
	}
	for _, r := range replicaSets {
		if err := tx.Model(r).Related(&r.Mongods, "Mongods").Error; err != nil {
			panic(err)
		}
		for _, m := range r.Mongods {
			res := tx.Model(m).Related(&m.ObservedState, "ObservedState")
			if err := res.Error; !res.RecordNotFound() && err != nil {
				panic(err)
			}
			res = tx.Model(m).Related(&m.DesiredState, "DesiredState")
			if err := res.Error; !res.RecordNotFound() && err != nil {
				panic(err)
			}
			//m.ParentSlave is a pointer and gorm does not initialize pointers on its own
			var parentSlave Slave
			res = tx.Model(m).Related(&parentSlave, "ParentSlave")
			if err := res.Error; err != nil {
				panic(err)
			}
			m.ParentSlave = &parentSlave
		}
	}
	return replicaSets
}
// slavePersistence maps a slave's storage flag onto the persistence enum.
func slavePersistence(s *Slave) persistence {
	if s.PersistentStorage {
		return Persistent
	}
	return Volatile
}
// PersistentStorage reports whether this persistence class denotes
// persistent storage. Panics on values outside the enum.
func (p persistence) PersistentStorage() bool {
	if p == Persistent {
		return true
	}
	if p == Volatile {
		return false
	}
	panic("invalid value for persistence")
}
// String implements fmt.Stringer for the persistence enum. Panics on values
// outside the enum.
func (p persistence) String() string {
	if p == Persistent {
		return "persistent"
	}
	if p == Volatile {
		return "volatile"
	}
	panic("invalid value for persistence")
}
// spawnMongodOnSlave creates a new Mongod for replica set r on slave s,
// choosing the lowest unused port in the slave's port range, and creates its
// desired MongodState with ExecutionState `running`. Returns an error only
// when no free port is available; database errors panic (package convention).
func (c *ClusterAllocator) spawnMongodOnSlave(tx *gorm.DB, s *Slave, r *ReplicaSet) (*Mongod, error) {
	// Ports already occupied on this slave, sorted ascending (required by
	// findUnusedPort).
	var usedPorts []PortNumber
	res := tx.Raw(`
		SELECT m.port
		FROM mongods m
		WHERE m.parent_slave_id = ?
		ORDER BY m.port ASC
	`, s.ID).Pluck("port", &usedPorts)
	if !res.RecordNotFound() && res.Error != nil {
		panic(res.Error)
	}
	caLog.Debugf("slave: %#v: found used ports: %v", s, usedPorts)

	unusedPort, found := findUnusedPort(usedPorts, s.MongodPortRangeBegin, s.MongodPortRangeEnd)
	if !found {
		return nil, fmt.Errorf("could not spawn Mongod: no free port on slave `%s`", s.Hostname)
	}

	m := &Mongod{
		Port:          unusedPort,
		ReplSetName:   r.Name,
		ParentSlaveID: s.ID,
		ReplicaSetID:  NullIntValue(r.ID),
	}
	if err := tx.Create(&m).Error; err != nil {
		panic(err)
	}

	desiredState := MongodState{
		ParentMongodID: m.ID,
		ShardingRole:   r.ShardingRole,
		ExecutionState: MongodExecutionStateRunning,
	}
	if err := tx.Create(&desiredState).Error; err != nil {
		panic(err)
	}

	// Link the Mongod to its freshly created desired state.
	if err := tx.Model(&m).Update("DesiredStateID", desiredState.ID).Error; err != nil {
		panic(err)
	}

	return m, nil
}
// findUnusedPort returns the first port in [minPort, maxPort) that does not
// occur in usedPorts, using a merge-join-like scan. usedPorts is assumed to
// be sorted ascending. The second result reports whether a port was found.
func findUnusedPort(usedPorts []PortNumber, minPort, maxPort PortNumber) (unusedPort PortNumber, found bool) {
	// Skip used ports below the range of interest.
	i := 0
	for i < len(usedPorts) && usedPorts[i] < minPort {
		i++
	}

	for candidate := minPort; candidate < maxPort; candidate++ {
		if i >= len(usedPorts) {
			// All used ports exhausted: candidate is free.
			return candidate, true
		}
		switch {
		case usedPorts[i] == candidate:
			i++ // candidate is taken; move on to the next used port
		case usedPorts[i] > candidate:
			// Next used port lies beyond candidate: candidate is free.
			return candidate, true
		}
		// usedPorts[i] < candidate only occurs for unsorted/duplicate input;
		// keep scanning, mirroring the original behavior.
	}
	return 0, false
}
// slaveMaxNumberOfMongods returns the size of the slave's Mongod port range,
// i.e. the maximum number of Mongods it can host. Panics if the range is
// empty or inverted.
func slaveMaxNumberOfMongods(s *Slave) PortNumber {
	n := s.MongodPortRangeEnd - s.MongodPortRangeBegin
	if n <= 0 {
		panic("datastructure invariant violated: the range of Mongod ports for a slave must be sized greater than 0")
	}
	return n
}
// slaveUsage returns how many Mongods the slave currently hosts and how many
// it can host at most.
func slaveUsage(s *Slave) (runningMongods, maxMongods uint) {
	runningMongods = uint(len(s.Mongods))
	maxMongods = uint(slaveMaxNumberOfMongods(s))
	return runningMongods, maxMongods
}
// slaveBusyRate returns the slave's utilization as running/capacity in [0, 1].
func slaveBusyRate(s *Slave) float64 {
	running, capacity := slaveUsage(s)
	return float64(running) / float64(capacity)
}
// MongoDB replica set member priorities used when generating desired replica
// set configurations (valid MongoDB range: 0 to 1000).
const ( // between 0 and 1000
	// Volatile members are prioritized highest.
	ReplicaSetMemberPriorityVolatile float64 = 500
	// Persistent members get a low but electable priority.
	ReplicaSetMemberPriorityPersistent float64 = 10
	// Members scheduled for removal should almost never become primary.
	ReplicaSetMemberPriorityToBeRemoved float64 = 1
	// Priority 0: member can never become primary.
	ReplicaSetMemberPriorityNone float64 = 0
)
// DesiredMSPReplicaSetMembersForReplicaSetID returns the list of
// msp.ReplicaSetMember a model.ReplicaSet should have as members, calculating
// priorities and selecting at most 7 voting members (a MongoDB limit). It
// also returns the Mongod to be used as the replica set initiator (the first
// row, which is always voting).
func DesiredMSPReplicaSetMembersForReplicaSetID(tx *gorm.DB, replicaSetID int64) (replicaSetMembers []msp.ReplicaSetMember, initiator Mongod, err error) {
	rows, err := tx.Raw(`
		SELECT
			m.id,
			s.hostname,
			m.port,
			CASE s.configured_state
			WHEN ? THEN ? -- prioritize members to be removed lower
			ELSE
				CASE s.persistent_storage
				WHEN false THEN ? -- prioritize volatile members higher
				ELSE ?
				END
			END as priority
		FROM mongods m
		JOIN replica_sets r ON m.replica_set_id = r.id
		JOIN mongod_states desired_state ON m.desired_state_id = desired_state.id
		JOIN slaves s ON m.parent_slave_id = s.id
		WHERE r.id = ?
		AND desired_state.execution_state = ?
		ORDER BY
			s.configured_state DESC, -- ordered by slave configured_state so that mongods on running slaves become voting first
			m.id ASC
	`, SlaveStateDisabled, ReplicaSetMemberPriorityToBeRemoved, ReplicaSetMemberPriorityVolatile, ReplicaSetMemberPriorityPersistent, replicaSetID, MongodExecutionStateRunning,
	).Rows()
	if err != nil {
		return []msp.ReplicaSetMember{}, Mongod{}, fmt.Errorf("could not fetch ReplicaSetMembers for ReplicaSet.ID `%v`: %s", replicaSetID, err)
	}
	// BUG FIX: the defer was previously placed *before* the error check,
	// dereferencing a nil rows on query failure. Double Close (defer plus the
	// explicit Close below) is safe on *sql.Rows.
	defer rows.Close()

	var initiatorId int64
	for i := 0; rows.Next(); i++ {
		member := msp.ReplicaSetMember{}
		var mongodId int64
		err = rows.Scan(&mongodId, &member.HostPort.Hostname, &member.HostPort.Port, &member.Priority)
		if err != nil {
			return
		}

		if i == 0 {
			//Use first mongod as initiator as it can vote.
			initiatorId = mongodId
		}

		// A replica set may have at most 7 voting members
		// The query is ordered by slave configured_state so that mongods on running slaves become voting first
		if i < 7 {
			member.Votes = 1
		} else {
			member.Votes = 0
			member.Priority = 0 //Mongodb says: priority must be 0 when non-voting
		}

		replicaSetMembers = append(replicaSetMembers, member)
	}
	// Surface iteration errors that rows.Next() swallows.
	if err = rows.Err(); err != nil {
		return []msp.ReplicaSetMember{}, Mongod{}, err
	}
	// Close explicitly before issuing the next query on this connection.
	rows.Close()

	if res := tx.First(&initiator, initiatorId); res.Error != nil && !res.RecordNotFound() {
		return []msp.ReplicaSetMember{}, Mongod{}, res.Error
	}

	return
}
FIX: cluster_allocator: priority generation
corrected the ORDER BY clause on slave configured_state so that priorities are generated in the intended order
package master
import (
"crypto/rand"
"encoding/base64"
"fmt"
. "github.com/KIT-MAMID/mamid/model"
"github.com/KIT-MAMID/mamid/msp"
"github.com/Sirupsen/logrus"
"github.com/jinzhu/gorm"
"time"
)
// caLog is the package-level logger for the cluster allocator module.
var caLog = logrus.WithField("module", "cluster_allocator")

// ClusterAllocator periodically recomputes the desired Mongod layout for all
// replica sets and publishes status messages on the bus.
type ClusterAllocator struct {
	// BusWriteChannel, if non-nil, receives status messages (e.g.
	// DesiredReplicaSetConstraintStatus) emitted during allocator runs.
	BusWriteChannel *chan<- interface{}
}

// persistence classifies a slave's storage as persistent or volatile.
type persistence uint

const (
	Persistent persistence = 0
	Volatile   persistence = 1
)

// memberCountTuple maps a persistence class to a member count.
type memberCountTuple map[persistence]uint
// Run starts a background goroutine that recompiles the Mongod layout every
// 11 seconds, each run in its own transaction. The goroutine runs until a
// value is received on its (currently unexported) quit channel.
func (c *ClusterAllocator) Run(db *DB) {
	ticker := time.NewTicker(11 * time.Second)
	quit := make(chan struct{})
	go func() {
		for {
			select {
			case <-ticker.C:
				caLog.Info("Periodic cluster allocator run")
				tx := db.Begin()
				compileErr := c.CompileMongodLayout(tx)
				if compileErr != nil {
					caLog.WithError(compileErr).Error("Periodic cluster allocator run failed")
					// BUG FIX: the failed transaction was previously left
					// open, leaking a connection every failing run.
					tx.Rollback()
					continue
				}
				if commitErr := tx.Commit().Error; commitErr != nil {
					caLog.WithError(commitErr).Error("Periodic cluster allocator commit failed")
					continue
				}
			case <-quit:
				ticker.Stop()
				return
			}
		}
	}()
}
// MamidManagementUsername is the username of the MongoDB root/management user
// created by MAMID.
const MamidManagementUsername = "mamid"
// InitializeGlobalSecrets ensures the cluster-wide secrets exist in the
// database: the MongoDB keyfile (internal cluster auth) and the root
// management user credential. Each is created only if missing; existing
// records are left untouched. Must be called inside transaction tx.
func (c *ClusterAllocator) InitializeGlobalSecrets(tx *gorm.DB) (err error) {
	var keyfile MongodKeyfile
	res := tx.First(&keyfile)
	switch { // Assume there is at most one
	case res.Error != nil && !res.RecordNotFound():
		// Real database error: abort.
		return res.Error
	case res.Error == nil:
		// Do nothing, assume already created
	case res.Error != nil && res.RecordNotFound():
		// Create keyfile.
		// MongoDB documentation indicates contents of the keyfile must be base64 with max 1024 characters
		content, err := randomBase64(1024)
		if err != nil {
			return fmt.Errorf("could not generate keyfile contents: %s", err)
		}
		keyfile = MongodKeyfile{
			Content: content,
		}
		if err := tx.Create(&keyfile).Error; err != nil {
			return fmt.Errorf("could not create keyfile contents: error inserting into database: %s", err)
		}
	}

	var rootCredential MongodbCredential
	res = tx.Table("mongodb_root_credentials").First(&rootCredential)
	switch { // Assume there is at most one
	case res.Error != nil && !res.RecordNotFound():
		// Real database error: abort.
		return res.Error
	case res.Error == nil:
		// Do nothing, assume already created
	case res.Error != nil && res.RecordNotFound():
		// Create root credential.
		password, err := randomBase64(40)
		if err != nil {
			return fmt.Errorf("could not generate management user passphrase: %s", err)
		}
		rootCredential = MongodbCredential{
			Username: MamidManagementUsername,
			Password: password,
		}
		if err := tx.Table("mongodb_root_credentials").Create(&rootCredential).Error; err != nil {
			return fmt.Errorf("could not create MongoDB root credential: %s", err)
		}
	}
	return nil
}
// randomBase64 returns a cryptographically random string of exactly `length`
// base64 characters.
//
// FIX: the parameter was named `len`, shadowing the builtin inside the body.
func randomBase64(length int) (str string, err error) {
	randBytes := make([]byte, length)
	_, err = rand.Read(randBytes)
	if err != nil {
		return "", fmt.Errorf("error reading random bytes: %s", err)
	}
	// Encoding `length` bytes yields ceil(length/3)*4 >= length characters,
	// so truncating to `length` is always in range.
	return base64.StdEncoding.EncodeToString(randBytes)[:length], nil
}
func (c *ClusterAllocator) CompileMongodLayout(tx *gorm.DB) (err error) {
defer func() {
r := recover()
if r == nil {
return
}
switch r {
case r == nil:
return
case r == gorm.ErrInvalidTransaction:
err = r.(error)
default:
panic(r)
}
}()
// mark orphaned Mongod.DesiredState as force_destroyed
// orphaned = Mongods whose parent Replica Set has been destroyed
// NOTE: slaves do not cause orphaned Mongods as only slaves without Monogds can be deleted from the DB
caLog.Debug("updating desired state of orphaned Mongods")
markOrphanedMongodsDestroyedRes := tx.Exec(`
UPDATE mongod_states SET execution_state=?
WHERE id IN (
SELECT desired_state_id FROM mongods m WHERE replica_set_id IS NULL
)`, MongodExecutionStateForceDestroyed)
if markOrphanedMongodsDestroyedRes.Error != nil {
caLog.Errorf("error updating desired state of orphaned Mongods: %s", markOrphanedMongodsDestroyedRes.Error)
panic(markOrphanedMongodsDestroyedRes.Error)
} else {
caLog.Debugf("marked `%d` Mongod.DesiredState of orphaned Mongods as `force_destroyed`", markOrphanedMongodsDestroyedRes.RowsAffected)
}
// remove destroyed Mongods from the database
// destroyed: desired & observed state is `destroyed` OR no observed state
caLog.Debug("removing destroyed Mongods from the database")
removeDestroyedMongodsRes := tx.Exec(`
DELETE FROM mongods WHERE id IN ( -- we use cascadation to also delete the mongod states
-- all mongod id's whose desired and observed state are in ExecutionState destroyed
SELECT m.id
FROM mongods m
LEFT OUTER JOIN mongod_states desired_state ON m.desired_state_id = desired_state.id
LEFT OUTER JOIN mongod_states observed_state ON m.observed_state_id = observed_state.id
WHERE
(desired_state.execution_state = ? OR desired_state.execution_state = ?)
AND
(
observed_state.execution_state = ?
OR
(m.observed_state_id IS NULL --we know mongod does not exist on slave
AND
m.observation_error_id IS NULL)
)
)
`, MongodExecutionStateDestroyed, MongodExecutionStateForceDestroyed, MongodExecutionStateDestroyed)
if removeDestroyedMongodsRes.Error != nil {
caLog.Errorf("error removing destroyed Mongods from the database: %s", removeDestroyedMongodsRes.Error)
panic(removeDestroyedMongodsRes.Error)
} else {
caLog.Debugf("removed `%d` destroyed Mongods from the database", removeDestroyedMongodsRes.RowsAffected)
}
// list of replica sets with number of excess mongods
replicaSets, err := tx.Raw(`SELECT
r.id,
(SELECT COUNT(*) FROM replica_set_effective_members WHERE replica_set_id = r.id AND persistent_storage = ?)
- r.persistent_member_count AS deletable_persistent,
(SELECT COUNT(*) FROM replica_set_effective_members WHERE replica_set_id = r.id AND persistent_storage = ?)
- r.volatile_member_count AS deletable_volatile
FROM replica_sets r`, true, false,
).Rows()
if err != nil {
panic(err)
}
type excessMongodsRow struct {
replicaSetID uint
deletable_persistent, deletable_volatile int
}
excessMongodsRows := make([]excessMongodsRow, 0)
for replicaSets.Next() {
var row excessMongodsRow
err := replicaSets.Scan(&row.replicaSetID, &row.deletable_persistent, &row.deletable_volatile)
if err != nil {
panic(err)
}
excessMongodsRows = append(excessMongodsRows, row)
}
replicaSets.Close()
for _, r := range excessMongodsRows {
for _, p := range []persistence{Persistent, Volatile} {
var deletable_count int
if p.PersistentStorage() {
deletable_count = r.deletable_persistent
} else {
deletable_count = r.deletable_volatile
}
// Assert that deletable_count > 0
// SQLite will not LIMIT if deletable_count is negative!
if deletable_count <= 0 {
continue
}
caLog.Infof("removing excess mongods for replica set `%#v`: up to `%d` `%s` mongods", r.replicaSetID, deletable_count, p)
var deletableMongds []*Mongod
err := tx.Raw(`SELECT m.*
FROM replica_sets r
JOIN mongods m ON m.replica_set_id = r.id
JOIN slaves s ON s.id = m.parent_slave_id
JOIN slave_utilization su ON s.id = su.id
WHERE
r.id = ?
AND s.persistent_storage = ?
AND s.configured_state != ?
ORDER BY (CASE WHEN s.configured_state = ? THEN 1 ELSE 2 END) ASC,
(CASE WHEN s.observation_error_id IS NULL THEN 0 ELSE 1 END) DESC, -- prioritize slaves with observation error
su.utilization DESC
LIMIT ?`, r.replicaSetID, p.PersistentStorage(), SlaveStateMaintenance, SlaveStateDisabled, deletable_count,
).Find(&deletableMongds).Error
if err != nil {
panic(err)
}
caLog.Infof("setting %d mongods for replica set `%#v` to desired state `destroyed`", len(deletableMongds), r.replicaSetID)
for _, m := range deletableMongds {
caLog.Debugf("setting desired mongod_state of mongod `%#v` to `destroyed`", m)
res := tx.Exec("UPDATE mongod_states SET execution_state=? WHERE id=?", MongodExecutionStateDestroyed, m.DesiredStateID)
if res.Error != nil {
panic(res.Error)
}
if res.RowsAffected < 1 {
caLog.Errorf("setting desired mongod_state of mongod `%#v` to `destroyed` did not affect any row", m)
}
if res.RowsAffected > 1 {
caLog.Errorf("internal inconsistency: setting desired mongod_state of mongod `%#v` to `destroyed` affected more than one row", m)
}
}
}
}
//All unsatisfiable replica sets (independent of persistence)
unsatisfiable_replica_set_ids := []int64{}
// Now add new members
for _, p := range []persistence{Persistent, Volatile} {
var memberCountColumnName string
if p.PersistentStorage() {
memberCountColumnName = "persistent_member_count"
} else {
memberCountColumnName = "volatile_member_count"
}
//Unsatisfiable replica sets for the current persistence
unsatisfiable_replica_set_ids_by_persistance := []int64{0} // we always start at 1, this is a workaround for the statement generator producing (NULL) in case of an empty set otherwise
for {
replicaSet := struct {
ReplicaSet
ConfiguredMemberCount int
}{}
// HEAD of degraded replica sets PQ
res := tx.Raw(`SELECT r.*, COUNT(DISTINCT members.mongod_id) as "configured_member_count"
FROM replica_sets r
LEFT OUTER JOIN replica_set_configured_members members
ON r.id = members.replica_set_id
AND members.persistent_storage = ?
WHERE
r.`+memberCountColumnName+` != 0
AND
r.id NOT IN (?)
GROUP BY r.id
HAVING COUNT(DISTINCT members.mongod_id) < r.`+memberCountColumnName+`
ORDER BY COUNT(DISTINCT members.mongod_id) / r.`+memberCountColumnName+`
LIMIT 1`, p.PersistentStorage(), unsatisfiable_replica_set_ids_by_persistance,
).Scan(&replicaSet)
if res.RecordNotFound() {
caLog.Infof("finished repairing degraded replica sets in need of `%s` members", p)
break
} else if res.Error != nil {
panic(res.Error)
}
caLog.Debugf("looking for least busy `%s` slave suitable as mongod host for replica set `%s`", p, replicaSet.Name)
var leastBusySuitableSlave Slave
res = tx.Raw(`SELECT s.*
FROM slave_utilization s
WHERE
s.persistent_storage = ?
AND
s.free_mongods > 0
AND
s.configured_state = ?
AND (
s.risk_group_id NOT IN (
SELECT DISTINCT s.risk_group_id
FROM mongods m
JOIN slaves s ON m.parent_slave_id = s.id
WHERE m.replica_set_id = ?
)
-- 0 is the default risk group that is not a risk group,
-- i.e from which multiple slaves can be allocated for the same replica set
OR s.risk_group_id IS NULL
)
AND
s.id NOT IN ( -- Slaves already hosting a Mongod of the Replica Set
SELECT DISTINCT m.parent_slave_id
FROM mongods m
WHERE m.replica_set_id = ?
)
ORDER BY
(CASE WHEN s.observation_error_id IS NULL THEN 0 ELSE 1 END) ASC, -- prioritize slaves without observation error
s.utilization ASC
LIMIT 1`, p.PersistentStorage(), SlaveStateActive, replicaSet.ID, replicaSet.ID,
).Scan(&leastBusySuitableSlave)
if res.RecordNotFound() {
caLog.Warn("unsatisfiable replica set `%s`: not enough suitable `%s` slaves", replicaSet.Name, p)
unsatisfiable_replica_set_ids_by_persistance = append(unsatisfiable_replica_set_ids_by_persistance, replicaSet.ID)
unsatisfiable_replica_set_ids = append(unsatisfiable_replica_set_ids, replicaSet.ID)
continue
} else if res.Error != nil {
panic(res.Error)
}
caLog.Debugf("found slave `%s` as host for new mongod for replica set `%s`", leastBusySuitableSlave.Hostname, replicaSet.Name)
m, err := c.spawnMongodOnSlave(tx, &leastBusySuitableSlave, &replicaSet.ReplicaSet)
if err != nil {
caLog.Errorf("could not spawn mongod on slave `%s`: %s", leastBusySuitableSlave.Hostname, err.Error())
// the queries should have not returned a slave without free ports
panic(err)
} else {
caLog.Debugf("spawned mongod `%d` for replica set `%s` on slave `%s`", m.ID, replicaSet.Name, leastBusySuitableSlave.Hostname)
}
}
}
// Send replica set constraint status messages on bus for every replica set
if c.BusWriteChannel != nil {
// Get replica sets and the count of their actually configured members from the database
replicaSetsWithMemberCounts, err := tx.Raw(`SELECT
r.*,
(SELECT COUNT(*) FROM replica_set_configured_members WHERE replica_set_id = r.id AND persistent_storage = ?)
AS configured_persistent_members,
(SELECT COUNT(*) FROM replica_set_configured_members WHERE replica_set_id = r.id AND persistent_storage = ?)
AS configured_volatile_members
FROM replica_sets r
`, true, false).Rows()
if err != nil {
panic(err)
}
for replicaSetsWithMemberCounts.Next() {
var replicaSet ReplicaSet
tx.ScanRows(replicaSetsWithMemberCounts, &replicaSet)
configuredMemberCounts := struct {
ConfiguredPersistentMembers uint
ConfiguredVolatileMembers uint
}{}
tx.ScanRows(replicaSetsWithMemberCounts, &configuredMemberCounts)
unsatisfied := false
//Check if replica set is in unsatisfiable list
for _, id := range unsatisfiable_replica_set_ids {
unsatisfied = unsatisfied || (id == replicaSet.ID)
}
*c.BusWriteChannel <- DesiredReplicaSetConstraintStatus{
Unsatisfied: unsatisfied,
ReplicaSet: replicaSet,
ConfiguredPersistentCount: configuredMemberCounts.ConfiguredPersistentMembers,
ConfiguredVolatileCount: configuredMemberCounts.ConfiguredVolatileMembers,
}
}
}
if err == nil {
caLog.Info("Cluster allocator done successfully")
} else {
caLog.WithError(err).Error("Cluster allocator done with error")
}
return err
}
// replicaSets loads every ReplicaSet from the database and eagerly resolves
// the associations the allocator needs: each ReplicaSet's Mongods, and for
// each Mongod its ObservedState, DesiredState and ParentSlave.
// Any database error is treated as fatal (panic), matching the allocator's
// transaction-wide error handling elsewhere in this file.
func (c *ClusterAllocator) replicaSets(tx *gorm.DB) (replicaSets []*ReplicaSet) {
	if err := tx.Where(ReplicaSet{}).Find(&replicaSets).Error; err != nil {
		panic(err)
	}
	for _, r := range replicaSets {
		if err := tx.Model(r).Related(&r.Mongods, "Mongods").Error; err != nil {
			panic(err)
		}
		for _, m := range r.Mongods {
			// Observed/desired state may legitimately be absent
			// (RecordNotFound); only a real database error is fatal.
			res := tx.Model(m).Related(&m.ObservedState, "ObservedState")
			if err := res.Error; !res.RecordNotFound() && err != nil {
				panic(err)
			}
			res = tx.Model(m).Related(&m.DesiredState, "DesiredState")
			if err := res.Error; !res.RecordNotFound() && err != nil {
				panic(err)
			}
			//m.ParentSlave is a pointer and gorm does not initialize pointers on its own
			var parentSlave Slave
			res = tx.Model(m).Related(&parentSlave, "ParentSlave")
			if err := res.Error; err != nil {
				panic(err)
			}
			m.ParentSlave = &parentSlave
		}
	}
	return replicaSets
}
// slavePersistence maps a slave's storage flag to the corresponding
// persistence enum value (Persistent or Volatile).
func slavePersistence(s *Slave) persistence {
	if s.PersistentStorage {
		return Persistent
	}
	return Volatile
}
// PersistentStorage reports whether p denotes persistent (true) or
// volatile (false) storage. It panics on any other value, which would
// indicate a programming error.
func (p persistence) PersistentStorage() bool {
	if p == Persistent {
		return true
	}
	if p == Volatile {
		return false
	}
	panic("invalid value for persistence")
}
// String implements fmt.Stringer for persistence values.
// It panics on any value other than Persistent or Volatile.
func (p persistence) String() string {
	if p == Persistent {
		return "persistent"
	}
	if p == Volatile {
		return "volatile"
	}
	panic("invalid value for persistence")
}
// spawnMongodOnSlave creates a new Mongod for replica set r hosted on slave s:
// it picks an unused port from the slave's configured port range, inserts the
// Mongod row, creates its desired MongodState (execution state "running") and
// links the two. Database errors are fatal (panic); only "no free port" is
// reported as a regular error — callers are expected to have pre-filtered
// slaves with free capacity.
func (c *ClusterAllocator) spawnMongodOnSlave(tx *gorm.DB, s *Slave, r *ReplicaSet) (*Mongod, error) {
	var usedPorts []PortNumber
	res := tx.Raw(`
	SELECT m.port
	FROM mongods m
	WHERE m.parent_slave_id = ?
	ORDER BY m.port ASC
	`, s.ID).Pluck("port", &usedPorts)
	if !res.RecordNotFound() && res.Error != nil {
		panic(res.Error)
	}
	caLog.Debugf("slave: %#v: found used ports: %v", s, usedPorts)
	// usedPorts is sorted ascending by the query, as findUnusedPort requires.
	unusedPort, found := findUnusedPort(usedPorts, s.MongodPortRangeBegin, s.MongodPortRangeEnd)
	if !found {
		return nil, fmt.Errorf("could not spawn Mongod: no free port on slave `%s`", s.Hostname)
	}
	m := &Mongod{
		Port:          unusedPort,
		ReplSetName:   r.Name,
		ParentSlaveID: s.ID,
		ReplicaSetID:  NullIntValue(r.ID),
	}
	if err := tx.Create(&m).Error; err != nil {
		panic(err)
	}
	// The desired state is created after the Mongod so it can reference the
	// generated Mongod ID; the Mongod row is then back-patched to point at it.
	desiredState := MongodState{
		ParentMongodID: m.ID,
		ShardingRole:   r.ShardingRole,
		ExecutionState: MongodExecutionStateRunning,
	}
	if err := tx.Create(&desiredState).Error; err != nil {
		panic(err)
	}
	if err := tx.Model(&m).Update("DesiredStateID", desiredState.ID).Error; err != nil {
		panic(err)
	}
	return m, nil
}
// findUnusedPort returns the first port in [minPort, maxPort) that does not
// appear in usedPorts, using a merge-join-style sweep.
// usedPorts must be sorted ascending; found is false when the range is full.
func findUnusedPort(usedPorts []PortNumber, minPort, maxPort PortNumber) (unusedPort PortNumber, found bool) {
	// Skip used ports below the range so the sweep starts aligned.
	idx := 0
	for idx < len(usedPorts) && usedPorts[idx] < minPort {
		idx++
	}
	for candidate := minPort; candidate < maxPort; candidate++ {
		if idx >= len(usedPorts) {
			// No used ports remain; the candidate is free.
			return candidate, true
		}
		switch {
		case usedPorts[idx] == candidate:
			// Candidate is taken; advance past it.
			idx++
		case usedPorts[idx] > candidate:
			// Next used port lies beyond the candidate; candidate is free.
			return candidate, true
		}
	}
	return 0, false
}
// slaveMaxNumberOfMongods returns how many Mongods a slave can host, derived
// from the size of its configured port range. Panics if the range is empty,
// which violates a datastructure invariant.
func slaveMaxNumberOfMongods(s *Slave) PortNumber {
	size := s.MongodPortRangeEnd - s.MongodPortRangeBegin
	if size <= 0 {
		panic("datastructure invariant violated: the range of Mongod ports for a slave must be sized greater than 0")
	}
	return size
}
// slaveUsage returns the number of Mongods currently on the slave and the
// maximum number it can host.
func slaveUsage(s *Slave) (runningMongods, maxMongods uint) {
	runningMongods = uint(len(s.Mongods))
	maxMongods = uint(slaveMaxNumberOfMongods(s))
	return
}
// slaveBusyRate returns the slave's utilization as a fraction in [0, 1]:
// running Mongods divided by hosting capacity.
func slaveBusyRate(s *Slave) float64 {
	used, capacity := slaveUsage(s)
	return float64(used) / float64(capacity)
}
// MongoDB replica set member priorities used when building the desired member
// list. Higher values are elected primary preferentially; 0 means the member
// can never become primary.
const ( // between 0 and 1000
	ReplicaSetMemberPriorityVolatile    float64 = 500 // volatile members are preferred as primaries
	ReplicaSetMemberPriorityPersistent  float64 = 10  // persistent members are fallback primaries
	ReplicaSetMemberPriorityToBeRemoved float64 = 1   // members on disabled slaves, about to be removed
	ReplicaSetMemberPriorityNone        float64 = 0   // never primary
)
// DesiredMSPReplicaSetMembersForReplicaSetID returns the list of
// msp.ReplicaSetMember a model.ReplicaSet should have as members.
// It calculates member priorities, marks at most the first 7 members as
// voting (MongoDB's limit) and selects the first — necessarily voting —
// member as the initiator.
func DesiredMSPReplicaSetMembersForReplicaSetID(tx *gorm.DB, replicaSetID int64) (replicaSetMembers []msp.ReplicaSetMember, initiator Mongod, err error) {
	rows, err := tx.Raw(`
	SELECT
		m.id,
		s.hostname,
		m.port,
		CASE s.configured_state
		WHEN ? THEN ? -- prioritize members to be removed lower
		ELSE
			CASE s.persistent_storage
			WHEN false THEN ? -- prioritize volatile members higher
			ELSE ?
			END
		END as priority
	FROM mongods m
	JOIN replica_sets r ON m.replica_set_id = r.id
	JOIN mongod_states desired_state ON m.desired_state_id = desired_state.id
	JOIN slaves s ON m.parent_slave_id = s.id
	WHERE r.id = ?
	AND desired_state.execution_state = ?
	ORDER BY
		s.configured_state ASC, -- ordered by slave configured_state so that mongods on running slaves become voting first
		m.id ASC
	`, SlaveStateDisabled, ReplicaSetMemberPriorityToBeRemoved, ReplicaSetMemberPriorityVolatile, ReplicaSetMemberPriorityPersistent, replicaSetID, MongodExecutionStateRunning,
	).Rows()
	if err != nil {
		return []msp.ReplicaSetMember{}, Mongod{}, fmt.Errorf("could not fetch ReplicaSetMembers for ReplicaSet.ID `%v`: %s", replicaSetID, err)
	}
	// Bug fix: defer Close only AFTER the error check. The original deferred
	// before checking err, so a failed query (nil rows) would panic with a
	// nil-pointer dereference inside the deferred call.
	defer rows.Close()

	var initiatorId int64
	for i := 0; rows.Next(); i++ {
		member := msp.ReplicaSetMember{}
		var mongodId int64
		err = rows.Scan(&mongodId, &member.HostPort.Hostname, &member.HostPort.Port, &member.Priority)
		if err != nil {
			return
		}
		if i == 0 {
			//Use first mongod as initiator as it can vote.
			initiatorId = mongodId
		}
		// A replica set may have at most 7 voting members.
		// The query is ordered by slave configured_state so that mongods on
		// running slaves become voting first.
		if i < 7 {
			member.Votes = 1
		} else {
			member.Votes = 0
			member.Priority = 0 //Mongodb says: priority must be 0 when non-voting
		}
		replicaSetMembers = append(replicaSetMembers, member)
	}
	// Release the connection before issuing the follow-up query.
	// sql.Rows.Close is idempotent, so the deferred Close is harmless.
	rows.Close()

	if res := tx.First(&initiator, initiatorId); res.Error != nil && !res.RecordNotFound() {
		return []msp.ReplicaSetMember{}, Mongod{}, res.Error
	}

	return
}
|
package contractcourt
contractcourt/chain_arbitrator_test: add TestChainArbitratorRepublishCommitment
TestChainArbitratorRepublishCommitment tests that the chain arbitrator
will republish closing transactions for channels marked
CommitmentBroadcasted in the database at startup.
package contractcourt
import (
"io/ioutil"
"net"
"os"
"testing"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnwallet"
)
// TestChainArbitratorRepublishCommitment tests that the chain arbitrator will
// republish closing transactions for channels marked CommitmentBroadcasted in
// the database at startup.
func TestChainArbitratorRepublishCommitment(t *testing.T) {
	t.Parallel()

	// Back the test with a fresh, temporary channel database.
	tempPath, err := ioutil.TempDir("", "testdb")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tempPath)

	db, err := channeldb.Open(tempPath)
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()

	// Create 10 test channels and sync them to the database.
	const numChans = 10
	var channels []*channeldb.OpenChannel
	for i := 0; i < numChans; i++ {
		lChannel, _, cleanup, err := lnwallet.CreateTestChannels()
		if err != nil {
			t.Fatal(err)
		}
		defer cleanup()

		channel := lChannel.State()

		// We manually set the db here to make sure all channels are
		// synced to the same db.
		channel.Db = db

		addr := &net.TCPAddr{
			IP:   net.ParseIP("127.0.0.1"),
			Port: 18556,
		}
		if err := channel.SyncPending(addr, 101); err != nil {
			t.Fatal(err)
		}

		channels = append(channels, channel)
	}

	// Mark half of the channels as commitment broadcasted. Each close tx
	// spends the channel's funding outpoint so the txids are unique.
	for i := 0; i < numChans/2; i++ {
		closeTx := channels[i].FundingTxn.Copy()
		closeTx.TxIn[0].PreviousOutPoint = channels[i].FundingOutpoint
		err := channels[i].MarkCommitmentBroadcasted(closeTx)
		if err != nil {
			t.Fatal(err)
		}
	}

	// We keep track of the transactions published by the ChainArbitrator
	// at startup.
	published := make(map[chainhash.Hash]struct{})

	chainArbCfg := ChainArbitratorConfig{
		ChainIO:  &mockChainIO{},
		Notifier: &mockNotifier{},
		PublishTx: func(tx *wire.MsgTx) error {
			published[tx.TxHash()] = struct{}{}
			return nil
		},
	}
	chainArb := NewChainArbitrator(
		chainArbCfg, db,
	)
	if err := chainArb.Start(); err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := chainArb.Stop(); err != nil {
			t.Fatal(err)
		}
	}()

	// Half of the channels should have had their closing tx re-published.
	if len(published) != numChans/2 {
		t.Fatalf("expected %d re-published transactions, got %d",
			numChans/2, len(published))
	}

	// And make sure the published transactions are correct, and unique.
	for i := 0; i < numChans/2; i++ {
		closeTx := channels[i].FundingTxn.Copy()
		closeTx.TxIn[0].PreviousOutPoint = channels[i].FundingOutpoint

		_, ok := published[closeTx.TxHash()]
		if !ok {
			t.Fatalf("closing tx not re-published")
		}

		// Delete as we go so duplicates would be detected below.
		delete(published, closeTx.TxHash())
	}

	if len(published) != 0 {
		t.Fatalf("unexpected tx published")
	}
}
|
package client
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/client/transport"
"github.com/docker/distribution/registry/storage/cache"
"github.com/docker/distribution/registry/storage/cache/memory"
)
// Registry provides an interface for calling Repositories, which returns a catalog of repositories.
type Registry interface {
	// Repositories fills 'repos' with a page of repository names starting
	// after 'last', returning the count filled; io.EOF signals the last page.
	Repositories(ctx context.Context, repos []string, last string) (n int, err error)
}
// NewRegistry creates a registry namespace which can be used to get a listing of repositories
func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) {
ub, err := v2.NewURLBuilderFromString(baseURL)
if err != nil {
return nil, err
}
client := &http.Client{
Transport: transport,
Timeout: 1 * time.Minute,
}
return ®istry{
client: client,
ub: ub,
context: ctx,
}, nil
}
// registry is the concrete Registry implementation backed by an HTTP client
// and a v2 URL builder.
type registry struct {
	client  *http.Client       // client used for all catalog requests
	ub      *v2.URLBuilder     // builds v2 API endpoint URLs
	context context.Context    // context captured at construction time
}
// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there
// are no more entries
func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) {
	var numFilled int
	var returnErr error

	values := buildCatalogValues(len(entries), last)
	u, err := r.ub.BuildCatalogURL(values)
	if err != nil {
		return 0, err
	}

	resp, err := r.client.Get(u)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		var ctlg struct {
			Repositories []string `json:"repositories"`
		}
		decoder := json.NewDecoder(resp.Body)

		if err := decoder.Decode(&ctlg); err != nil {
			return 0, err
		}

		// Fix: use copy instead of indexed assignment so a server that
		// returns more results than requested cannot cause an
		// index-out-of-range panic; we fill at most len(entries).
		numFilled = copy(entries, ctlg.Repositories)

		// A missing Link header means there are no further pages.
		link := resp.Header.Get("Link")
		if link == "" {
			returnErr = io.EOF
		}
	} else {
		return 0, handleErrorResponse(resp)
	}

	return numFilled, returnErr
}
// NewRepository creates a new Repository for the given repository name and
// base URL. The name must be a valid named reference; the transport is used
// for all requests issued by the repository's services.
func NewRepository(ctx context.Context, name, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
	// Validate the repository name up front.
	if _, err := reference.ParseNamed(name); err != nil {
		return nil, err
	}

	ub, err := v2.NewURLBuilderFromString(baseURL)
	if err != nil {
		return nil, err
	}

	// TODO(dmcgowan): create cookie jar
	httpClient := &http.Client{Transport: transport}

	repo := &repository{
		client:  httpClient,
		ub:      ub,
		name:    name,
		context: ctx,
	}
	return repo, nil
}
// repository is the client-side distribution.Repository implementation for a
// single named repository on a remote registry.
type repository struct {
	client  *http.Client       // shared HTTP client for all sub-services
	ub      *v2.URLBuilder     // builds v2 API endpoint URLs
	context context.Context    // context captured at construction time
	name    string             // validated repository name
}
// Name returns the repository's name as given at construction.
func (r *repository) Name() string {
	return r.name
}
// Blobs returns the blob service for this repository. Descriptor lookups go
// through an in-memory cache in front of the remote HEAD-based statter.
func (r *repository) Blobs(ctx context.Context) distribution.BlobStore {
	remoteStatter := &blobStatter{
		name:   r.Name(),
		ub:     r.ub,
		client: r.client,
	}
	cachedStatter := cache.NewCachedBlobStatter(
		memory.NewInMemoryBlobDescriptorCacheProvider(), remoteStatter,
	)
	return &blobs{
		name:    r.Name(),
		ub:      r.ub,
		client:  r.client,
		statter: cachedStatter,
	}
}
// Manifests returns the manifest service for this repository.
// todo(richardscothern): options should be sent over the wire
func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
	return &manifests{
		name:   r.Name(),
		ub:     r.ub,
		client: r.client,
		etags:  make(map[string]string),
	}, nil
}
// Signatures returns the signature service, layered on top of the manifest
// service. The Manifests error is deliberately ignored: the current
// implementation never returns a non-nil error.
func (r *repository) Signatures() distribution.SignatureService {
	ms, _ := r.Manifests(r.context)
	return &signatures{
		manifests: ms,
	}
}
// signatures implements distribution.SignatureService by delegating to the
// repository's manifest service.
type signatures struct {
	manifests distribution.ManifestService
}
// Get fetches the manifest identified by dgst and returns its signatures.
func (s *signatures) Get(dgst digest.Digest) ([][]byte, error) {
	manifest, err := s.manifests.Get(dgst)
	if err != nil {
		return nil, err
	}
	return manifest.Signatures()
}
// Put is not supported by the client-side signature service.
func (s *signatures) Put(dgst digest.Digest, signatures ...[]byte) error {
	panic("not implemented")
}
// manifests implements distribution.ManifestService over the registry's v2
// manifest endpoints.
type manifests struct {
	name   string            // repository name
	ub     *v2.URLBuilder    // builds v2 API endpoint URLs
	client *http.Client      // HTTP client for manifest requests
	etags  map[string]string // per-tag ETags for conditional GETs (see AddEtagToTag)
}
// Tags lists all tags of the repository via the v2 tags endpoint.
// Non-2xx responses are translated by handleErrorResponse.
func (ms *manifests) Tags() ([]string, error) {
	u, err := ms.ub.BuildTagsURL(ms.name)
	if err != nil {
		return nil, err
	}

	resp, err := ms.client.Get(u)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		b, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return nil, err
		}

		tagsResponse := struct {
			Tags []string `json:"tags"`
		}{}
		if err := json.Unmarshal(b, &tagsResponse); err != nil {
			return nil, err
		}
		return tagsResponse.Tags, nil
	}
	return nil, handleErrorResponse(resp)
}
// Exists reports whether the manifest with the given digest exists.
func (ms *manifests) Exists(dgst digest.Digest) (bool, error) {
	// Call by Tag endpoint since the API uses the same
	// URL endpoint for tags and digests.
	return ms.ExistsByTag(dgst.String())
}
// ExistsByTag reports whether a manifest exists for the given tag by issuing
// a HEAD request. Any 2xx means true; 404 means false without error; other
// statuses are translated by handleErrorResponse.
func (ms *manifests) ExistsByTag(tag string) (bool, error) {
	u, err := ms.ub.BuildManifestURL(ms.name, tag)
	if err != nil {
		return false, err
	}

	resp, err := ms.client.Head(u)
	if err != nil {
		return false, err
	}
	// Fix: the response body was never closed, leaking the underlying
	// connection on every existence check.
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		return true, nil
	} else if resp.StatusCode == http.StatusNotFound {
		return false, nil
	}
	return false, handleErrorResponse(resp)
}
// Get fetches the signed manifest with the given digest.
func (ms *manifests) Get(dgst digest.Digest) (*schema1.SignedManifest, error) {
	// Call by Tag endpoint since the API uses the same
	// URL endpoint for tags and digests.
	return ms.GetByTag(dgst.String())
}
// AddEtagToTag allows a client to supply an eTag to GetByTag which will be
// used for a conditional HTTP request. If the eTag matches, a nil manifest
// and nil error will be returned. etag is automatically quoted when added to
// this map.
func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
	return func(ms distribution.ManifestService) error {
		impl, ok := ms.(*manifests)
		if !ok {
			return fmt.Errorf("etag options is a client-only option")
		}
		impl.etags[tag] = fmt.Sprintf(`"%s"`, etag)
		return nil
	}
}
// GetByTag fetches the signed manifest for a tag. When an ETag was registered
// via AddEtagToTag, the request is conditional (If-None-Match) and a 304
// response yields (nil, distribution.ErrManifestNotModified).
func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) {
	// Apply caller-supplied options (e.g. AddEtagToTag) first.
	for _, option := range options {
		err := option(ms)
		if err != nil {
			return nil, err
		}
	}

	u, err := ms.ub.BuildManifestURL(ms.name, tag)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, err
	}

	if _, ok := ms.etags[tag]; ok {
		req.Header.Set("If-None-Match", ms.etags[tag])
	}
	resp, err := ms.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotModified {
		return nil, distribution.ErrManifestNotModified
	} else if SuccessStatus(resp.StatusCode) {
		var sm schema1.SignedManifest
		decoder := json.NewDecoder(resp.Body)

		if err := decoder.Decode(&sm); err != nil {
			return nil, err
		}
		return &sm, nil
	}
	return nil, handleErrorResponse(resp)
}
// Put uploads a signed manifest under its tag via HTTP PUT.
// todo(richardscothern): do something with options here when they become applicable
func (ms *manifests) Put(m *schema1.SignedManifest) error {
	manifestURL, err := ms.ub.BuildManifestURL(ms.name, m.Tag)
	if err != nil {
		return err
	}

	// The raw (signed) manifest bytes are sent as-is.
	putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(m.Raw))
	if err != nil {
		return err
	}

	resp, err := ms.client.Do(putRequest)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		// TODO(dmcgowan): make use of digest header
		return nil
	}
	return handleErrorResponse(resp)
}
// Delete removes the manifest with the given digest via HTTP DELETE.
func (ms *manifests) Delete(dgst digest.Digest) error {
	u, err := ms.ub.BuildManifestURL(ms.name, dgst.String())
	if err != nil {
		return err
	}

	req, err := http.NewRequest("DELETE", u, nil)
	if err != nil {
		return err
	}

	resp, err := ms.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if !SuccessStatus(resp.StatusCode) {
		return handleErrorResponse(resp)
	}
	return nil
}
// blobs implements distribution.BlobStore over the registry's v2 blob
// endpoints. Deletion is delegated via the embedded BlobDeleter.
type blobs struct {
	name   string         // repository name
	ub     *v2.URLBuilder // builds v2 API endpoint URLs
	client *http.Client   // HTTP client for blob requests

	statter distribution.BlobDescriptorService // cached descriptor lookups
	distribution.BlobDeleter
}
// sanitizeLocation resolves a (possibly relative) Location header value
// against the request's base URL, returning an absolute URL string.
func sanitizeLocation(location, base string) (string, error) {
	parsedBase, err := url.Parse(base)
	if err != nil {
		return "", err
	}
	parsedLocation, err := url.Parse(location)
	if err != nil {
		return "", err
	}
	resolved := parsedBase.ResolveReference(parsedLocation)
	return resolved.String(), nil
}
// Stat returns the descriptor for a blob, served from the cached statter.
func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	return bs.statter.Stat(ctx, dgst)
}
// Get returns the full content of the blob with the given digest.
// It opens the blob directly and reads it to the end; the previous extra
// Stat round trip before Open was unnecessary, since Open already surfaces
// a nonexistent blob as distribution.ErrBlobUnknown.
func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
	reader, err := bs.Open(ctx, dgst)
	if err != nil {
		return nil, err
	}
	defer reader.Close()

	return ioutil.ReadAll(reader)
}
// Open returns a seekable reader over the blob content. A 404 from the
// registry is mapped to distribution.ErrBlobUnknown; other failures are
// translated by handleErrorResponse when the reader is first used.
func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
	blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst)
	if err != nil {
		return nil, err
	}

	return transport.NewHTTPReadSeeker(bs.client, blobURL,
		func(resp *http.Response) error {
			if resp.StatusCode == http.StatusNotFound {
				return distribution.ErrBlobUnknown
			}
			return handleErrorResponse(resp)
		}), nil
}
// ServeBlob is not supported on the client side.
func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
	panic("not implemented")
}
// Put uploads p as a single blob: it starts an upload session, streams the
// bytes while computing their canonical digest, and commits with the
// resulting descriptor.
func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
	writer, err := bs.Create(ctx)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	// TeeReader digests the bytes as they are written to the upload.
	dgstr := digest.Canonical.New()
	n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
	if err != nil {
		return distribution.Descriptor{}, err
	}
	if n < int64(len(p)) {
		return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p))
	}

	desc := distribution.Descriptor{
		MediaType: mediaType,
		Size:      int64(len(p)),
		Digest:    dgstr.Digest(),
	}

	return writer.Commit(ctx, desc)
}
// Create initiates a resumable blob upload (POST against the upload URL) and
// returns a BlobWriter for streaming content to the registry.
func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) {
	u, err := bs.ub.BuildBlobUploadURL(bs.name)
	// Fix: this error was previously ignored, so a POST could be issued
	// against an empty/invalid URL.
	if err != nil {
		return nil, err
	}

	resp, err := bs.client.Post(u, "", nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		// TODO(dmcgowan): Check for invalid UUID
		uuid := resp.Header.Get("Docker-Upload-UUID")
		// The Location header may be relative; resolve it against u.
		location, err := sanitizeLocation(resp.Header.Get("Location"), u)
		if err != nil {
			return nil, err
		}

		return &httpBlobUpload{
			statter:   bs.statter,
			client:    bs.client,
			uuid:      uuid,
			startedAt: time.Now(),
			location:  location,
		}, nil
	}
	return nil, handleErrorResponse(resp)
}
// Resume is not supported on the client side.
func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
	panic("not implemented")
}
// Delete removes the blob, delegating to the statter's Clear (HTTP DELETE).
func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error {
	return bs.statter.Clear(ctx, dgst)
}
// blobStatter resolves blob descriptors with HEAD requests against the
// registry's blob endpoint.
type blobStatter struct {
	name   string         // repository name
	ub     *v2.URLBuilder // builds v2 API endpoint URLs
	client *http.Client   // HTTP client for HEAD/DELETE requests
}
// Stat issues a HEAD request for the blob and builds a Descriptor from the
// Content-Type and Content-Length headers. A 404 is mapped to
// distribution.ErrBlobUnknown.
func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	u, err := bs.ub.BuildBlobURL(bs.name, dgst)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	resp, err := bs.client.Head(u)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		// Content-Length is required to report the blob size.
		lengthHeader := resp.Header.Get("Content-Length")
		if lengthHeader == "" {
			return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u)
		}

		length, err := strconv.ParseInt(lengthHeader, 10, 64)
		if err != nil {
			return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err)
		}

		return distribution.Descriptor{
			MediaType: resp.Header.Get("Content-Type"),
			Size:      length,
			Digest:    dgst,
		}, nil
	} else if resp.StatusCode == http.StatusNotFound {
		return distribution.Descriptor{}, distribution.ErrBlobUnknown
	}
	return distribution.Descriptor{}, handleErrorResponse(resp)
}
// buildCatalogValues assembles the query parameters for a catalog request:
// "n" limits the page size (only when positive) and "last" sets the
// pagination cursor (only when non-empty).
func buildCatalogValues(maxEntries int, last string) url.Values {
	v := url.Values{}
	if maxEntries > 0 {
		v.Set("n", strconv.Itoa(maxEntries))
	}
	if last != "" {
		v.Set("last", last)
	}
	return v
}
// Clear deletes the blob with the given digest via HTTP DELETE.
func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
	blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst)
	if err != nil {
		return err
	}

	req, err := http.NewRequest("DELETE", blobURL, nil)
	if err != nil {
		return err
	}

	resp, err := bs.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if !SuccessStatus(resp.StatusCode) {
		return handleErrorResponse(resp)
	}
	return nil
}
// SetDescriptor is a no-op: the remote statter cannot store descriptors;
// caching is handled by the wrapping cache.NewCachedBlobStatter.
func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
	return nil
}
Remove unnecessary stat from blob Get method
This calls Stat before Open, which should be unnecessary because Open
can handle the case of a nonexistent blob. Removing the Stat saves a
round trip.
This is similar to the removal of stat in Open in #1226.
Signed-off-by: Aaron Lehmann <8ecfc6017a87905413dcd7d63696a2a4c351b604@docker.com>
package client
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/client/transport"
"github.com/docker/distribution/registry/storage/cache"
"github.com/docker/distribution/registry/storage/cache/memory"
)
// Registry provides an interface for calling Repositories, which returns a catalog of repositories.
type Registry interface {
	// Repositories fills 'repos' with a page of repository names starting
	// after 'last', returning the count filled; io.EOF signals the last page.
	Repositories(ctx context.Context, repos []string, last string) (n int, err error)
}
// NewRegistry creates a registry namespace which can be used to get a listing of repositories
func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) {
ub, err := v2.NewURLBuilderFromString(baseURL)
if err != nil {
return nil, err
}
client := &http.Client{
Transport: transport,
Timeout: 1 * time.Minute,
}
return ®istry{
client: client,
ub: ub,
context: ctx,
}, nil
}
// registry is the concrete Registry implementation backed by an HTTP client
// and a v2 URL builder.
type registry struct {
	client  *http.Client       // client used for all catalog requests
	ub      *v2.URLBuilder     // builds v2 API endpoint URLs
	context context.Context    // context captured at construction time
}
// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there
// are no more entries
func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) {
	var numFilled int
	var returnErr error

	values := buildCatalogValues(len(entries), last)
	u, err := r.ub.BuildCatalogURL(values)
	if err != nil {
		return 0, err
	}

	resp, err := r.client.Get(u)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		var ctlg struct {
			Repositories []string `json:"repositories"`
		}
		decoder := json.NewDecoder(resp.Body)

		if err := decoder.Decode(&ctlg); err != nil {
			return 0, err
		}

		// Fix: use copy instead of indexed assignment so a server that
		// returns more results than requested cannot cause an
		// index-out-of-range panic; we fill at most len(entries).
		numFilled = copy(entries, ctlg.Repositories)

		// A missing Link header means there are no further pages.
		link := resp.Header.Get("Link")
		if link == "" {
			returnErr = io.EOF
		}
	} else {
		return 0, handleErrorResponse(resp)
	}

	return numFilled, returnErr
}
// NewRepository creates a new Repository for the given repository name and
// base URL. The name must be a valid named reference; the transport is used
// for all requests issued by the repository's services.
func NewRepository(ctx context.Context, name, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
	// Validate the repository name up front.
	if _, err := reference.ParseNamed(name); err != nil {
		return nil, err
	}

	ub, err := v2.NewURLBuilderFromString(baseURL)
	if err != nil {
		return nil, err
	}

	// TODO(dmcgowan): create cookie jar
	httpClient := &http.Client{Transport: transport}

	repo := &repository{
		client:  httpClient,
		ub:      ub,
		name:    name,
		context: ctx,
	}
	return repo, nil
}
// repository is the client-side distribution.Repository implementation for a
// single named repository on a remote registry.
type repository struct {
	client  *http.Client       // shared HTTP client for all sub-services
	ub      *v2.URLBuilder     // builds v2 API endpoint URLs
	context context.Context    // context captured at construction time
	name    string             // validated repository name
}
// Name returns the repository's name as given at construction.
func (r *repository) Name() string {
	return r.name
}
// Blobs returns the blob service for this repository. Descriptor lookups go
// through an in-memory cache in front of the remote HEAD-based statter.
func (r *repository) Blobs(ctx context.Context) distribution.BlobStore {
	remoteStatter := &blobStatter{
		name:   r.Name(),
		ub:     r.ub,
		client: r.client,
	}
	cachedStatter := cache.NewCachedBlobStatter(
		memory.NewInMemoryBlobDescriptorCacheProvider(), remoteStatter,
	)
	return &blobs{
		name:    r.Name(),
		ub:      r.ub,
		client:  r.client,
		statter: cachedStatter,
	}
}
// Manifests returns the manifest service for this repository.
// todo(richardscothern): options should be sent over the wire
func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
	return &manifests{
		name:   r.Name(),
		ub:     r.ub,
		client: r.client,
		etags:  make(map[string]string),
	}, nil
}
// Signatures returns the signature service, layered on top of the manifest
// service. The Manifests error is deliberately ignored: the current
// implementation never returns a non-nil error.
func (r *repository) Signatures() distribution.SignatureService {
	ms, _ := r.Manifests(r.context)
	return &signatures{
		manifests: ms,
	}
}
// signatures implements distribution.SignatureService by delegating to the
// repository's manifest service.
type signatures struct {
	manifests distribution.ManifestService
}
// Get fetches the manifest identified by dgst and returns its signatures.
func (s *signatures) Get(dgst digest.Digest) ([][]byte, error) {
	manifest, err := s.manifests.Get(dgst)
	if err != nil {
		return nil, err
	}
	return manifest.Signatures()
}
// Put is not supported by the client-side signature service.
func (s *signatures) Put(dgst digest.Digest, signatures ...[]byte) error {
	panic("not implemented")
}
// manifests implements distribution.ManifestService over the registry's v2
// manifest endpoints.
type manifests struct {
	name   string            // repository name
	ub     *v2.URLBuilder    // builds v2 API endpoint URLs
	client *http.Client      // HTTP client for manifest requests
	etags  map[string]string // per-tag ETags for conditional GETs (see AddEtagToTag)
}
// Tags lists all tags of the repository via the v2 tags endpoint.
// Non-2xx responses are translated by handleErrorResponse.
func (ms *manifests) Tags() ([]string, error) {
	u, err := ms.ub.BuildTagsURL(ms.name)
	if err != nil {
		return nil, err
	}

	resp, err := ms.client.Get(u)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		b, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return nil, err
		}

		tagsResponse := struct {
			Tags []string `json:"tags"`
		}{}
		if err := json.Unmarshal(b, &tagsResponse); err != nil {
			return nil, err
		}
		return tagsResponse.Tags, nil
	}
	return nil, handleErrorResponse(resp)
}
// Exists reports whether the manifest with the given digest exists.
func (ms *manifests) Exists(dgst digest.Digest) (bool, error) {
	// Call by Tag endpoint since the API uses the same
	// URL endpoint for tags and digests.
	return ms.ExistsByTag(dgst.String())
}
// ExistsByTag reports whether a manifest exists for the given tag by issuing
// a HEAD request. Any 2xx means true; 404 means false without error; other
// statuses are translated by handleErrorResponse.
func (ms *manifests) ExistsByTag(tag string) (bool, error) {
	u, err := ms.ub.BuildManifestURL(ms.name, tag)
	if err != nil {
		return false, err
	}

	resp, err := ms.client.Head(u)
	if err != nil {
		return false, err
	}
	// Fix: the response body was never closed, leaking the underlying
	// connection on every existence check.
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		return true, nil
	} else if resp.StatusCode == http.StatusNotFound {
		return false, nil
	}
	return false, handleErrorResponse(resp)
}
// Get fetches the signed manifest with the given digest.
func (ms *manifests) Get(dgst digest.Digest) (*schema1.SignedManifest, error) {
	// Call by Tag endpoint since the API uses the same
	// URL endpoint for tags and digests.
	return ms.GetByTag(dgst.String())
}
// AddEtagToTag allows a client to supply an eTag to GetByTag which will be
// used for a conditional HTTP request. If the eTag matches, a nil manifest
// and nil error will be returned. etag is automatically quoted when added to
// this map.
func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
	return func(ms distribution.ManifestService) error {
		impl, ok := ms.(*manifests)
		if !ok {
			return fmt.Errorf("etag options is a client-only option")
		}
		impl.etags[tag] = fmt.Sprintf(`"%s"`, etag)
		return nil
	}
}
// GetByTag fetches the signed manifest for the given tag, after applying any
// ManifestServiceOptions (e.g. AddEtagToTag for a conditional GET).
// Returns distribution.ErrManifestNotModified when the registry answers 304
// for a supplied eTag.
func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) {
	// Options may mutate this manifests instance (e.g. record an eTag).
	for _, option := range options {
		err := option(ms)
		if err != nil {
			return nil, err
		}
	}
	u, err := ms.ub.BuildManifestURL(ms.name, tag)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, err
	}
	// Conditional request: send the stored (pre-quoted) eTag for this tag.
	if _, ok := ms.etags[tag]; ok {
		req.Header.Set("If-None-Match", ms.etags[tag])
	}
	resp, err := ms.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotModified {
		return nil, distribution.ErrManifestNotModified
	} else if SuccessStatus(resp.StatusCode) {
		var sm schema1.SignedManifest
		decoder := json.NewDecoder(resp.Body)
		if err := decoder.Decode(&sm); err != nil {
			return nil, err
		}
		return &sm, nil
	}
	return nil, handleErrorResponse(resp)
}
// Put uploads the raw signed manifest to the registry under its tag.
func (ms *manifests) Put(m *schema1.SignedManifest) error {
	manifestURL, err := ms.ub.BuildManifestURL(ms.name, m.Tag)
	if err != nil {
		return err
	}
	// todo(richardscothern): do something with options here when they become applicable
	putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(m.Raw))
	if err != nil {
		return err
	}
	resp, err := ms.client.Do(putRequest)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if SuccessStatus(resp.StatusCode) {
		// TODO(dmcgowan): make use of digest header
		return nil
	}
	return handleErrorResponse(resp)
}
// Delete removes the manifest identified by dgst from the remote repository.
func (ms *manifests) Delete(dgst digest.Digest) error {
	manifestURL, err := ms.ub.BuildManifestURL(ms.name, dgst.String())
	if err != nil {
		return err
	}
	delReq, err := http.NewRequest("DELETE", manifestURL, nil)
	if err != nil {
		return err
	}
	resp, err := ms.client.Do(delReq)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if !SuccessStatus(resp.StatusCode) {
		return handleErrorResponse(resp)
	}
	return nil
}
// blobs is the HTTP client implementation of the blob service for a single
// named repository.
type blobs struct {
	name   string         // repository name
	ub     *v2.URLBuilder // builds registry v2 API endpoint URLs
	client *http.Client   // transport used for all requests
	statter distribution.BlobDescriptorService // delegate for Stat/Clear
	distribution.BlobDeleter
}
// sanitizeLocation resolves a (possibly relative) Location header value
// against the request's base URL and returns the absolute URL as a string.
func sanitizeLocation(location, base string) (string, error) {
	parsedBase, err := url.Parse(base)
	if err != nil {
		return "", err
	}
	parsedLocation, err := url.Parse(location)
	if err != nil {
		return "", err
	}
	resolved := parsedBase.ResolveReference(parsedLocation)
	return resolved.String(), nil
}
// Stat delegates descriptor lookup to the configured statter.
func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	return bs.statter.Stat(ctx, dgst)
}

// Get fetches the entire content of the blob identified by dgst into memory.
func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
	reader, err := bs.Open(ctx, dgst)
	if err != nil {
		return nil, err
	}
	defer reader.Close()
	return ioutil.ReadAll(reader)
}
// Open returns a seekable reader over the remote blob. 404 responses are
// mapped to distribution.ErrBlobUnknown; other failures go through
// handleErrorResponse.
func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
	blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst)
	if err != nil {
		return nil, err
	}
	return transport.NewHTTPReadSeeker(bs.client, blobURL,
		func(resp *http.Response) error {
			if resp.StatusCode == http.StatusNotFound {
				return distribution.ErrBlobUnknown
			}
			return handleErrorResponse(resp)
		}), nil
}
// ServeBlob is unimplemented on the client side; serving blobs over HTTP is
// a registry/server concern.
func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
	panic("not implemented")
}
// Put uploads the in-memory blob p with the given media type and commits it
// under its canonical digest, returning the committed descriptor.
func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
	writer, err := bs.Create(ctx)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	// Tee the payload through a digester so the digest is computed while
	// the bytes stream to the upload writer.
	dgstr := digest.Canonical.New()
	n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
	if err != nil {
		return distribution.Descriptor{}, err
	}
	if n < int64(len(p)) {
		return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p))
	}
	desc := distribution.Descriptor{
		MediaType: mediaType,
		Size:      int64(len(p)),
		Digest:    dgstr.Digest(),
	}
	return writer.Commit(ctx, desc)
}
// Create starts a new blob upload session by POSTing to the repository's
// upload endpoint and returns a BlobWriter targeting the returned Location.
func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) {
	u, err := bs.ub.BuildBlobUploadURL(bs.name)
	if err != nil {
		// FIX: this error was previously discarded — the next line's := re-bound
		// err and the POST ran with an invalid (empty) URL.
		return nil, err
	}
	resp, err := bs.client.Post(u, "", nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if SuccessStatus(resp.StatusCode) {
		// TODO(dmcgowan): Check for invalid UUID
		uuid := resp.Header.Get("Docker-Upload-UUID")
		location, err := sanitizeLocation(resp.Header.Get("Location"), u)
		if err != nil {
			return nil, err
		}
		return &httpBlobUpload{
			statter:   bs.statter,
			client:    bs.client,
			uuid:      uuid,
			startedAt: time.Now(),
			location:  location,
		}, nil
	}
	return nil, handleErrorResponse(resp)
}
// Resume is unimplemented: resuming an interrupted upload session is not
// supported by this client yet.
func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
	panic("not implemented")
}

// Delete removes the blob by delegating to the statter's Clear, which issues
// the DELETE request.
func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error {
	return bs.statter.Clear(ctx, dgst)
}

// blobStatter implements blob descriptor lookup and deletion over HTTP for a
// single named repository.
type blobStatter struct {
	name   string         // repository name
	ub     *v2.URLBuilder // builds registry v2 API endpoint URLs
	client *http.Client   // transport used for all requests
}
// Stat issues a HEAD request for the blob and builds a descriptor from the
// Content-Length and Content-Type headers. A 404 maps to ErrBlobUnknown.
func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	u, err := bs.ub.BuildBlobURL(bs.name, dgst)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	resp, err := bs.client.Head(u)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	defer resp.Body.Close()
	if SuccessStatus(resp.StatusCode) {
		// The registry must report the blob size; refuse to fabricate one.
		lengthHeader := resp.Header.Get("Content-Length")
		if lengthHeader == "" {
			return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u)
		}
		length, err := strconv.ParseInt(lengthHeader, 10, 64)
		if err != nil {
			return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err)
		}
		return distribution.Descriptor{
			MediaType: resp.Header.Get("Content-Type"),
			Size:      length,
			Digest:    dgst,
		}, nil
	} else if resp.StatusCode == http.StatusNotFound {
		return distribution.Descriptor{}, distribution.ErrBlobUnknown
	}
	return distribution.Descriptor{}, handleErrorResponse(resp)
}
// buildCatalogValues assembles query parameters for a catalog listing
// request: a positive maxEntries becomes "n" and a non-empty last becomes
// the "last" pagination cursor.
func buildCatalogValues(maxEntries int, last string) url.Values {
	params := url.Values{}
	if maxEntries > 0 {
		params.Set("n", strconv.Itoa(maxEntries))
	}
	if last != "" {
		params.Set("last", last)
	}
	return params
}
// Clear issues a DELETE for the blob identified by dgst.
func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
	blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst)
	if err != nil {
		return err
	}
	delReq, err := http.NewRequest("DELETE", blobURL, nil)
	if err != nil {
		return err
	}
	resp, err := bs.client.Do(delReq)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if !SuccessStatus(resp.StatusCode) {
		return handleErrorResponse(resp)
	}
	return nil
}
// SetDescriptor is a no-op: the remote registry owns descriptor state, so
// there is nothing for the client to record locally.
func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
	return nil
}
|
/**
* Copyright 2015 @ z3q.net.
* name : aftersales_service.go
* author : jarryliu
* date : 2016-07-18 17:16
* description :
* history :
*/
package dps
import (
"github.com/jsix/gof/db"
"go2o/core/domain/interface/after-sales"
"go2o/core/domain/interface/order"
"go2o/core/dto"
"go2o/core/infrastructure/format"
"go2o/core/query"
)
// afterSalesService exposes after-sales (return/exchange/repair) operations
// over the after-sales repository and its read-side query object.
type afterSalesService struct {
	_orderRep order.IOrderRep           // sale-order repository (held, not used in the methods visible here)
	_rep      afterSales.IAfterSalesRep // after-sales order repository
	_query    *query.AfterSalesQuery    // paged read queries
	db.Connector                        // raw DB access for ad-hoc scalar queries
}

// NewAfterSalesService wires an afterSalesService from its dependencies.
// NOTE(review): the embedded db.Connector is not initialized here; methods
// using it (e.g. GetAfterSalesOrder) rely on it being set elsewhere — confirm.
func NewAfterSalesService(rep afterSales.IAfterSalesRep,
	q *query.AfterSalesQuery, orderRep order.IOrderRep) *afterSalesService {
	return &afterSalesService{
		_rep:      rep,
		_orderRep: orderRep,
		_query:    q,
	}
}
// SubmitAfterSalesOrder creates and submits an after-sales order (return,
// exchange or repair) for one item snapshot of the given sale order,
// returning the new after-sales order id.
func (a *afterSalesService) SubmitAfterSalesOrder(orderId int, asType int,
	snapshotId int, quantity int, reason string, img string) (int, error) {
	ro := a._rep.CreateAfterSalesOrder(&afterSales.AfterSalesOrder{
		// sale order id
		OrderId: orderId,
		// type: return, exchange or repair
		Type: asType,
		// reason supplied by the buyer
		Reason:        reason,
		ReturnSpImage: img,
	})
	err := ro.SetItem(snapshotId, quantity)
	if err == nil {
		return ro.Submit()
	}
	return 0, err
}
// GetAllAfterSalesOrderOfSaleOrder returns every after-sales order attached
// to the given sale order, with StateText filled in from the state code.
func (a *afterSalesService) GetAllAfterSalesOrderOfSaleOrder(orderId int) []afterSales.AfterSalesOrder {
	list := a._rep.GetAllOfSaleOrder(orderId)
	arr := make([]afterSales.AfterSalesOrder, len(list))
	for i, v := range list {
		arr[i] = v.Value()
		arr[i].StateText = afterSales.Stat(arr[i].State).String()
	}
	return arr
}

// QueryPagerAfterSalesOrderOfMember returns one page of a member's
// after-sales orders plus the total row count.
func (a *afterSalesService) QueryPagerAfterSalesOrderOfMember(memberId, begin,
	size int, where string) (int, []*dto.PagedMemberAfterSalesOrder) {
	return a._query.QueryPagerAfterSalesOrderOfMember(memberId, begin, size, where)
}

// QueryPagerAfterSalesOrderOfVendor returns one page of a vendor's
// after-sales orders plus the total row count.
func (a *afterSalesService) QueryPagerAfterSalesOrderOfVendor(vendorId, begin,
	size int, where string) (int, []*dto.PagedVendorAfterSalesOrder) {
	return a._query.QueryPagerAfterSalesOrderOfVendor(vendorId, begin, size, where)
}
// GetAfterSalesOrder returns the order number (order_no) of the sale order
// with the given id, or 0 when no row matches.
func (a *afterSalesService) GetAfterSalesOrder(order_id int) int {
	id := 0
	// FIX: the SQL verb was misspelled "SSELECT", so the query could never
	// execute and this always returned 0.
	a.Connector.ExecScalar("SELECT order_no FROM sale_order WHERE id=?", &id, order_id)
	return id
}
// GetAfterSaleOrder returns the after-sales order with the given id, with
// StateText and the resolved image URL filled in, or nil when not found.
func (a *afterSalesService) GetAfterSaleOrder(id int) *afterSales.AfterSalesOrder {
	as := a._rep.GetAfterSalesOrder(id)
	if as != nil {
		v := as.Value()
		v.StateText = afterSales.Stat(v.State).String()
		// Expand the stored image path into a full resource URL.
		v.ReturnSpImage = format.GetResUrl(v.ReturnSpImage)
		return &v
	}
	return nil
}
// AgreeAfterSales approves the after-sales request.
// FIX: all four methods below previously dereferenced the repository result
// without a nil check and panicked for a missing order; RejectAfterSales
// shows the intended guard.
func (a *afterSalesService) AgreeAfterSales(id int, remark string) error {
	as := a._rep.GetAfterSalesOrder(id)
	if as == nil {
		return afterSales.ErrNoSuchOrder
	}
	return as.Agree()
}

// DeclineAfterSales refuses the after-sales request with a reason.
func (a *afterSalesService) DeclineAfterSales(id int, reason string) error {
	as := a._rep.GetAfterSalesOrder(id)
	if as == nil {
		return afterSales.ErrNoSuchOrder
	}
	return as.Decline(reason)
}

// RequestIntercede asks the platform to intercede in the dispute.
func (a *afterSalesService) RequestIntercede(id int) error {
	as := a._rep.GetAfterSalesOrder(id)
	if as == nil {
		return afterSales.ErrNoSuchOrder
	}
	return as.RequestIntercede()
}

// ConfirmAfterSales confirms the after-sales order on behalf of the system.
func (a *afterSalesService) ConfirmAfterSales(id int) error {
	as := a._rep.GetAfterSalesOrder(id)
	if as == nil {
		return afterSales.ErrNoSuchOrder
	}
	return as.Confirm()
}
// RejectAfterSales sends the after-sales order back (system rejection) with
// a remark; returns ErrNoSuchOrder when the order does not exist.
func (a *afterSalesService) RejectAfterSales(id int, remark string) error {
	as := a._rep.GetAfterSalesOrder(id)
	if as == nil {
		return afterSales.ErrNoSuchOrder
	}
	return as.Reject(remark)
}
// ProcessAfterSalesOrder completes a refund or return order; normally
// invoked automatically by the system. Orders of any other kind yield
// ErrAutoProcess.
func (a *afterSalesService) ProcessAfterSalesOrder(id int) error {
	as := a._rep.GetAfterSalesOrder(id)
	if as == nil {
		return afterSales.ErrNoSuchOrder
	}
	v := as.Value()
	// FIX: both cases previously tested TypeRefund — a duplicate constant
	// case, which does not compile in Go; the second case must be TypeReturn
	// to match the refund/return semantics of this method.
	// NOTE(review): v.State is compared against Type* constants here; confirm
	// the state and type enums intentionally share values.
	switch v.State {
	case afterSales.TypeRefund, afterSales.TypeReturn:
		return as.Process()
	}
	return afterSales.ErrAutoProcess
}
// ReceiveReturnShipment records receipt of the returned goods; for
// non-exchange orders it then triggers final processing.
// NOTE(review): no nil check on the repository result — a missing order
// panics here; confirm callers guarantee existence.
func (a *afterSalesService) ReceiveReturnShipment(id int) error {
	as := a._rep.GetAfterSalesOrder(id)
	err := as.ReturnReceive()
	if err == nil {
		// NOTE(review): State is compared to a Type* constant — confirm the
		// enums intentionally share values.
		if as.Value().State != afterSales.TypeExchange {
			err = as.Process()
		}
	}
	return err
}

// ExchangeShipment ships the replacement goods for an exchange order.
// NOTE(review): the type assertion panics if the order is missing or not an
// exchange order — confirm callers guarantee this.
func (a *afterSalesService) ExchangeShipment(id int, spName string, spOrder string) error {
	ex := a._rep.GetAfterSalesOrder(id).(afterSales.IExchangeOrder)
	return ex.ExchangeShip(spName, spOrder)
}

// ReceiveExchange records receipt of the replacement goods for an exchange
// order. Same type-assertion caveat as ExchangeShipment.
func (a *afterSalesService) ReceiveExchange(id int) error {
	ex := a._rep.GetAfterSalesOrder(id).(afterSales.IExchangeOrder)
	return ex.ExchangeReceive()
}
fix
Former-commit-id: a5b97f7c71a878cec74c923561e331c4511eaaf1
/**
* Copyright 2015 @ z3q.net.
* name : aftersales_service.go
* author : jarryliu
* date : 2016-07-18 17:16
* description :
* history :
*/
package dps
import (
"github.com/jsix/gof/db"
"go2o/core/domain/interface/after-sales"
"go2o/core/domain/interface/order"
"go2o/core/dto"
"go2o/core/infrastructure/format"
"go2o/core/query"
)
type afterSalesService struct {
_orderRep order.IOrderRep
_rep afterSales.IAfterSalesRep
_query *query.AfterSalesQuery
db.Connector
}
func NewAfterSalesService(rep afterSales.IAfterSalesRep,
q *query.AfterSalesQuery, orderRep order.IOrderRep) *afterSalesService {
return &afterSalesService{
_rep: rep,
_orderRep: orderRep,
_query: q,
}
}
// 提交售后单
func (a *afterSalesService) SubmitAfterSalesOrder(orderId int, asType int,
snapshotId int, quantity int, reason string, img string) (int, error) {
ro := a._rep.CreateAfterSalesOrder(&afterSales.AfterSalesOrder{
// 订单编号
OrderId: orderId,
// 类型,退货、换货、维修
Type: asType,
// 售后原因
Reason: reason,
ReturnSpImage: img,
})
err := ro.SetItem(snapshotId, quantity)
if err == nil {
return ro.Submit()
}
return 0, err
}
// 获取订单的所有售后单
func (a *afterSalesService) GetAllAfterSalesOrderOfSaleOrder(orderId int) []afterSales.AfterSalesOrder {
list := a._rep.GetAllOfSaleOrder(orderId)
arr := make([]afterSales.AfterSalesOrder, len(list))
for i, v := range list {
arr[i] = v.Value()
arr[i].StateText = afterSales.Stat(arr[i].State).String()
}
return arr
}
// 获取会员的分页售后单
func (a *afterSalesService) QueryPagerAfterSalesOrderOfMember(memberId, begin,
size int, where string) (int, []*dto.PagedMemberAfterSalesOrder) {
return a._query.QueryPagerAfterSalesOrderOfMember(memberId, begin, size, where)
}
// 获取商户的分页售后单
func (a *afterSalesService) QueryPagerAfterSalesOrderOfVendor(vendorId, begin,
size int, where string) (int, []*dto.PagedVendorAfterSalesOrder) {
return a._query.QueryPagerAfterSalesOrderOfVendor(vendorId, begin, size, where)
}
// GetAfterSalesOrder returns the order number (order_no) of the sale order
// with the given id, or 0 when no row matches.
func (a *afterSalesService) GetAfterSalesOrder(order_id int) int {
	id := 0
	// FIX: the SQL verb was misspelled "SSELECT", so the query could never
	// execute and this always returned 0.
	a.Connector.ExecScalar("SELECT order_no FROM sale_order WHERE id=?", &id, order_id)
	return id
}
// 获取售后单
func (a *afterSalesService) GetAfterSaleOrder(id int) *afterSales.AfterSalesOrder {
as := a._rep.GetAfterSalesOrder(id)
if as != nil {
v := as.Value()
v.StateText = afterSales.Stat(v.State).String()
v.ReturnSpImage = format.GetResUrl(v.ReturnSpImage)
return &v
}
return nil
}
// AgreeAfterSales approves the after-sales request.
// FIX: all four methods below previously dereferenced the repository result
// without a nil check and panicked for a missing order; RejectAfterSales
// shows the intended guard.
func (a *afterSalesService) AgreeAfterSales(id int, remark string) error {
	as := a._rep.GetAfterSalesOrder(id)
	if as == nil {
		return afterSales.ErrNoSuchOrder
	}
	return as.Agree()
}

// DeclineAfterSales refuses the after-sales request with a reason.
func (a *afterSalesService) DeclineAfterSales(id int, reason string) error {
	as := a._rep.GetAfterSalesOrder(id)
	if as == nil {
		return afterSales.ErrNoSuchOrder
	}
	return as.Decline(reason)
}

// RequestIntercede asks the platform to intercede in the dispute.
func (a *afterSalesService) RequestIntercede(id int) error {
	as := a._rep.GetAfterSalesOrder(id)
	if as == nil {
		return afterSales.ErrNoSuchOrder
	}
	return as.RequestIntercede()
}

// ConfirmAfterSales confirms the after-sales order on behalf of the system.
func (a *afterSalesService) ConfirmAfterSales(id int) error {
	as := a._rep.GetAfterSalesOrder(id)
	if as == nil {
		return afterSales.ErrNoSuchOrder
	}
	return as.Confirm()
}
// 系统退回
func (a *afterSalesService) RejectAfterSales(id int, remark string) error {
as := a._rep.GetAfterSalesOrder(id)
if as == nil {
return afterSales.ErrNoSuchOrder
}
return as.Reject(remark)
}
// 处理退款/退货完成,一般是系统自动调用
func (a *afterSalesService) ProcessAfterSalesOrder(id int) error {
as := a._rep.GetAfterSalesOrder(id)
if as == nil {
return afterSales.ErrNoSuchOrder
}
v := as.Value()
switch v.State {
case afterSales.TypeRefund:
return as.Process()
case afterSales.TypeReturn:
return as.Process()
}
return afterSales.ErrAutoProcess
}
// 售后收货
func (a *afterSalesService) ReceiveReturnShipment(id int) error {
as := a._rep.GetAfterSalesOrder(id)
err := as.ReturnReceive()
if err == nil {
if as.Value().State != afterSales.TypeExchange {
err = as.Process()
}
}
return err
}
// 换货发货
func (a *afterSalesService) ExchangeShipment(id int, spName string, spOrder string) error {
ex := a._rep.GetAfterSalesOrder(id).(afterSales.IExchangeOrder)
return ex.ExchangeShip(spName, spOrder)
}
// 换货收货
func (a *afterSalesService) ReceiveExchange(id int) error {
ex := a._rep.GetAfterSalesOrder(id).(afterSales.IExchangeOrder)
return ex.ExchangeReceive()
}
|
package main
import (
"log"
"github.com/attwad/cdf/indexer"
"github.com/attwad/cdf/testdata"
)
// main indexes a fixed test course and transcript into a local Elasticsearch
// instance; a manual debug entry point, not production code.
func main() {
	if err := indexer.NewElasticIndexer("http://localhost:9200").Index(
		testdata.CreateCourse(), testdata.CreateTranscript()); err != nil {
		log.Fatal(err)
	}
}
removed index main debug
|
package log
import (
"fmt"
"io"
"log"
"math"
"os"
"strings"
)
const (
LevelFatal = iota
LevelError
LevelWarn
LevelInfo
LevelDebug
)
// Level is the package-wide default log level, read once at init from the
// LOG_LEVEL environment variable (FATAL/ERROR/WARN/DEBUG). Any other value,
// including unset, yields LevelInfo. Note there is no "INFO" case: INFO is
// simply the default.
var Level = func() int {
	switch os.Getenv("LOG_LEVEL") {
	case "FATAL":
		return LevelFatal
	case "ERROR":
		return LevelError
	case "WARN":
		return LevelWarn
	case "DEBUG":
		return LevelDebug
	default:
		return LevelInfo
	}
}()
var (
DefaultLogger = New()
defaultOutput io.Writer = os.Stdout
)
// New returns a Logger with no ID prefix, using the package defaults.
func New() *Logger {
	return NewWithID("")
}

// NewWithID returns a Logger that includes id in every message, snapshotting
// the current package-level Level and output destination.
func NewWithID(id string) *Logger {
	return &Logger{
		ID:    id,
		Level: Level,                         // grab default
		l:     log.New(defaultOutput, "", 0), // don't touch the default logger on 'log' package
	}
}
// Fatal outputs a fatal message with an optional list of key/value pairs and
// exits the process with status 1, mirroring (*Logger).Fatal.
func Fatal(id, description string, keysAndValues ...interface{}) {
	if Level < LevelFatal {
		return
	}
	logMessage(DefaultLogger.l, id, "FATAL", description, keysAndValues...)
	// FIX: the package-level Fatal previously returned without exiting,
	// unlike (*Logger).Fatal which calls os.Exit(1).
	os.Exit(1)
}
// Error outputs an error message with an optional list of key/value pairs.
// Note: the package-level functions gate on the package variable Level, not
// on DefaultLogger.Level.
func Error(id, description string, keysAndValues ...interface{}) {
	if Level < LevelError {
		return
	}
	logMessage(DefaultLogger.l, id, "ERROR", description, keysAndValues...)
}

// Warn outputs a warning message with an optional list of key/value pairs.
//
// If LogLevel is set below LevelWarn, calling this method will yield no
// side effects.
func Warn(id, description string, keysAndValues ...interface{}) {
	if Level < LevelWarn {
		return
	}
	// "WARN " is padded to align with 5-character severities in the output.
	logMessage(DefaultLogger.l, id, "WARN ", description, keysAndValues...)
}

// Info outputs an info message with an optional list of key/value pairs.
//
// If LogLevel is set below LevelInfo, calling this method will yield no
// side effects.
func Info(id, description string, keysAndValues ...interface{}) {
	if Level < LevelInfo {
		return
	}
	logMessage(DefaultLogger.l, id, "INFO ", description, keysAndValues...)
}

// Debug outputs a debug message with an optional list of key/value pairs.
//
// If LogLevel is set below LevelDebug, calling this method will yield no
// side effects.
func Debug(id, description string, keysAndValues ...interface{}) {
	if Level < LevelDebug {
		return
	}
	logMessage(DefaultLogger.l, id, "DEBUG", description, keysAndValues...)
}
// SetOutput sets the output destination for the default logger.
//
// All new logger instances created after this call will use the provided
// io.Writer as destination for their output.
//
// If you specifically want to change the output of DefaultLogger and not
// affect new Logger instance creation, use log.DefaultLogger.SetOutput()
func SetOutput(w io.Writer) {
	defaultOutput = w
	DefaultLogger.SetOutput(w)
}
// Logger is a leveled logger that optionally prefixes each message with an ID.
type Logger struct {
	ID    string      // optional identifier included in every message
	Level int         // messages above this level are suppressed
	l     *log.Logger // underlying sink, created with no prefix or flags
}

// Fatal outputs an error message with an optional list of key/value pairs and exits
func (s *Logger) Fatal(description string, keysAndValues ...interface{}) {
	if s.Level < LevelFatal {
		return
	}
	logMessage(s.l, s.ID, "FATAL", description, keysAndValues...)
	os.Exit(1)
}

// Error outputs an error message with an optional list of key/value pairs.
func (s *Logger) Error(description string, keysAndValues ...interface{}) {
	if s.Level < LevelError {
		return
	}
	logMessage(s.l, s.ID, "ERROR", description, keysAndValues...)
}

// Warn outputs a warning message with an optional list of key/value pairs.
//
// If LogLevel is set below LevelWarn, calling this method will yield no
// side effects.
func (s *Logger) Warn(description string, keysAndValues ...interface{}) {
	if s.Level < LevelWarn {
		return
	}
	// "WARN " is padded to align with 5-character severities in the output.
	logMessage(s.l, s.ID, "WARN ", description, keysAndValues...)
}

// Info outputs an info message with an optional list of key/value pairs.
//
// If LogLevel is set below LevelInfo, calling this method will yield no
// side effects.
func (s *Logger) Info(description string, keysAndValues ...interface{}) {
	if s.Level < LevelInfo {
		return
	}
	logMessage(s.l, s.ID, "INFO ", description, keysAndValues...)
}

// Debug outputs a debug message with an optional list of key/value pairs.
//
// If LogLevel is set below LevelDebug, calling this method will yield no
// side effects.
func (s *Logger) Debug(description string, keysAndValues ...interface{}) {
	if s.Level < LevelDebug {
		return
	}
	logMessage(s.l, s.ID, "DEBUG", description, keysAndValues...)
}

// SetOutput sets the output destination for the logger.
//
// Useful to change where the log stream ends up being written to.
func (s *Logger) SetOutput(w io.Writer) {
	s.l = log.New(w, "", 0)
}
// logMessage writes a formatted message to the default logger.
//
// Format is "SEVERITY | Description [| k1='v1' k2='v2' k3=]"
// with key/value pairs being optional, depending on whether args are provided
// logMessage writes a formatted message to the given logger.
//
// Format is "SEVERITY | Description [| k1='v1' k2='v2' k3=]"
// with key/value pairs being optional, depending on whether args are provided
func logMessage(logger *log.Logger, id, severity, description string, args ...interface{}) {
	// A full log statement is <severity> | [<id> |] <description> [| <kvs>].
	parts := make([]interface{}, 0, 7)
	parts = append(parts, severity, "|")
	if len(id) > 0 {
		parts = append(parts, id, "|")
	}
	parts = append(parts, description)
	if len(args) > 0 {
		parts = append(parts, "|", expandKeyValuePairs(args))
	}
	logger.Println(parts...)
}
// expandKeyValuePairs converts a list of arguments into a string with
// the format "k='v' foo='bar' bar=".
//
// When the final value is missing, the format "bar=" is used.
// expandKeyValuePairs converts a list of arguments into a string with
// the format "k='v' foo='bar' bar=".
//
// When the final value is missing, the format "bar=" is used.
func expandKeyValuePairs(keyValuePairs []interface{}) string {
	total := len(keyValuePairs)
	pairCount := int(math.Ceil(float64(total) / 2)) // odd trailing key still counts
	rendered := make([]string, 0, pairCount)
	for i := 0; i < total; i += 2 {
		if i+1 < total {
			rendered = append(rendered, fmt.Sprintf("%v='%v'", keyValuePairs[i], keyValuePairs[i+1]))
		} else {
			// Dangling key with no value.
			rendered = append(rendered, fmt.Sprintf("%v=", keyValuePairs[i]))
		}
	}
	return strings.Join(rendered, " ")
}
Add missing os.Exit to package-level Fatal()
package log
import (
"fmt"
"io"
"log"
"math"
"os"
"strings"
)
const (
LevelFatal = iota
LevelError
LevelWarn
LevelInfo
LevelDebug
)
var Level = func() int {
switch os.Getenv("LOG_LEVEL") {
case "FATAL":
return LevelFatal
case "ERROR":
return LevelError
case "WARN":
return LevelWarn
case "DEBUG":
return LevelDebug
default:
return LevelInfo
}
}()
var (
DefaultLogger = New()
defaultOutput io.Writer = os.Stdout
)
func New() *Logger {
return NewWithID("")
}
func NewWithID(id string) *Logger {
return &Logger{
ID: id,
Level: Level, // grab default
l: log.New(defaultOutput, "", 0), // don't touch the default logger on 'log' package
}
}
func Fatal(id, description string, keysAndValues ...interface{}) {
if Level < LevelFatal {
return
}
logMessage(DefaultLogger.l, id, "FATAL", description, keysAndValues...)
os.Exit(1)
}
// Error outputs an error message with an optional list of key/value pairs.
func Error(id, description string, keysAndValues ...interface{}) {
if Level < LevelError {
return
}
logMessage(DefaultLogger.l, id, "ERROR", description, keysAndValues...)
}
// Warn outputs a warning message with an optional list of key/value pairs.
//
// If LogLevel is set below LevelWarn, calling this method will yield no
// side effects.
func Warn(id, description string, keysAndValues ...interface{}) {
if Level < LevelWarn {
return
}
logMessage(DefaultLogger.l, id, "WARN ", description, keysAndValues...)
}
// Info outputs an info message with an optional list of key/value pairs.
//
// If LogLevel is set below LevelInfo, calling this method will yield no
// side effects.
func Info(id, description string, keysAndValues ...interface{}) {
if Level < LevelInfo {
return
}
logMessage(DefaultLogger.l, id, "INFO ", description, keysAndValues...)
}
// Debug outputs an info message with an optional list of key/value pairs.
//
// If LogLevel is set below LevelDebug, calling this method will yield no
// side effects.
func Debug(id, description string, keysAndValues ...interface{}) {
if Level < LevelDebug {
return
}
logMessage(DefaultLogger.l, id, "DEBUG", description, keysAndValues...)
}
// SetOutput sets the output destination for the default logger.
//
// All new logger instances created after this call will use the provided
// io.Writer as destination for their output.
//
// If you specifically want to change the output of DefaultLogger and not
// affect new Logger instance creation, use log.DefaultLogger.SetOutput()
func SetOutput(w io.Writer) {
defaultOutput = w
DefaultLogger.SetOutput(w)
}
type Logger struct {
ID string
Level int
l *log.Logger
}
// Fatal outputs an error message with an optional list of key/value pairs and exits
func (s *Logger) Fatal(description string, keysAndValues ...interface{}) {
if s.Level < LevelFatal {
return
}
logMessage(s.l, s.ID, "FATAL", description, keysAndValues...)
os.Exit(1)
}
// Error outputs an error message with an optional list of key/value pairs.
func (s *Logger) Error(description string, keysAndValues ...interface{}) {
if s.Level < LevelError {
return
}
logMessage(s.l, s.ID, "ERROR", description, keysAndValues...)
}
// Warn outputs a warning message with an optional list of key/value pairs.
//
// If LogLevel is set below LevelWarn, calling this method will yield no
// side effects.
func (s *Logger) Warn(description string, keysAndValues ...interface{}) {
if s.Level < LevelWarn {
return
}
logMessage(s.l, s.ID, "WARN ", description, keysAndValues...)
}
// Info outputs an info message with an optional list of key/value pairs.
//
// If LogLevel is set below LevelInfo, calling this method will yield no
// side effects.
func (s *Logger) Info(description string, keysAndValues ...interface{}) {
if s.Level < LevelInfo {
return
}
logMessage(s.l, s.ID, "INFO ", description, keysAndValues...)
}
// Debug outputs an info message with an optional list of key/value pairs.
//
// If LogLevel is set below LevelDebug, calling this method will yield no
// side effects.
func (s *Logger) Debug(description string, keysAndValues ...interface{}) {
if s.Level < LevelDebug {
return
}
logMessage(s.l, s.ID, "DEBUG", description, keysAndValues...)
}
// SetOutput sets the output destination for the logger.
//
// Useful to change where the log stream ends up being written to.
func (s *Logger) SetOutput(w io.Writer) {
s.l = log.New(w, "", 0)
}
// logMessage writes a formatted message to the default logger.
//
// Format is "SEVERITY | Description [| k1='v1' k2='v2' k3=]"
// with key/value pairs being optional, depending on whether args are provided
func logMessage(logger *log.Logger, id, severity, description string, args ...interface{}) {
// A full log statement is <id> | <severity> | <description> | <keys and values>
items := make([]interface{}, 0, 7)
items = append(items, severity)
items = append(items, "|")
if len(id) > 0 {
items = append(items, id)
items = append(items, "|")
}
items = append(items, description)
if len(args) > 0 {
keysAndValues := expandKeyValuePairs(args)
items = append(items, "|")
items = append(items, keysAndValues)
}
logger.Println(items...)
}
// expandKeyValuePairs converts a list of arguments into a string with
// the format "k='v' foo='bar' bar=".
//
// When the final value is missing, the format "bar=" is used.
func expandKeyValuePairs(keyValuePairs []interface{}) string {
argCount := len(keyValuePairs)
kvPairCount := int(math.Ceil(float64(argCount) / 2)) // math, y u do dis.
kvPairs := make([]string, kvPairCount)
for i := 0; i < kvPairCount; i++ {
keyIndex := i * 2
valueIndex := keyIndex + 1
key := keyValuePairs[keyIndex]
if valueIndex < argCount {
value := keyValuePairs[valueIndex]
kvPairs[i] = fmt.Sprintf("%v='%v'", key, value)
} else {
kvPairs[i] = fmt.Sprintf("%v=", key)
}
}
return strings.Join(kvPairs, " ")
}
|
package logger
import (
"bytes"
"testing"
)
const (
namet = name + ".Test"
)
// TestGetLevel verifies that GetLevel resolves the priority for the root,
// for unset child loggers (DefaultPriority) and for descendants of an
// explicitly-set logger (inherited Emergency).
func TestGetLevel(t *testing.T) {
	n := New("logger.Test.GetLevel")
	n.Info(n, "Starting")
	m := make(map[Logger]Priority)
	m[""] = DefaultPriority
	m["."] = DefaultPriority
	m["Test"] = DefaultPriority
	m[".Test"] = DefaultPriority
	SetLevel("Test2", Emergency)
	m["Test2"] = Emergency
	m["Test2.Test"] = Emergency
	m["Test2.Test.Test"] = Emergency
	m["Test2.Test.Test.Test"] = Emergency
	m["Test2.Test.Test.Test.Test"] = Emergency
	m["Test2.Test.Test.Test.Test.Test"] = Emergency
	for k, v := range m {
		o := GetLevel(k)
		if o != v {
			// FIX: "EXPECED" typo in the failure message corrected.
			n.Error(n, "GOT: '", o, "', EXPECTED: '", v, "'", ", KEY: '", k, "'")
			t.Fail()
		}
		n.Debug(n, "GOT: '", o, "', EXPECTED: '", v, "'", ", KEY: '", k, "'")
	}
	n.Info(n, "Finished")
}
func TestGetParentLevel(t *testing.T) {
n := New("logger.Test.getParentLevel")
n.Info(n, "Starting")
m := make(map[Logger]Priority)
m["."] = DefaultPriority
m["Test"] = DefaultPriority
m["Test.Test"] = DefaultPriority
SetLevel("Test2", Emergency)
m["Test2"] = DefaultPriority
m["Test2.Test"] = Emergency
for k, v := range m {
o := getParentLevel(k)
if o != v {
n.Error(n, "GOT: '", o, "', EXPECED: '", v, "'", ", KEY: '", k, "'")
t.Fail()
}
n.Debug(n, "GOT: '", o, "', EXPECED: '", v, "'", ", KEY: '", k, "'")
}
n.Info(n, "Finished")
}
func TestgetParent(t *testing.T) {
n := New("logger.Test.getParent")
n.Info(n, "Starting")
m := [][]Logger{
{"", "."},
{".Test", "."},
{".", "."},
{"Test", "."},
{"Test.Test", "Test"},
{"Test.Test.Test", "Test.Test"},
{"Test.Test.Test.Test", "Test.Test.Test"},
}
for i := range m {
a := m[i]
k := a[0]
v := a[1]
o := getParent(k)
if o != v {
n.Error(n, "GOT: '", o, "', EXPECED: '", v, "'", ", KEY: '", k, "'")
t.Fail()
}
n.Debug(n, "GOT: '", o, "', EXPECED: '", v, "'", ", KEY: '", k, "'")
}
n.Info(n, "Finished")
}
func TestPrintMessage(t *testing.T) {
l := New(namet + ".PrintMessage")
p := "\033[0m"
b := "Test - " + p + p + "Debug" + p + " - "
m := [][]string{
{"", b},
{"Test", b + "Test"},
{"Test.Test", b + "Test.Test"},
{"Test.Test.Test", b + "Test.Test.Test"},
}
r := getLogger("Test")
r.Format = "{{.Logger}} - {{.Priority}} - {{.Message}}"
for _, d := range m {
l.Info("Checking: ", d)
k := d[0]
v := d[1]
var b bytes.Buffer
r.Output = &b
printMessage(r, Debug, k)
o := b.String()
l.Debug("GOT: '", o, "', EXPECED: '", v, "'", ", KEY: '", k, "'")
if o != v {
l.Critical("GOT: '", o, "', EXPECED: '", v, "'", ", KEY: '", k, "'")
t.Fail()
}
}
}
func TestPrintMessageNoColor(t *testing.T) {
l := New(namet + ".PrintMessage")
m := [][]string{
{"", "Test - Debug - "},
{"Test", "Test - Debug - Test"},
{"Test.Test", "Test - Debug - Test.Test"},
{"Test.Test.Test", "Test - Debug - Test.Test.Test"},
}
r := getLogger("Test")
r.Format = "{{.Logger}} - {{.Priority}} - {{.Message}}"
r.NoColor = true
for _, d := range m {
l.Info("Checking: ", d)
k := d[0]
v := d[1]
var b bytes.Buffer
r.Output = &b
printMessage(r, Debug, k)
o := b.String()
l.Debug("GOT: '", o, "', EXPECED: '", v, "'", ", KEY: '", k, "'")
if o != v {
l.Critical("GOT: '", o, "', EXPECED: '", v, "'", ", KEY: '", k, "'")
t.Fail()
}
}
}
func TestPrintColors(t *testing.T) {
l := New("logger.Test.PrintColors")
SetLevel("logger.Test.PrintColors", Disable)
//TODO: Compare strings instead of printing.
l.Debug("Debug")
l.Info("Info")
l.Notice("Notice")
l.Warning("Warning")
l.Error("Error")
l.Critical("Critical")
l.Alert("Alert")
l.Emergency("Emergency")
SetNoColor("logger.Test.PrintColors", true)
l.Debug("NoColorDebug")
l.Info("NoColorInfo")
l.Notice("NoColorNotice")
l.Warning("NoColorWarning")
l.Error("NoColorError")
l.Critical("NoColorCritical")
l.Alert("NoColorAlert")
l.Emergency("NoColorEmergency")
}
func TestCheckPriorityOK(t *testing.T) {
l := New(namet + ".CheckPriority.OK")
for k := range priorities {
l.Info("Checking: ", k)
e := checkPriority(k)
l.Debug("Return of ", k, ": ", e)
if e != nil {
l.Critical(e)
t.Fail()
}
}
}
func TestCheckPriorityFail(t *testing.T) {
l := New(namet + ".CheckPriority.FAIL")
k := Disable + 1
l.Info("Checking: ", k)
e := checkPriority(k)
l.Debug("Return of ", k, ": ", e)
if e == nil {
l.Critical("Should not have succeeded")
t.Fail()
return
}
}
func TestCheckPriorityFailDoesNotExist(t *testing.T) {
l := New(namet + ".CheckPriority.FAIL.DoesNotExist")
k := Disable + 1
x := "priority does not exist"
l.Info("Checking: ", k)
e := checkPriority(k)
l.Debug("Return of ", k, ": ", e)
if e != nil {
if e.Error() != x {
l.Critical("Wrong error, EXPECTED: ", x, ", GOT: ", e.Error())
t.Fail()
}
}
}
// Benchmarks for the package-level log function at various depths of the
// logger hierarchy, plus the getParent/printMessage/formatMessage helpers.

// BenchmarkLogRootEmergency logs to the root logger at Emergency priority.
func BenchmarkLogRootEmergency(b *testing.B) {
	for i := 0; i < b.N; i++ {
		log(".", Emergency, "Test")
	}
}

// BenchmarkLogRootEmergencyNoColor is the same with coloring disabled.
func BenchmarkLogRootEmergencyNoColor(b *testing.B) {
	SetNoColor(".", true)
	for i := 0; i < b.N; i++ {
		log(".", Emergency, "Test")
	}
}

// BenchmarkLogRoot logs to the root logger at Debug priority.
func BenchmarkLogRoot(b *testing.B) {
	for i := 0; i < b.N; i++ {
		log(".", Debug, "Test")
	}
}

// BenchmarkLogChild logs to a first-level child logger.
func BenchmarkLogChild(b *testing.B) {
	for i := 0; i < b.N; i++ {
		log("BenchLogChild", Debug, "Test")
	}
}

// BenchmarkLogChildChild logs to a second-level child logger.
func BenchmarkLogChildChild(b *testing.B) {
	for i := 0; i < b.N; i++ {
		log("BenchLogChildChild.Test", Debug, "Test")
	}
}

// BenchmarkLogChildAllocated logs Debug to a child whose level was pre-set
// to Emergency via SetLevel — presumably measuring the filtered path on an
// already-allocated logger; NOTE(review): confirm log's filtering semantics.
func BenchmarkLogChildAllocated(b *testing.B) {
	SetLevel("BenchLogChildAllocated", Emergency)
	for i := 0; i < b.N; i++ {
		log("BenchLogChildAllocated", Debug, "Test")
	}
}

// BenchmarkLogChildChildAllocated is the same for a second-level child.
func BenchmarkLogChildChildAllocated(b *testing.B) {
	SetLevel("BenchLogChildChildAllocated.Test", Emergency)
	for i := 0; i < b.N; i++ {
		log("BenchLogChildChildAllocated.Test", Debug, "Test")
	}
}

// The BenchmarkGetParent* benchmarks measure parent-name resolution at
// increasing hierarchy depths.
func BenchmarkGetParentRoot(b *testing.B) {
	for i := 0; i < b.N; i++ {
		getParent(".")
	}
}

func BenchmarkGetParentChild(b *testing.B) {
	for i := 0; i < b.N; i++ {
		getParent("BenchgetParentChild")
	}
}

func BenchmarkGetParentChildChild(b *testing.B) {
	for i := 0; i < b.N; i++ {
		getParent("BenchgetParentChildChild.Test")
	}
}

func BenchmarkGetParentChildChildChild(b *testing.B) {
	for i := 0; i < b.N; i++ {
		getParent("BenchgetParentChildChild.Test.Test")
	}
}

func BenchmarkGetParentChildChildChildChild(b *testing.B) {
	for i := 0; i < b.N; i++ {
		getParent("BenchgetParentChildChildChild.Test.Test")
	}
}

func BenchmarkGetParentChildChildChildChildChild(b *testing.B) {
	for i := 0; i < b.N; i++ {
		getParent("BenchgetParentChildChildChildChild.Test.Test.Test")
	}
}

// BenchmarkPrintMessage measures a full formatted write into a buffer.
func BenchmarkPrintMessage(b *testing.B) {
	var a bytes.Buffer
	l := getLogger("BenchprintMessage")
	l.Output = &a
	b.ResetTimer() // exclude logger setup from the measurement
	for i := 0; i < b.N; i++ {
		printMessage(l, Debug, "Message")
	}
}

// BenchmarkFormatMessage measures template formatting alone.
func BenchmarkFormatMessage(b *testing.B) {
	l := getLogger("BenchformatMessage")
	m := new(message)
	m.Time = "Mo 30 Sep 2013 20:29:19 CEST"
	m.Logger = l.Logger
	m.Priority = "Debug"
	m.Message = "Test"
	b.ResetTimer() // exclude fixture construction from the measurement
	for i := 0; i < b.N; i++ {
		formatMessage(m, l.Format)
	}
}
Added test for GetPriorityFormat.
package logger
import (
"bytes"
"testing"
)
const (
namet = name + ".Test"
)
// TestGetLevel checks that GetLevel returns DefaultPriority for
// unconfigured loggers (including the root "." and the empty name) and the
// explicitly configured priority for a logger and all of its descendants.
func TestGetLevel(t *testing.T) {
	n := New("logger.Test.GetLevel")
	n.Info(n, "Starting")
	m := make(map[Logger]Priority)
	m[""] = DefaultPriority
	m["."] = DefaultPriority
	m["Test"] = DefaultPriority
	m[".Test"] = DefaultPriority
	SetLevel("Test2", Emergency)
	m["Test2"] = Emergency
	m["Test2.Test"] = Emergency
	m["Test2.Test.Test"] = Emergency
	m["Test2.Test.Test.Test"] = Emergency
	m["Test2.Test.Test.Test.Test"] = Emergency
	m["Test2.Test.Test.Test.Test.Test"] = Emergency
	for k, v := range m {
		o := GetLevel(k)
		if o != v {
			n.Error(n, "GOT: '", o, "', EXPECED: '", v, "'", ", KEY: '", k, "'")
			t.Fail()
		}
		n.Debug(n, "GOT: '", o, "', EXPECED: '", v, "'", ", KEY: '", k, "'")
	}
	n.Info(n, "Finished")
}

// TestGetParentLevel checks that getParentLevel resolves the priority of a
// logger's parent: DefaultPriority for unconfigured ancestors and the
// configured level when the direct parent was set via SetLevel.
func TestGetParentLevel(t *testing.T) {
	n := New("logger.Test.getParentLevel")
	n.Info(n, "Starting")
	m := make(map[Logger]Priority)
	m["."] = DefaultPriority
	m["Test"] = DefaultPriority
	m["Test.Test"] = DefaultPriority
	SetLevel("Test2", Emergency)
	m["Test2"] = DefaultPriority
	m["Test2.Test"] = Emergency
	for k, v := range m {
		o := getParentLevel(k)
		if o != v {
			n.Error(n, "GOT: '", o, "', EXPECED: '", v, "'", ", KEY: '", k, "'")
			t.Fail()
		}
		n.Debug(n, "GOT: '", o, "', EXPECED: '", v, "'", ", KEY: '", k, "'")
	}
	n.Info(n, "Finished")
}

// TestgetParent checks parent-name resolution: the root, the empty name and
// every top-level name resolve to ".", deeper names drop their last segment.
func TestgetParent(t *testing.T) {
	n := New("logger.Test.getParent")
	n.Info(n, "Starting")
	// Each pair is {child, expected parent}.
	m := [][]Logger{
		{"", "."},
		{".Test", "."},
		{".", "."},
		{"Test", "."},
		{"Test.Test", "Test"},
		{"Test.Test.Test", "Test.Test"},
		{"Test.Test.Test.Test", "Test.Test.Test"},
	}
	for i := range m {
		a := m[i]
		k := a[0]
		v := a[1]
		o := getParent(k)
		if o != v {
			n.Error(n, "GOT: '", o, "', EXPECED: '", v, "'", ", KEY: '", k, "'")
			t.Fail()
		}
		n.Debug(n, "GOT: '", o, "', EXPECED: '", v, "'", ", KEY: '", k, "'")
	}
	n.Info(n, "Finished")
}

// TestPrintMessage checks the rendered output of printMessage against a
// fixed format template, including the ANSI reset sequences emitted around
// the priority name.
func TestPrintMessage(t *testing.T) {
	l := New(namet + ".PrintMessage")
	// p is the ANSI "reset attributes" escape sequence.
	p := "\033[0m"
	b := "Test - " + p + p + "Debug" + p + " - "
	// Each pair is {message, expected rendered output}.
	m := [][]string{
		{"", b},
		{"Test", b + "Test"},
		{"Test.Test", b + "Test.Test"},
		{"Test.Test.Test", b + "Test.Test.Test"},
	}
	r := getLogger("Test")
	r.Format = "{{.Logger}} - {{.Priority}} - {{.Message}}"
	for _, d := range m {
		l.Info("Checking: ", d)
		k := d[0]
		v := d[1]
		var b bytes.Buffer
		r.Output = &b
		printMessage(r, Debug, k)
		o := b.String()
		l.Debug("GOT: '", o, "', EXPECED: '", v, "'", ", KEY: '", k, "'")
		if o != v {
			l.Critical("GOT: '", o, "', EXPECED: '", v, "'", ", KEY: '", k, "'")
			t.Fail()
		}
	}
}
// TestPrintMessageNoColor checks printMessage with NoColor set: the output
// must contain no ANSI escape sequences.
func TestPrintMessageNoColor(t *testing.T) {
	// Fix: use this test's own logger name instead of the one copy-pasted
	// from TestPrintMessage above.
	l := New(namet + ".PrintMessageNoColor")
	// Each pair is {message, expected rendered output}.
	m := [][]string{
		{"", "Test - Debug - "},
		{"Test", "Test - Debug - Test"},
		{"Test.Test", "Test - Debug - Test.Test"},
		{"Test.Test.Test", "Test - Debug - Test.Test.Test"},
	}
	r := getLogger("Test")
	r.Format = "{{.Logger}} - {{.Priority}} - {{.Message}}"
	r.NoColor = true
	for _, d := range m {
		l.Info("Checking: ", d)
		k := d[0]
		v := d[1]
		var b bytes.Buffer
		r.Output = &b
		printMessage(r, Debug, k)
		o := b.String()
		l.Debug("GOT: '", o, "', EXPECED: '", v, "'", ", KEY: '", k, "'")
		if o != v {
			l.Critical("GOT: '", o, "', EXPECED: '", v, "'", ", KEY: '", k, "'")
			t.Fail()
		}
	}
}
// TestPrintColors emits one message at every priority, first with colors
// enabled and then with NoColor set.
// NOTE(review): the level is set to Disable beforehand — presumably that
// suppresses the output; the TODO below suggests this should capture and
// compare strings instead of printing.
func TestPrintColors(t *testing.T) {
	l := New("logger.Test.PrintColors")
	SetLevel("logger.Test.PrintColors", Disable)
	//TODO: Compare strings instead of printing.
	l.Debug("Debug")
	l.Info("Info")
	l.Notice("Notice")
	l.Warning("Warning")
	l.Error("Error")
	l.Critical("Critical")
	l.Alert("Alert")
	l.Emergency("Emergency")
	SetNoColor("logger.Test.PrintColors", true)
	l.Debug("NoColorDebug")
	l.Info("NoColorInfo")
	l.Notice("NoColorNotice")
	l.Warning("NoColorWarning")
	l.Error("NoColorError")
	l.Critical("NoColorCritical")
	l.Alert("NoColorAlert")
	l.Emergency("NoColorEmergency")
}
// TestCheckPriorityOK verifies that checkPriority accepts every key of the
// package-level priorities map without returning an error.
func TestCheckPriorityOK(t *testing.T) {
	l := New(namet + ".CheckPriority.OK")
	for k := range priorities {
		l.Info("Checking: ", k)
		e := checkPriority(k)
		l.Debug("Return of ", k, ": ", e)
		if e != nil {
			// A defined priority must never be rejected.
			l.Critical(e)
			t.Fail()
		}
	}
}

// TestCheckPriorityFail verifies that checkPriority rejects a priority
// value one past the highest defined priority (Disable).
func TestCheckPriorityFail(t *testing.T) {
	l := New(namet + ".CheckPriority.FAIL")
	k := Disable + 1
	l.Info("Checking: ", k)
	e := checkPriority(k)
	l.Debug("Return of ", k, ": ", e)
	if e == nil {
		l.Critical("Should not have succeeded")
		t.Fail()
		return
	}
}
// TestCheckPriorityFailDoesNotExist verifies that checkPriority returns the
// exact "priority does not exist" error for an out-of-range priority.
func TestCheckPriorityFailDoesNotExist(t *testing.T) {
	l := New(namet + ".CheckPriority.FAIL.DoesNotExist")
	k := Disable + 1
	x := "priority does not exist"
	l.Info("Checking: ", k)
	e := checkPriority(k)
	l.Debug("Return of ", k, ": ", e)
	if e == nil {
		// Fix: previously a nil error made this test pass silently even
		// though an out-of-range priority must produce an error.
		l.Critical("expected an error for out-of-range priority")
		t.Fail()
		return
	}
	if e.Error() != x {
		l.Critical("Wrong error, EXPECTED: ", x, ", GOT: ", e.Error())
		t.Fail()
	}
}
// TestGetPriorityFormat verifies that getPriorityFormat returns the
// expected color and text attribute for every priority.
func TestGetPriorityFormat(t *testing.T) {
	l := New(namet + ".GetPriorityFormat")
	// Each row is {priority, expected color, expected text attribute}.
	m := [][]int{
		{int(Debug), colornone, textnormal},
		{int(Notice), colorgreen, textnormal},
		{int(Info), colorblue, textnormal},
		{int(Warning), coloryellow, textnormal},
		{int(Error), coloryellow, textbold},
		{int(Critical), colorred, textnormal},
		{int(Alert), colorred, textbold},
		{int(Emergency), colorred, textblink},
	}
	for _, d := range m {
		p := Priority(d[0])
		n, e := NamePriority(p)
		if e != nil {
			l.Alert("Can not name priority: ", e)
			t.Fail()
			// Fix: without a name there is nothing meaningful to
			// compare or report against below.
			continue
		}
		c := d[1]
		f := d[2]
		a, b := getPriorityFormat(p)
		if c != a {
			l.Critical("Wrong color for ", n, ", EXPECTED: ", c, ", GOT: ", a)
			t.Fail()
		}
		if f != b {
			// Fix: the expected value in this message is the text
			// attribute f, not the color c.
			l.Critical("Wrong format for ", n, ", EXPECTED: ", f, ", GOT: ", b)
			t.Fail()
		}
	}
}
// Benchmarks for the package-level log function at various depths of the
// logger hierarchy, plus the getParent/printMessage/formatMessage helpers.

// BenchmarkLogRootEmergency logs to the root logger at Emergency priority.
func BenchmarkLogRootEmergency(b *testing.B) {
	for i := 0; i < b.N; i++ {
		log(".", Emergency, "Test")
	}
}

// BenchmarkLogRootEmergencyNoColor is the same with coloring disabled.
func BenchmarkLogRootEmergencyNoColor(b *testing.B) {
	SetNoColor(".", true)
	for i := 0; i < b.N; i++ {
		log(".", Emergency, "Test")
	}
}

// BenchmarkLogRoot logs to the root logger at Debug priority.
func BenchmarkLogRoot(b *testing.B) {
	for i := 0; i < b.N; i++ {
		log(".", Debug, "Test")
	}
}

// BenchmarkLogChild logs to a first-level child logger.
func BenchmarkLogChild(b *testing.B) {
	for i := 0; i < b.N; i++ {
		log("BenchLogChild", Debug, "Test")
	}
}

// BenchmarkLogChildChild logs to a second-level child logger.
func BenchmarkLogChildChild(b *testing.B) {
	for i := 0; i < b.N; i++ {
		log("BenchLogChildChild.Test", Debug, "Test")
	}
}

// BenchmarkLogChildAllocated logs Debug to a child whose level was pre-set
// to Emergency via SetLevel — presumably measuring the filtered path on an
// already-allocated logger; NOTE(review): confirm log's filtering semantics.
func BenchmarkLogChildAllocated(b *testing.B) {
	SetLevel("BenchLogChildAllocated", Emergency)
	for i := 0; i < b.N; i++ {
		log("BenchLogChildAllocated", Debug, "Test")
	}
}

// BenchmarkLogChildChildAllocated is the same for a second-level child.
func BenchmarkLogChildChildAllocated(b *testing.B) {
	SetLevel("BenchLogChildChildAllocated.Test", Emergency)
	for i := 0; i < b.N; i++ {
		log("BenchLogChildChildAllocated.Test", Debug, "Test")
	}
}

// The BenchmarkGetParent* benchmarks measure parent-name resolution at
// increasing hierarchy depths.
func BenchmarkGetParentRoot(b *testing.B) {
	for i := 0; i < b.N; i++ {
		getParent(".")
	}
}

func BenchmarkGetParentChild(b *testing.B) {
	for i := 0; i < b.N; i++ {
		getParent("BenchgetParentChild")
	}
}

func BenchmarkGetParentChildChild(b *testing.B) {
	for i := 0; i < b.N; i++ {
		getParent("BenchgetParentChildChild.Test")
	}
}

func BenchmarkGetParentChildChildChild(b *testing.B) {
	for i := 0; i < b.N; i++ {
		getParent("BenchgetParentChildChild.Test.Test")
	}
}

func BenchmarkGetParentChildChildChildChild(b *testing.B) {
	for i := 0; i < b.N; i++ {
		getParent("BenchgetParentChildChildChild.Test.Test")
	}
}

func BenchmarkGetParentChildChildChildChildChild(b *testing.B) {
	for i := 0; i < b.N; i++ {
		getParent("BenchgetParentChildChildChildChild.Test.Test.Test")
	}
}

// BenchmarkPrintMessage measures a full formatted write into a buffer.
func BenchmarkPrintMessage(b *testing.B) {
	var a bytes.Buffer
	l := getLogger("BenchprintMessage")
	l.Output = &a
	b.ResetTimer() // exclude logger setup from the measurement
	for i := 0; i < b.N; i++ {
		printMessage(l, Debug, "Message")
	}
}

// BenchmarkFormatMessage measures template formatting alone.
func BenchmarkFormatMessage(b *testing.B) {
	l := getLogger("BenchformatMessage")
	m := new(message)
	m.Time = "Mo 30 Sep 2013 20:29:19 CEST"
	m.Logger = l.Logger
	m.Priority = "Debug"
	m.Message = "Test"
	b.ResetTimer() // exclude fixture construction from the measurement
	for i := 0; i < b.N; i++ {
		formatMessage(m, l.Format)
	}
}
|
// Copyright 2013 The go-logger Authors. All rights reserved.
// This code is MIT licensed. See the LICENSE file for more info.
package log
import (
"bytes"
"fmt"
"io"
"math/rand"
"os"
"path/filepath"
"testing"
"runtime"
"time"
)
// TestStream verifies that a logger's output streams can be reassigned
// after construction through the exported Streams slice.
func TestStream(t *testing.T) {
	var buf bytes.Buffer
	logr := New(LEVEL_CRITICAL, os.Stdout, &buf)
	logr.Streams[1] = &buf
	if out := logr.Streams[1]; out != &buf {
		t.Errorf("Stream = %p, want %p", out, &buf)
	}
}
// TestMultiStreams verifies that a logger configured with two output
// streams (a temp file and a buffer) writes the same bytes to both.
func TestMultiStreams(t *testing.T) {
	rand.Seed(time.Now().UnixNano())
	fPath := filepath.Join(os.TempDir(), fmt.Sprint("go_test_",
		rand.Int()))
	file, err := os.Create(fPath)
	if err != nil {
		// Fix: t.Error does not interpret format verbs; use t.Errorf.
		t.Errorf("Create(%q) = %v; want: nil", fPath, err)
	}
	defer file.Close()
	var buf bytes.Buffer
	// eLen is the expected byte length of the formatted debug line below.
	eLen := 55
	logr := New(LEVEL_DEBUG, file, &buf)
	logr.Debugln("Testing debug output!")
	b := make([]byte, eLen)
	n, err := file.ReadAt(b, 0)
	if n != eLen || err != nil {
		t.Errorf("Read(%d) = %d, %v; want: %d, nil", eLen, n, err,
			eLen)
	}
	if buf.Len() != eLen {
		t.Errorf("buf.Len() = %d; want: %d", buf.Len(), eLen)
	}
}
// TestLongFileFlag verifies that the LlongFileName flag prepends the full
// path of the caller's source file to the message.
func TestLongFileFlag(t *testing.T) {
	var buf bytes.Buffer
	SetLevel(LEVEL_DEBUG)
	SetFlags(LnoPrefix | LlongFileName)
	SetStreams(os.Stdout, &buf)
	Debugln("Test long file flag")
	// runtime.Caller(0) yields this test file's path, which must appear
	// verbatim in the output.
	_, file, _, _ := runtime.Caller(0)
	expect := fmt.Sprintf("[DEBUG] %s: Test long file flag\n", file)
	if buf.String() != expect {
		t.Errorf("\nExpect:\n\t%s\nGot:\n\t%s\n", expect, buf.String())
	}
}
// TestShortFileFlag verifies that the LshortFile flag emits the base name
// (with a line number) of the caller's source file rather than its full
// path.
func TestShortFileFlag(t *testing.T) {
	b := new(bytes.Buffer)
	logr := New(LEVEL_DEBUG, b)
	logr.Flags = LstdFlags | LshortFile
	logr.Debugln("testing short file flag")
	_, file, lNum, _ := runtime.Caller(0)
	sName := filepath.Base(file)
	dOut := b.Bytes()
	// Fix: this file does not import "strings"/"strconv", so the original
	// body did not compile; use bytes and fmt (already imported) instead.
	if !bytes.Contains(dOut, []byte(sName)) || bytes.Index(dOut, []byte(file)) > 0 {
		t.Errorf("Debugln() = %q; does not contain %s", dOut, file)
	}
	// The Debugln call above sits exactly one line before runtime.Caller(0).
	lSrch := fmt.Sprintf(".go:%d", lNum-1)
	if !bytes.Contains(dOut, []byte(lSrch)) {
		t.Errorf("Debugln() = %q; does not contain %q", dOut, lSrch)
	}
}
// Prefixes and reference date format used by the output table below.
var (
	boldPrefix  = AnsiEscape(ANSI_BOLD, "TEST>", ANSI_OFF)
	colorPrefix = AnsiEscape(ANSI_BOLD, ANSI_RED, "TEST>", ANSI_OFF)
	date        = "Mon 20060102 15:04:05"
)

// outputTests drives TestOutput; want is a fmt template whose single %s
// placeholder receives the formatted date.
var outputTests = []struct {
	template   string
	prefix     string
	level      level
	dateFormat string
	flags      int
	text       string
	want       string
	wantErr    bool
}{
	// The %s format specifier is the placeholder for the date.
	{logFmt, boldPrefix, LEVEL_ALL, date, LstdFlags, "test number 1",
		"%s \x1b[1mTEST>\x1b[0m test number 1", false},
	{logFmt, colorPrefix, LEVEL_ALL, date, LstdFlags, "test number 2",
		"%s \x1b[1m\x1b[31mTEST>\x1b[0m test number 2", false},
	// Test output with coloring turned off
	{logFmt, AnsiEscape(ANSI_BOLD, "::", ANSI_OFF), LEVEL_ALL, date, Ldate,
		"test number 3", "%s :: test number 3", false},
	{logFmt, defaultPrefixColor, LEVEL_DEBUG, time.RubyDate, LstdFlags,
		"test number 4",
		"%s \x1b[1m\x1b[32m::\x1b[0m \x1b[1m\x1b[37m[DEBUG]\x1b[0m test number 4",
		false},
	{logFmt, defaultPrefixColor, LEVEL_INFO, time.RubyDate, LstdFlags,
		"test number 5",
		"%s \x1b[1m\x1b[32m::\x1b[0m \x1b[1m\x1b[32m[INFO]\x1b[0m test number 5",
		false},
	{logFmt, defaultPrefixColor, LEVEL_WARNING, time.RubyDate, LstdFlags,
		"test number 6",
		"%s \x1b[1m\x1b[32m::\x1b[0m \x1b[1m\x1b[33m[WARNING]\x1b[0m test number 6",
		false},
	{logFmt, defaultPrefixColor, LEVEL_ERROR, time.RubyDate, LstdFlags,
		"test number 7",
		"%s \x1b[1m\x1b[32m::\x1b[0m \x1b[1m\x1b[35m[ERROR]\x1b[0m test number 7",
		false},
	{logFmt, defaultPrefixColor, LEVEL_CRITICAL, time.RubyDate, LstdFlags,
		"test number 8",
		"%s \x1b[1m\x1b[32m::\x1b[0m \x1b[1m\x1b[31m[CRITICAL]\x1b[0m test number 8",
		false},
	// Test date format
	{logFmt, defaultPrefixColor, LEVEL_ALL, "Mon 20060102 15:04:05",
		Ldate, "test number 9",
		"%s :: test number 9", false},
}
// TestOutput runs each entry of the outputTests table through Fprint and
// compares the formatted result against the expected string.
func TestOutput(t *testing.T) {
	for i, k := range outputTests {
		var buf bytes.Buffer
		logr := New(LEVEL_DEBUG, &buf)
		logr.Prefix = k.prefix
		logr.DateFormat = k.dateFormat
		logr.Flags = k.flags
		logr.Level = k.level
		d := time.Now().Format(logr.DateFormat)
		n, err := logr.Fprint(k.level, 1, k.text, &buf)
		if n != buf.Len() {
			t.Error("Error: ", io.ErrShortWrite)
		}
		want := fmt.Sprintf(k.want, d)
		if buf.String() != want || err != nil && !k.wantErr {
			t.Errorf("Print test %d failed, \ngot: %q\nwant: "+
				"%q", i+1, buf.String(), want)
			continue
		}
		// Fix: use the same 1-based test number as the failure message.
		fmt.Printf("Test %d OK: %s\n", i+1, buf.String())
	}
}
// TestLevel verifies that messages below the logger's level are suppressed
// and that changing the level enables or disables output accordingly.
func TestLevel(t *testing.T) {
	var buf bytes.Buffer
	logr := New(LEVEL_CRITICAL, &buf)
	logr.Debug("This level should produce no output")
	if buf.Len() != 0 {
		t.Errorf("Debug() produced output at LEVEL_CRITICAL logging level")
	}
	logr.Level = LEVEL_DEBUG
	logr.Debug("This level should produce output")
	if buf.Len() == 0 {
		t.Errorf("Debug() did not produce output at the LEVEL_DEBUG logging level")
	}
	buf.Reset()
	logr.Level = LEVEL_CRITICAL
	logr.Println("This level should produce output")
	if buf.Len() == 0 {
		// Fix: the message previously blamed Debug() at the ALL level,
		// a copy-paste from the case below.
		t.Errorf("Println() did not produce output at the LEVEL_CRITICAL logging level")
	}
	buf.Reset()
	logr.Level = LEVEL_ALL
	logr.Debug("This level should produce output")
	if buf.Len() == 0 {
		t.Errorf("Debug() did not produce output at the LEVEL_ALL logging level")
	}
}
// TestPrefixNewline verifies that newlines prepended to the logged text are
// moved to the front of the output rather than left embedded after the
// prefix.
func TestPrefixNewline(t *testing.T) {
	var buf bytes.Buffer
	SetStreams(os.Stdout, &buf)
	SetLevel(LEVEL_DEBUG)
	Debugln("\n\nThis line should be padded with newlines.\n\n")
	c, err := buf.ReadString('\n')
	// Fix: the first ReadString error was previously ignored.
	if err != nil {
		t.Error("ReadString unexpected error: ", err)
	}
	// If text sent with the logging functions is prepended with newlines,
	// these newlines must be prepended to the output and stripped from the
	// text. First we will make sure the two nl's are at the beginning of
	// the output.
	if c[0] != '\n' {
		t.Errorf(`First byte should be "\n", found "%s"`, string(c[0]))
	}
	c, err = buf.ReadString('\n')
	if err != nil {
		t.Error("ReadString unexpected EOF")
	}
	// Since nl should be stripped from the text and prepended to the
	// output, we must make sure the nl is still not in the middle where it
	// would be if it had not been stripped.
	// Fix: this file does not import "strings"; bytes.Index (bytes is
	// already imported) performs the identical search.
	nlPos := bytes.Index(buf.Bytes(), []byte("] ")) + 1
	if buf.Bytes()[nlPos+1] == '\n' {
		t.Errorf(`"\n" found at position %d.`, nlPos+1)
	}
}
// TestFlagsDate verifies that the output carries neither prefix nor date
// when only LnoPrefix is set.
// NOTE(review): relies on package-level state (SetFlags/SetStreams) shared
// with the other tests — confirm test ordering cannot interfere.
func TestFlagsDate(t *testing.T) {
	var buf bytes.Buffer
	SetStreams(os.Stdout, &buf)
	SetLevel(LEVEL_DEBUG)
	SetFlags(LnoPrefix)
	Debugln("This output should not have a date.")
	expect := "[DEBUG] This output should not have a date.\n"
	if buf.String() != expect {
		t.Errorf("\nExpect:\n\t%s\nGot:\n\t%s\n", expect, buf.String())
	}
}
Refactor TestShortFileName
// Copyright 2013 The go-logger Authors. All rights reserved.
// This code is MIT licensed. See the LICENSE file for more info.
package log
import (
"bytes"
"fmt"
"io"
"math/rand"
"os"
"path/filepath"
"testing"
"runtime"
"time"
)
// TestStream verifies that a logger's output streams can be reassigned
// after construction through the exported Streams slice.
func TestStream(t *testing.T) {
	var buf bytes.Buffer
	logr := New(LEVEL_CRITICAL, os.Stdout, &buf)
	logr.Streams[1] = &buf
	if out := logr.Streams[1]; out != &buf {
		t.Errorf("Stream = %p, want %p", out, &buf)
	}
}
// TestMultiStreams verifies that a logger configured with two output
// streams (a temp file and a buffer) writes the same bytes to both.
func TestMultiStreams(t *testing.T) {
	rand.Seed(time.Now().UnixNano())
	fPath := filepath.Join(os.TempDir(), fmt.Sprint("go_test_",
		rand.Int()))
	file, err := os.Create(fPath)
	if err != nil {
		// Fix: t.Error does not interpret format verbs; use t.Errorf.
		t.Errorf("Create(%q) = %v; want: nil", fPath, err)
	}
	defer file.Close()
	var buf bytes.Buffer
	// eLen is the expected byte length of the formatted debug line below.
	eLen := 55
	logr := New(LEVEL_DEBUG, file, &buf)
	logr.Debugln("Testing debug output!")
	b := make([]byte, eLen)
	n, err := file.ReadAt(b, 0)
	if n != eLen || err != nil {
		t.Errorf("Read(%d) = %d, %v; want: %d, nil", eLen, n, err,
			eLen)
	}
	if buf.Len() != eLen {
		t.Errorf("buf.Len() = %d; want: %d", buf.Len(), eLen)
	}
}
// TestLongFileFlag verifies that the LlongFileName flag prepends the full
// path of the caller's source file to the message.
func TestLongFileFlag(t *testing.T) {
	var buf bytes.Buffer
	SetLevel(LEVEL_DEBUG)
	SetFlags(LnoPrefix | LlongFileName)
	SetStreams(os.Stdout, &buf)
	Debugln("Test long file flag")
	// runtime.Caller(0) yields this test file's path, which must appear
	// verbatim in the output.
	_, file, _, _ := runtime.Caller(0)
	expect := fmt.Sprintf("[DEBUG] %s: Test long file flag\n", file)
	if buf.String() != expect {
		t.Errorf("\nExpect:\n\t%s\nGot:\n\t%s\n", expect, buf.String())
	}
}
// TestShortFileFlag verifies that the LshortFileName flag prepends only the
// base name of the caller's source file to the message.
func TestShortFileFlag(t *testing.T) {
	var buf bytes.Buffer
	SetLevel(LEVEL_DEBUG)
	SetFlags(LnoPrefix | LshortFileName)
	SetStreams(os.Stdout, &buf)
	Debugln("Test short file flag")
	_, file, _, _ := runtime.Caller(0)
	// Idiom: filepath.Base (filepath is already imported) replaces the
	// manual reverse scan for the last '/'.
	file = filepath.Base(file)
	expect := fmt.Sprintf("[DEBUG] %s: Test short file flag\n", file)
	if buf.String() != expect {
		t.Errorf("\nExpect:\n\t%s\nGot:\n\t%s\n", expect, buf.String())
	}
}
// Prefixes and reference date format used by the output table below.
var (
	boldPrefix  = AnsiEscape(ANSI_BOLD, "TEST>", ANSI_OFF)
	colorPrefix = AnsiEscape(ANSI_BOLD, ANSI_RED, "TEST>", ANSI_OFF)
	date        = "Mon 20060102 15:04:05"
)

// outputTests drives TestOutput; want is a fmt template whose single %s
// placeholder receives the formatted date.
var outputTests = []struct {
	template   string
	prefix     string
	level      level
	dateFormat string
	flags      int
	text       string
	want       string
	wantErr    bool
}{
	// The %s format specifier is the placeholder for the date.
	{logFmt, boldPrefix, LEVEL_ALL, date, LstdFlags, "test number 1",
		"%s \x1b[1mTEST>\x1b[0m test number 1", false},
	{logFmt, colorPrefix, LEVEL_ALL, date, LstdFlags, "test number 2",
		"%s \x1b[1m\x1b[31mTEST>\x1b[0m test number 2", false},
	// Test output with coloring turned off
	{logFmt, AnsiEscape(ANSI_BOLD, "::", ANSI_OFF), LEVEL_ALL, date, Ldate,
		"test number 3", "%s :: test number 3", false},
	{logFmt, defaultPrefixColor, LEVEL_DEBUG, time.RubyDate, LstdFlags,
		"test number 4",
		"%s \x1b[1m\x1b[32m::\x1b[0m \x1b[1m\x1b[37m[DEBUG]\x1b[0m test number 4",
		false},
	{logFmt, defaultPrefixColor, LEVEL_INFO, time.RubyDate, LstdFlags,
		"test number 5",
		"%s \x1b[1m\x1b[32m::\x1b[0m \x1b[1m\x1b[32m[INFO]\x1b[0m test number 5",
		false},
	{logFmt, defaultPrefixColor, LEVEL_WARNING, time.RubyDate, LstdFlags,
		"test number 6",
		"%s \x1b[1m\x1b[32m::\x1b[0m \x1b[1m\x1b[33m[WARNING]\x1b[0m test number 6",
		false},
	{logFmt, defaultPrefixColor, LEVEL_ERROR, time.RubyDate, LstdFlags,
		"test number 7",
		"%s \x1b[1m\x1b[32m::\x1b[0m \x1b[1m\x1b[35m[ERROR]\x1b[0m test number 7",
		false},
	{logFmt, defaultPrefixColor, LEVEL_CRITICAL, time.RubyDate, LstdFlags,
		"test number 8",
		"%s \x1b[1m\x1b[32m::\x1b[0m \x1b[1m\x1b[31m[CRITICAL]\x1b[0m test number 8",
		false},
	// Test date format
	{logFmt, defaultPrefixColor, LEVEL_ALL, "Mon 20060102 15:04:05",
		Ldate, "test number 9",
		"%s :: test number 9", false},
}
// TestOutput runs each entry of the outputTests table through Fprint and
// compares the formatted result against the expected string.
func TestOutput(t *testing.T) {
	for i, k := range outputTests {
		var buf bytes.Buffer
		logr := New(LEVEL_DEBUG, &buf)
		logr.Prefix = k.prefix
		logr.DateFormat = k.dateFormat
		logr.Flags = k.flags
		logr.Level = k.level
		d := time.Now().Format(logr.DateFormat)
		n, err := logr.Fprint(k.level, 1, k.text, &buf)
		if n != buf.Len() {
			t.Error("Error: ", io.ErrShortWrite)
		}
		want := fmt.Sprintf(k.want, d)
		if buf.String() != want || err != nil && !k.wantErr {
			t.Errorf("Print test %d failed, \ngot: %q\nwant: "+
				"%q", i+1, buf.String(), want)
			continue
		}
		// Fix: use the same 1-based test number as the failure message.
		fmt.Printf("Test %d OK: %s\n", i+1, buf.String())
	}
}
// TestLevel verifies that messages below the logger's level are suppressed
// and that changing the level enables or disables output accordingly.
func TestLevel(t *testing.T) {
	var buf bytes.Buffer
	logr := New(LEVEL_CRITICAL, &buf)
	logr.Debug("This level should produce no output")
	if buf.Len() != 0 {
		t.Errorf("Debug() produced output at LEVEL_CRITICAL logging level")
	}
	logr.Level = LEVEL_DEBUG
	logr.Debug("This level should produce output")
	if buf.Len() == 0 {
		t.Errorf("Debug() did not produce output at the LEVEL_DEBUG logging level")
	}
	buf.Reset()
	logr.Level = LEVEL_CRITICAL
	logr.Println("This level should produce output")
	if buf.Len() == 0 {
		// Fix: the message previously blamed Debug() at the ALL level,
		// a copy-paste from the case below.
		t.Errorf("Println() did not produce output at the LEVEL_CRITICAL logging level")
	}
	buf.Reset()
	logr.Level = LEVEL_ALL
	logr.Debug("This level should produce output")
	if buf.Len() == 0 {
		t.Errorf("Debug() did not produce output at the LEVEL_ALL logging level")
	}
}
// TestPrefixNewline verifies that newlines prepended to the logged text are
// moved to the front of the output rather than left embedded after the
// prefix.
func TestPrefixNewline(t *testing.T) {
	var buf bytes.Buffer
	SetStreams(os.Stdout, &buf)
	SetLevel(LEVEL_DEBUG)
	Debugln("\n\nThis line should be padded with newlines.\n\n")
	c, err := buf.ReadString('\n')
	// Fix: the first ReadString error was previously ignored.
	if err != nil {
		t.Error("ReadString unexpected error: ", err)
	}
	// If text sent with the logging functions is prepended with newlines,
	// these newlines must be prepended to the output and stripped from the
	// text. First we will make sure the two nl's are at the beginning of
	// the output.
	if c[0] != '\n' {
		t.Errorf(`First byte should be "\n", found "%s"`, string(c[0]))
	}
	c, err = buf.ReadString('\n')
	if err != nil {
		t.Error("ReadString unexpected EOF")
	}
	// Since nl should be stripped from the text and prepended to the
	// output, we must make sure the nl is still not in the middle where it
	// would be if it had not been stripped.
	// Fix: this file does not import "strings"; bytes.Index (bytes is
	// already imported) performs the identical search.
	nlPos := bytes.Index(buf.Bytes(), []byte("] ")) + 1
	if buf.Bytes()[nlPos+1] == '\n' {
		t.Errorf(`"\n" found at position %d.`, nlPos+1)
	}
}
// TestFlagsDate verifies that the output carries neither prefix nor date
// when only LnoPrefix is set.
// NOTE(review): relies on package-level state (SetFlags/SetStreams) shared
// with the other tests — confirm test ordering cannot interfere.
func TestFlagsDate(t *testing.T) {
	var buf bytes.Buffer
	SetStreams(os.Stdout, &buf)
	SetLevel(LEVEL_DEBUG)
	SetFlags(LnoPrefix)
	Debugln("This output should not have a date.")
	expect := "[DEBUG] This output should not have a date.\n"
	if buf.String() != expect {
		t.Errorf("\nExpect:\n\t%s\nGot:\n\t%s\n", expect, buf.String())
	}
}
|
// xbuild ignore
package main
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
)
// Command-line flags controlling the CI run.
var runCrossCompile = flag.Bool("cross-compile", true, "run cross compilation tests")
var minioServer = flag.String("minio", "", "path to the minio server binary")
var debug = flag.Bool("debug", false, "output debug messages")

// minioServerEnv holds the (test-only) credentials passed to the minio
// server process.
var minioServerEnv = map[string]string{
	"MINIO_ACCESS_KEY": "KEBIYDZ87HCIH5D17YCN",
	"MINIO_SECRET_KEY": "bVX1KhipSBPopEfmhc7rGz8ooxx27xdJ7Gkh1mVe",
}

// minioEnv holds the matching client-side settings exported to the tests.
var minioEnv = map[string]string{
	"RESTIC_TEST_S3_SERVER": "http://127.0.0.1:9000",
	"AWS_ACCESS_KEY_ID":     "KEBIYDZ87HCIH5D17YCN",
	"AWS_SECRET_ACCESS_KEY": "bVX1KhipSBPopEfmhc7rGz8ooxx27xdJ7Gkh1mVe",
}

func init() {
	// NOTE(review): parsing flags in init runs before main — confirm no
	// other package registers flags after this point.
	flag.Parse()
}

// CIEnvironment is implemented by environments where tests can be run.
type CIEnvironment interface {
	Prepare() error  // install dependencies and start services
	RunTests() error // run the test suite
	Teardown() error // stop services and clean up
}

// TravisEnvironment is the environment in which Travis tests run.
type TravisEnvironment struct {
	goxOSArch    []string          // gox cross-compile targets
	minio        string            // path to the minio server binary
	minioSrv     *Background       // running minio server, if started
	minioTempdir string            // minio data directory
	env          map[string]string // extra environment for test commands
}
// getMinio makes a minio server binary available: either the one named by
// the -minio flag, or a fresh download for the current GOOS/GOARCH saved to
// an executable tempfile. The resulting path is stored in env.minio.
func (env *TravisEnvironment) getMinio() error {
	if *minioServer != "" {
		msg("using minio server at %q\n", *minioServer)
		env.minio = *minioServer
		return nil
	}
	tempfile, err := ioutil.TempFile("", "minio-server-")
	if err != nil {
		return fmt.Errorf("create tempfile for minio download failed: %v\n", err)
	}
	url := fmt.Sprintf("https://dl.minio.io/server/minio/release/%s-%s/minio",
		runtime.GOOS, runtime.GOARCH)
	msg("downloading %v\n", url)
	res, err := http.Get(url)
	if err != nil {
		// Fix: close the tempfile so its descriptor is not leaked on
		// this error path.
		_ = tempfile.Close()
		return fmt.Errorf("error downloading minio server: %v\n", err)
	}
	_, err = io.Copy(tempfile, res.Body)
	if err != nil {
		// Fix: previously neither the response body nor the tempfile
		// were closed when the download failed mid-copy.
		_ = res.Body.Close()
		_ = tempfile.Close()
		return fmt.Errorf("error saving minio server to file: %v\n", err)
	}
	err = res.Body.Close()
	if err != nil {
		return fmt.Errorf("error closing HTTP download: %v\n", err)
	}
	err = tempfile.Close()
	if err != nil {
		msg("closing tempfile failed: %v\n", err)
		return fmt.Errorf("error closing minio server file: %v\n", err)
	}
	// Make the downloaded binary executable.
	err = os.Chmod(tempfile.Name(), 0755)
	if err != nil {
		return fmt.Errorf("chmod(minio-server) failed: %v", err)
	}
	msg("downloaded minio server to %v\n", tempfile.Name())
	env.minio = tempfile.Name()
	return nil
}
// runMinio starts the minio server (if one was obtained by getMinio) with a
// temporary root directory, and exports the client credentials into
// env.env so the tests can reach it.
func (env *TravisEnvironment) runMinio() error {
	if env.minio == "" {
		return nil
	}
	// start minio server
	// Fix: the status message was missing its trailing newline.
	msg("starting minio server at %s\n", env.minio)
	dir, err := ioutil.TempDir("", "minio-root")
	if err != nil {
		return fmt.Errorf("running minio server failed: %v", err)
	}
	env.minioSrv, err = StartBackgroundCommand(minioServerEnv, env.minio,
		"server",
		"--address", "127.0.0.1:9000",
		dir)
	if err != nil {
		return fmt.Errorf("error running minio server: %v", err)
	}
	for k, v := range minioEnv {
		env.env[k] = v
	}
	env.minioTempdir = dir
	return nil
}
// Prepare installs dependencies and starts services in order to run the tests.
func (env *TravisEnvironment) Prepare() error {
	env.env = make(map[string]string)
	msg("preparing environment for Travis CI\n")
	// Tools needed to collect and upload coverage information.
	for _, pkg := range []string{
		"golang.org/x/tools/cmd/cover",
		"github.com/mattn/goveralls",
		"github.com/pierrre/gotestcover",
	} {
		err := run("go", "get", pkg)
		if err != nil {
			return err
		}
	}
	if err := env.getMinio(); err != nil {
		return err
	}
	if err := env.runMinio(); err != nil {
		return err
	}
	if runtime.GOOS == "darwin" {
		// install the libraries necessary for fuse
		if err := run("brew", "update"); err != nil {
			return err
		}
		if err := run("brew", "cask", "install", "osxfuse"); err != nil {
			return err
		}
	}
	if *runCrossCompile {
		// only test cross compilation on linux with Travis
		if err := run("go", "get", "github.com/mitchellh/gox"); err != nil {
			return err
		}
		if runtime.GOOS == "linux" {
			env.goxOSArch = []string{
				"linux/386", "linux/amd64",
				"windows/386", "windows/amd64",
				"darwin/386", "darwin/amd64",
				"freebsd/386", "freebsd/amd64",
				// Fix: the GOOS is spelled "openbsd", not
				// "opendbsd"; gox rejects unknown targets.
				"openbsd/386", "openbsd/amd64",
			}
			if !strings.HasPrefix(runtime.Version(), "go1.3") {
				env.goxOSArch = append(env.goxOSArch,
					"linux/arm", "darwin/arm", "freebsd/arm")
			}
		} else {
			env.goxOSArch = []string{runtime.GOOS + "/" + runtime.GOARCH}
		}
		msg("gox: OS/ARCH %v\n", env.goxOSArch)
		v := runtime.Version()
		// Building the toolchain is only needed before Go 1.5.
		if !strings.HasPrefix(v, "go1.5") && !strings.HasPrefix(v, "go1.6") {
			err := run("gox", "-build-toolchain",
				"-osarch", strings.Join(env.goxOSArch, " "))
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// Teardown stops backend services and cleans the environment again.
// If the minio server is still running it is killed; otherwise its queued
// result is drained and reported. The temp data directory is removed last.
func (env *TravisEnvironment) Teardown() error {
	msg("run travis teardown\n")
	if env.minioSrv != nil {
		msg("stopping minio server\n")
		// ProcessState is only non-nil once the process has exited.
		if env.minioSrv.Cmd.ProcessState == nil {
			err := env.minioSrv.Cmd.Process.Kill()
			if err != nil {
				fmt.Fprintf(os.Stderr, "error killing minio server process: %v", err)
			}
		} else {
			result := <-env.minioSrv.Result
			if result.Error != nil {
				msg("minio server returned error: %v\n", result.Error)
				msg("stdout: %s\n", result.Stdout)
				msg("stderr: %s\n", result.Stderr)
			}
		}
		err := os.RemoveAll(env.minioTempdir)
		if err != nil {
			msg("error removing minio tempdir %v: %v\n", env.minioTempdir, err)
		}
	}
	return nil
}
func goVersionAtLeast151() bool {
v := runtime.Version()
if match, _ := regexp.MatchString(`^go1\.[0-4]`, v); match {
return false
}
if v == "go1.5" {
return false
}
return true
}
// Background is a program running in the background.
type Background struct {
	Cmd    *exec.Cmd   // the started command
	Result chan Result // receives the outcome once the process exits
}

// Result is the result of a program that ran in the background.
type Result struct {
	Stdout, Stderr string // captured output of the process
	Error          error  // error returned by Cmd.Wait
}
// StartBackgroundCommand runs a program in the background. The returned
// Background carries the started command and a buffered channel that
// receives exactly one Result (captured stdout/stderr and the Wait error)
// once the process exits.
func StartBackgroundCommand(env map[string]string, cmd string, args ...string) (*Background, error) {
	msg("running background command %v %v\n", cmd, args)
	b := Background{
		Result: make(chan Result, 1),
	}
	stdout := bytes.NewBuffer(nil)
	stderr := bytes.NewBuffer(nil)
	c := exec.Command(cmd, args...)
	c.Stdout = stdout
	c.Stderr = stderr
	if *debug {
		// Mirror the child's output to our own streams while still
		// capturing it.
		c.Stdout = io.MultiWriter(c.Stdout, os.Stdout)
		c.Stderr = io.MultiWriter(c.Stderr, os.Stderr)
	}
	c.Env = updateEnv(os.Environ(), env)
	b.Cmd = c
	err := c.Start()
	if err != nil {
		msg("error starting background job %v: %v\n", cmd, err)
		return nil, err
	}
	go func() {
		err := b.Cmd.Wait()
		msg("background job %v returned: %v\n", cmd, err)
		msg("stdout: %s\n", stdout.Bytes())
		msg("stderr: %s\n", stderr.Bytes())
		// Idiom fix: Buffer.String() instead of string(Buffer.Bytes()).
		b.Result <- Result{
			Stdout: stdout.String(),
			Stderr: stderr.String(),
			Error:  err,
		}
	}()
	return &b, nil
}
// RunTests starts the tests for Travis.
func (env *TravisEnvironment) RunTests() error {
	// run fuse tests on darwin
	if runtime.GOOS != "darwin" {
		msg("skip fuse integration tests on %v\n", runtime.GOOS)
		os.Setenv("RESTIC_TEST_FUSE", "0")
	}
	cwd, err := os.Getwd()
	if err != nil {
		return fmt.Errorf("Getwd() returned error: %v", err)
	}
	env.env["GOPATH"] = cwd + ":" + filepath.Join(cwd, "vendor")
	if *runCrossCompile {
		// compile for all target architectures with tags
		for _, tags := range []string{"release", "debug"} {
			// Fix: the gox error was silently discarded; a failed
			// cross-compile now fails the CI run.
			err := runWithEnv(env.env, "gox", "-verbose",
				"-osarch", strings.Join(env.goxOSArch, " "),
				"-tags", tags,
				"-output", "/tmp/{{.Dir}}_{{.OS}}_{{.Arch}}",
				"cmds/restic")
			if err != nil {
				return err
			}
		}
	}
	// run the build script
	if err := run("go", "run", "build.go"); err != nil {
		return err
	}
	// run the tests and gather coverage information
	err = runWithEnv(env.env, "gotestcover", "-coverprofile", "all.cov", "cmds/...", "restic/...")
	if err != nil {
		return err
	}
	return runGofmt()
}
// AppveyorEnvironment is the environment on Windows.
type AppveyorEnvironment struct{}

// Prepare installs dependencies and starts services in order to run the tests.
// Nothing needs to be installed on Appveyor.
func (env *AppveyorEnvironment) Prepare() error {
	msg("preparing environment for Appveyor CI\n")
	return nil
}

// RunTests start the tests.
func (env *AppveyorEnvironment) RunTests() error {
	return run("go", "run", "build.go", "-v", "-T")
}

// Teardown is a noop.
func (env *AppveyorEnvironment) Teardown() error {
	return nil
}
// findGoFiles returns a list of go source code file names below dir.
func findGoFiles(dir string) (list []string, err error) {
err = filepath.Walk(dir, func(name string, fi os.FileInfo, err error) error {
if filepath.Base(name) == "vendor" {
return filepath.SkipDir
}
if filepath.Ext(name) == ".go" {
relpath, err := filepath.Rel(dir, name)
if err != nil {
return err
}
list = append(list, relpath)
}
return err
})
return list, err
}
// msg prints a status line to stdout, prefixed with "CI: ".
func msg(format string, args ...interface{}) {
	fmt.Printf("CI: "+format, args...)
}
// updateEnv merges an environment list with an override map: entries whose
// key appears in override are dropped from env, then one KEY=VALUE entry is
// appended for every pair in override.
func updateEnv(env []string, override map[string]string) []string {
	var merged []string
	for _, entry := range env {
		key := strings.SplitN(entry, "=", 2)[0]
		if _, overridden := override[key]; overridden {
			continue
		}
		merged = append(merged, entry)
	}
	for key, value := range override {
		merged = append(merged, key+"="+value)
	}
	return merged
}
// runGofmt lists all Go files below the working directory and returns an
// error when gofmt -l reports any of them as unformatted.
func runGofmt() error {
	dir, err := os.Getwd()
	if err != nil {
		return fmt.Errorf("Getwd(): %v\n", err)
	}
	files, err := findGoFiles(dir)
	if err != nil {
		return fmt.Errorf("error finding Go files: %v\n", err)
	}
	msg("runGofmt() with %d files\n", len(files))
	args := append([]string{"-l"}, files...)
	cmd := exec.Command("gofmt", args...)
	cmd.Stderr = os.Stderr
	buf, err := cmd.Output()
	if err != nil {
		return fmt.Errorf("error running gofmt: %v\noutput: %s\n", err, buf)
	}
	// gofmt -l prints the names of unformatted files: any output at all
	// means the check failed.
	if len(buf) > 0 {
		return fmt.Errorf("not formatted with `gofmt`:\n%s\n", buf)
	}
	return nil
}

// run executes a command with the current environment unchanged.
func run(command string, args ...string) error {
	msg("run %v %v\n", command, strings.Join(args, " "))
	return runWithEnv(nil, command, args...)
}

// runWithEnv calls a command with the current environment, except the entries
// of the env map are set additionally.
func runWithEnv(env map[string]string, command string, args ...string) error {
	msg("runWithEnv %v %v\n", command, strings.Join(args, " "))
	cmd := exec.Command(command, args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if env != nil {
		cmd.Env = updateEnv(os.Environ(), env)
	}
	err := cmd.Run()
	if err != nil {
		return fmt.Errorf("error running %v %v: %v",
			command, strings.Join(args, " "), err)
	}
	return nil
}
func isTravis() bool {
return os.Getenv("TRAVIS_BUILD_DIR") != ""
}
func isAppveyor() bool {
return runtime.GOOS == "windows"
}
// main selects the CI environment (Travis or Appveyor) and runs Prepare,
// RunTests and Teardown in order. All three phases are attempted even when
// an earlier one fails; any failure makes the process exit non-zero.
func main() {
	var env CIEnvironment
	switch {
	case isTravis():
		env = &TravisEnvironment{}
	case isAppveyor():
		env = &AppveyorEnvironment{}
	default:
		fmt.Fprintln(os.Stderr, "unknown CI environment")
		os.Exit(1)
	}
	foundError := false
	for _, f := range []func() error{env.Prepare, env.RunTests, env.Teardown} {
		err := f()
		if err != nil {
			foundError = true
			fmt.Fprintf(os.Stderr, "error: %v\n", err)
		}
	}
	if foundError {
		os.Exit(1)
	}
}
Remove darwin/arm
// xbuild ignore
package main
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
)
var runCrossCompile = flag.Bool("cross-compile", true, "run cross compilation tests")
var minioServer = flag.String("minio", "", "path to the minio server binary")
var debug = flag.Bool("debug", false, "output debug messages")
var minioServerEnv = map[string]string{
"MINIO_ACCESS_KEY": "KEBIYDZ87HCIH5D17YCN",
"MINIO_SECRET_KEY": "bVX1KhipSBPopEfmhc7rGz8ooxx27xdJ7Gkh1mVe",
}
var minioEnv = map[string]string{
"RESTIC_TEST_S3_SERVER": "http://127.0.0.1:9000",
"AWS_ACCESS_KEY_ID": "KEBIYDZ87HCIH5D17YCN",
"AWS_SECRET_ACCESS_KEY": "bVX1KhipSBPopEfmhc7rGz8ooxx27xdJ7Gkh1mVe",
}
func init() {
flag.Parse()
}
// CIEnvironment is implemented by environments where tests can be run.
type CIEnvironment interface {
	Prepare() error
	RunTests() error
	Teardown() error
}

// TravisEnvironment is the environment in which Travis tests run.
type TravisEnvironment struct {
	goxOSArch []string // OS/ARCH pairs passed to gox for cross compilation
	minio string // path to the minio server binary
	minioSrv *Background // handle of the background minio process
	minioTempdir string // minio data directory, removed in Teardown
	env map[string]string // extra environment entries for test commands
}
// getMinio makes a minio server binary available and records its path in
// env.minio. When the -minio flag names a binary, that one is used;
// otherwise the release binary for the current GOOS/GOARCH is downloaded
// into a temp file and made executable.
func (env *TravisEnvironment) getMinio() error {
	if *minioServer != "" {
		msg("using minio server at %q\n", *minioServer)
		env.minio = *minioServer
		return nil
	}

	tempfile, err := ioutil.TempFile("", "minio-server-")
	if err != nil {
		return fmt.Errorf("create tempfile for minio download failed: %v\n", err)
	}

	url := fmt.Sprintf("https://dl.minio.io/server/minio/release/%s-%s/minio",
		runtime.GOOS, runtime.GOARCH)
	msg("downloading %v\n", url)
	res, err := http.Get(url)
	if err != nil {
		return fmt.Errorf("error downloading minio server: %v\n", err)
	}
	// Without this check a 404/50x error page would be saved and later
	// chmod'ed and executed as the minio binary.
	if res.StatusCode != http.StatusOK {
		res.Body.Close()
		return fmt.Errorf("unexpected HTTP status downloading minio server: %v", res.Status)
	}

	_, err = io.Copy(tempfile, res.Body)
	if err != nil {
		return fmt.Errorf("error saving minio server to file: %v\n", err)
	}

	err = res.Body.Close()
	if err != nil {
		return fmt.Errorf("error closing HTTP download: %v\n", err)
	}

	err = tempfile.Close()
	if err != nil {
		msg("closing tempfile failed: %v\n", err)
		return fmt.Errorf("error closing minio server file: %v\n", err)
	}

	err = os.Chmod(tempfile.Name(), 0755)
	if err != nil {
		return fmt.Errorf("chmod(minio-server) failed: %v", err)
	}

	msg("downloaded minio server to %v\n", tempfile.Name())
	env.minio = tempfile.Name()
	return nil
}
// runMinio starts the configured minio binary in the background, serving on
// 127.0.0.1:9000 from a fresh temp directory, and exports the matching
// client credentials into env.env for the S3 integration tests.
// It is a no-op when no minio binary is available.
func (env *TravisEnvironment) runMinio() error {
	if env.minio == "" {
		return nil
	}
	// start minio server
	msg("starting minio server at %s", env.minio)
	dir, err := ioutil.TempDir("", "minio-root")
	if err != nil {
		return fmt.Errorf("running minio server failed: %v", err)
	}
	// Server credentials come from minioServerEnv; the matching client-side
	// credentials are exported into env.env below.
	env.minioSrv, err = StartBackgroundCommand(minioServerEnv, env.minio,
		"server",
		"--address", "127.0.0.1:9000",
		dir)
	if err != nil {
		return fmt.Errorf("error running minio server: %v", err)
	}
	// go func() {
	// time.Sleep(300 * time.Millisecond)
	// env.minioSrv.Cmd.Process.Kill()
	// }()
	// Point the S3 tests at the local server.
	for k, v := range minioEnv {
		env.env[k] = v
	}
	env.minioTempdir = dir
	return nil
}
// Prepare installs dependencies and starts services in order to run the tests.
// Prepare installs dependencies and starts services in order to run the tests.
// On darwin it installs osxfuse via brew; with -cross-compile it installs gox
// and (on Go < 1.5, where gox still needs one) builds the cross toolchain.
func (env *TravisEnvironment) Prepare() error {
	env.env = make(map[string]string)

	msg("preparing environment for Travis CI\n")

	for _, pkg := range []string{
		"golang.org/x/tools/cmd/cover",
		"github.com/mattn/goveralls",
		"github.com/pierrre/gotestcover",
	} {
		err := run("go", "get", pkg)
		if err != nil {
			return err
		}
	}

	if err := env.getMinio(); err != nil {
		return err
	}
	if err := env.runMinio(); err != nil {
		return err
	}

	if runtime.GOOS == "darwin" {
		// install the libraries necessary for fuse
		if err := run("brew", "update"); err != nil {
			return err
		}
		if err := run("brew", "cask", "install", "osxfuse"); err != nil {
			return err
		}
	}

	if *runCrossCompile {
		// only test cross compilation on linux with Travis
		if err := run("go", "get", "github.com/mitchellh/gox"); err != nil {
			return err
		}
		if runtime.GOOS == "linux" {
			env.goxOSArch = []string{
				"linux/386", "linux/amd64",
				"windows/386", "windows/amd64",
				"darwin/386", "darwin/amd64",
				"freebsd/386", "freebsd/amd64",
				// fixed: previously misspelled "opendbsd", which is not
				// a valid GOOS and gox cannot build for it
				"openbsd/386", "openbsd/amd64",
			}
			if !strings.HasPrefix(runtime.Version(), "go1.3") {
				env.goxOSArch = append(env.goxOSArch,
					"linux/arm", "freebsd/arm")
			}
		} else {
			env.goxOSArch = []string{runtime.GOOS + "/" + runtime.GOARCH}
		}

		msg("gox: OS/ARCH %v\n", env.goxOSArch)

		// gox -build-toolchain is only needed before Go 1.5 (self-hosting
		// toolchain); skip it on 1.5/1.6.
		v := runtime.Version()
		if !strings.HasPrefix(v, "go1.5") && !strings.HasPrefix(v, "go1.6") {
			err := run("gox", "-build-toolchain",
				"-osarch", strings.Join(env.goxOSArch, " "))
			if err != nil {
				return err
			}
		}
	}

	return nil
}
// Teardown stops backend services and cleans the environment again.
func (env *TravisEnvironment) Teardown() error {
msg("run travis teardown\n")
if env.minioSrv != nil {
msg("stopping minio server\n")
if env.minioSrv.Cmd.ProcessState == nil {
err := env.minioSrv.Cmd.Process.Kill()
if err != nil {
fmt.Fprintf(os.Stderr, "error killing minio server process: %v", err)
}
} else {
result := <-env.minioSrv.Result
if result.Error != nil {
msg("minio server returned error: %v\n", result.Error)
msg("stdout: %s\n", result.Stdout)
msg("stderr: %s\n", result.Stderr)
}
}
err := os.RemoveAll(env.minioTempdir)
if err != nil {
msg("error removing minio tempdir %v: %v\n", env.minioTempdir, err)
}
}
return nil
}
// goVersionAtLeast151 reports whether the running Go toolchain is at least
// version 1.5.1.
//
// The previous pattern `^go1\.[0-4]` also matched double-digit minor
// versions such as go1.10 or go1.24 (the "[0-4]" matched the first digit),
// misclassifying every modern release as older than 1.5. Anchoring the
// minor version with `(\.|$)` restricts the match to go1.0 … go1.4.
func goVersionAtLeast151() bool {
	v := runtime.Version()

	if match, _ := regexp.MatchString(`^go1\.[0-4](\.|$)`, v); match {
		return false
	}

	// go1.5 with no patch release is still too old.
	if v == "go1.5" {
		return false
	}

	return true
}
// Background is a program running in the background.
type Background struct {
	Cmd *exec.Cmd // the started process
	Result chan Result // receives exactly one Result when the process exits
}

// Result is the result of a program that ran in the background.
type Result struct {
	Stdout, Stderr string // captured output of the process
	Error error // non-nil if the process exited unsuccessfully
}
// StartBackgroundCommand runs a program in the background.
func StartBackgroundCommand(env map[string]string, cmd string, args ...string) (*Background, error) {
msg("running background command %v %v\n", cmd, args)
b := Background{
Result: make(chan Result, 1),
}
stdout := bytes.NewBuffer(nil)
stderr := bytes.NewBuffer(nil)
c := exec.Command(cmd, args...)
c.Stdout = stdout
c.Stderr = stderr
if *debug {
c.Stdout = io.MultiWriter(c.Stdout, os.Stdout)
c.Stderr = io.MultiWriter(c.Stderr, os.Stderr)
}
c.Env = updateEnv(os.Environ(), env)
b.Cmd = c
err := c.Start()
if err != nil {
msg("error starting background job %v: %v\n", cmd, err)
return nil, err
}
go func() {
err := b.Cmd.Wait()
msg("background job %v returned: %v\n", cmd, err)
msg("stdout: %s\n", stdout.Bytes())
msg("stderr: %s\n", stderr.Bytes())
b.Result <- Result{
Stdout: string(stdout.Bytes()),
Stderr: string(stderr.Bytes()),
Error: err,
}
}()
return &b, nil
}
// RunTests starts the tests for Travis.
func (env *TravisEnvironment) RunTests() error {
// run fuse tests on darwin
if runtime.GOOS != "darwin" {
msg("skip fuse integration tests on %v\n", runtime.GOOS)
os.Setenv("RESTIC_TEST_FUSE", "0")
}
cwd, err := os.Getwd()
if err != nil {
return fmt.Errorf("Getwd() returned error: %v", err)
}
env.env["GOPATH"] = cwd + ":" + filepath.Join(cwd, "vendor")
if *runCrossCompile {
// compile for all target architectures with tags
for _, tags := range []string{"release", "debug"} {
runWithEnv(env.env, "gox", "-verbose",
"-osarch", strings.Join(env.goxOSArch, " "),
"-tags", tags,
"-output", "/tmp/{{.Dir}}_{{.OS}}_{{.Arch}}",
"cmds/restic")
}
}
// run the build script
if err := run("go", "run", "build.go"); err != nil {
return err
}
// run the tests and gather coverage information
err = runWithEnv(env.env, "gotestcover", "-coverprofile", "all.cov", "cmds/...", "restic/...")
if err != nil {
return err
}
return runGofmt()
}
// AppveyorEnvironment describes the Windows CI environment (Appveyor).
type AppveyorEnvironment struct{}

// Prepare announces the environment; Appveyor needs no extra setup.
func (e *AppveyorEnvironment) Prepare() error {
	msg("preparing environment for Appveyor CI\n")
	return nil
}

// RunTests delegates to the build script in verbose test mode.
func (e *AppveyorEnvironment) RunTests() error {
	return run("go", "run", "build.go", "-v", "-T")
}

// Teardown has nothing to clean up on Appveyor.
func (e *AppveyorEnvironment) Teardown() error {
	return nil
}
// findGoFiles returns a list of go source code file names below dir.
func findGoFiles(dir string) (list []string, err error) {
err = filepath.Walk(dir, func(name string, fi os.FileInfo, err error) error {
if filepath.Base(name) == "vendor" {
return filepath.SkipDir
}
if filepath.Ext(name) == ".go" {
relpath, err := filepath.Rel(dir, name)
if err != nil {
return err
}
list = append(list, relpath)
}
return err
})
return list, err
}
// msg prints a formatted status message to stdout, prefixed with "CI: " so
// script output is distinguishable from the commands it runs.
func msg(format string, args ...interface{}) {
	fmt.Printf("CI: "+format, args...)
}
// updateEnv returns a copy of env ("key=value" entries) with every entry
// whose key appears in override removed, and one entry per override pair
// appended at the end.
func updateEnv(env []string, override map[string]string) []string {
	var merged []string

	for _, entry := range env {
		key := strings.SplitN(entry, "=", 2)[0]
		if _, overridden := override[key]; !overridden {
			merged = append(merged, entry)
		}
	}

	for key, value := range override {
		merged = append(merged, key+"="+value)
	}

	return merged
}
// runGofmt runs `gofmt -l` over every Go file in the working tree (vendor
// excluded) and returns an error if any file is not gofmt-clean.
func runGofmt() error {
	dir, err := os.Getwd()
	if err != nil {
		return fmt.Errorf("Getwd(): %v\n", err)
	}
	files, err := findGoFiles(dir)
	if err != nil {
		return fmt.Errorf("error finding Go files: %v\n", err)
	}
	msg("runGofmt() with %d files\n", len(files))
	// gofmt -l prints only the names of files whose formatting differs.
	args := append([]string{"-l"}, files...)
	cmd := exec.Command("gofmt", args...)
	cmd.Stderr = os.Stderr
	buf, err := cmd.Output()
	if err != nil {
		return fmt.Errorf("error running gofmt: %v\noutput: %s\n", err, buf)
	}
	// Any output at all means at least one file needs formatting.
	if len(buf) > 0 {
		return fmt.Errorf("not formatted with `gofmt`:\n%s\n", buf)
	}
	return nil
}
// run executes command with the given args under the inherited environment,
// returning an error describing any failure.
func run(command string, args ...string) error {
	joined := strings.Join(args, " ")
	msg("run %v %v\n", command, joined)
	return runWithEnv(nil, command, args...)
}
// runWithEnv calls a command with the current environment, except the entries
// of the env map are set additionally.
//
// The child's stdout/stderr stream straight through to this process. A nil
// env map leaves cmd.Env nil, which makes exec inherit os.Environ() as-is.
func runWithEnv(env map[string]string, command string, args ...string) error {
	msg("runWithEnv %v %v\n", command, strings.Join(args, " "))
	cmd := exec.Command(command, args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if env != nil {
		// Merge the overrides into a copy of the current environment.
		cmd.Env = updateEnv(os.Environ(), env)
	}
	err := cmd.Run()
	if err != nil {
		return fmt.Errorf("error running %v %v: %v",
			command, strings.Join(args, " "), err)
	}
	return nil
}
func isTravis() bool {
return os.Getenv("TRAVIS_BUILD_DIR") != ""
}
func isAppveyor() bool {
return runtime.GOOS == "windows"
}
// main detects the CI environment, then runs Prepare, RunTests and Teardown
// in sequence. All three phases always run — even after an earlier failure —
// so Teardown still gets a chance to clean up; any failure exits non-zero.
func main() {
	var env CIEnvironment
	switch {
	case isTravis():
		env = &TravisEnvironment{}
	case isAppveyor():
		env = &AppveyorEnvironment{}
	default:
		fmt.Fprintln(os.Stderr, "unknown CI environment")
		os.Exit(1)
	}
	foundError := false
	for _, f := range []func() error{env.Prepare, env.RunTests, env.Teardown} {
		err := f()
		if err != nil {
			foundError = true
			fmt.Fprintf(os.Stderr, "error: %v\n", err)
		}
	}
	if foundError {
		os.Exit(1)
	}
}
|
package lua
//#include <lua.h>
//#include <lauxlib.h>
//#include <lualib.h>
//#include <stdlib.h>
//#include "golua.h"
import "C"
import "unsafe"
// LuaError carries a Lua status code, the error message taken from the Lua
// stack, and the Lua stack trace captured when the error occurred.
type LuaError struct {
	code int // raw status code returned by the Lua C API
	message string // error message text
	stackTrace []LuaStackEntry // Lua call stack at the point of failure
}

// Error returns the Lua error message, satisfying the error interface.
func (err *LuaError) Error() string {
	return err.message
}

// Code returns the raw Lua status code associated with this error.
func (err *LuaError) Code() int {
	return err.code
}

// StackTrace returns the Lua stack entries recorded when the error occurred.
func (err *LuaError) StackTrace() []LuaStackEntry {
	return err.stackTrace
}
// luaL_argcheck
//
// ArgCheck raises a Lua argument error for argument narg when cond is
// FALSE, mirroring the C macro luaL_argcheck which raises when its check
// fails. The previous code raised when cond was true, inverting the
// semantics of the Lua API.
func (L *State) ArgCheck(cond bool, narg int, extramsg string) {
	if !cond {
		Cextramsg := C.CString(extramsg)
		defer C.free(unsafe.Pointer(Cextramsg))
		C.luaL_argerror(L.s, C.int(narg), Cextramsg)
	}
}
// luaL_argerror
func (L *State) ArgError(narg int, extramsg string) int {
Cextramsg := C.CString(extramsg)
defer C.free(unsafe.Pointer(Cextramsg))
return int(C.luaL_argerror(L.s, C.int(narg), Cextramsg))
}
// luaL_callmeta
func (L *State) CallMeta(obj int, e string) int {
Ce := C.CString(e)
defer C.free(unsafe.Pointer(Ce))
return int(C.luaL_callmeta(L.s, C.int(obj), Ce))
}
// luaL_checkany
func (L *State) CheckAny(narg int) {
C.luaL_checkany(L.s, C.int(narg))
}
// luaL_checkinteger
func (L *State) CheckInteger(narg int) int {
return int(C.luaL_checkinteger(L.s, C.int(narg)))
}
// luaL_checknumber
func (L *State) CheckNumber(narg int) float64 {
return float64(C.luaL_checknumber(L.s, C.int(narg)))
}
// luaL_checkstring
func (L *State) CheckString(narg int) string {
var length C.size_t
return C.GoString(C.luaL_checklstring(L.s, C.int(narg), &length))
}
// luaL_checkoption
//
// BUG(everyone_involved): not implemented
func (L *State) CheckOption(narg int, def string, lst []string) int {
//TODO: complication: lst conversion to const char* lst[] from string slice
return 0
}
// luaL_checktype
func (L *State) CheckType(narg int, t LuaValType) {
C.luaL_checktype(L.s, C.int(narg), C.int(t))
}
// luaL_checkudata
func (L *State) CheckUdata(narg int, tname string) unsafe.Pointer {
Ctname := C.CString(tname)
defer C.free(unsafe.Pointer(Ctname))
return unsafe.Pointer(C.luaL_checkudata(L.s, C.int(narg), Ctname))
}
// DoFile loads and executes the Lua file at filename. It returns nil on
// success, or a *LuaError with the code, message and stack trace on failure.
func (L *State) DoFile(filename string) error {
	if r := L.LoadFile(filename); r != 0 {
		return &LuaError{r, L.ToString(-1), L.StackTrace()}
	}
	// Dropped the stray trailing semicolon, which is not gofmt-clean Go.
	return L.Call(0, LUA_MULTRET)
}
// Executes the string, returns nil for no errors or the lua error string on failure
func (L *State) DoString(str string) error {
if r := L.LoadString(str); r != 0 {
return &LuaError{r, L.ToString(-1), L.StackTrace()}
}
return L.Call(0, LUA_MULTRET)
}
// Like DoString but panics on error
func (L *State) MustDoString(str string) {
if err := L.DoString(str); err != nil {
panic(err)
}
}
// luaL_getmetafield
func (L *State) GetMetaField(obj int, e string) bool {
Ce := C.CString(e)
defer C.free(unsafe.Pointer(Ce))
return C.luaL_getmetafield(L.s, C.int(obj), Ce) != 0
}
// luaL_getmetatable
func (L *State) LGetMetaTable(tname string) {
Ctname := C.CString(tname)
defer C.free(unsafe.Pointer(Ctname))
C.lua_getfield(L.s, LUA_REGISTRYINDEX, Ctname)
}
// luaL_gsub
func (L *State) GSub(s string, p string, r string) string {
Cs := C.CString(s)
Cp := C.CString(p)
Cr := C.CString(r)
defer func() {
C.free(unsafe.Pointer(Cs))
C.free(unsafe.Pointer(Cp))
C.free(unsafe.Pointer(Cr))
}()
return C.GoString(C.luaL_gsub(L.s, Cs, Cp, Cr))
}
// luaL_loadfile
func (L *State) LoadFile(filename string) int {
Cfilename := C.CString(filename)
defer C.free(unsafe.Pointer(Cfilename))
return int(C.luaL_loadfile(L.s, Cfilename))
}
// luaL_loadstring
func (L *State) LoadString(s string) int {
Cs := C.CString(s)
defer C.free(unsafe.Pointer(Cs))
return int(C.luaL_loadstring(L.s, Cs))
}
// luaL_newmetatable
func (L *State) NewMetaTable(tname string) bool {
Ctname := C.CString(tname)
defer C.free(unsafe.Pointer(Ctname))
return C.luaL_newmetatable(L.s, Ctname) != 0
}
// luaL_newstate
func NewState() *State {
ls := (C.luaL_newstate())
L := newState(ls)
return L
}
// luaL_openlibs
func (L *State) OpenLibs() {
C.luaL_openlibs(L.s)
C.clua_hide_pcall(L.s)
}
// luaL_optinteger
func (L *State) OptInteger(narg int, d int) int {
return int(C.luaL_optinteger(L.s, C.int(narg), C.lua_Integer(d)))
}
// luaL_optnumber
func (L *State) OptNumber(narg int, d float64) float64 {
return float64(C.luaL_optnumber(L.s, C.int(narg), C.lua_Number(d)))
}
// luaL_optstring
func (L *State) OptString(narg int, d string) string {
var length C.size_t
Cd := C.CString(d)
defer C.free(unsafe.Pointer(Cd))
return C.GoString(C.luaL_optlstring(L.s, C.int(narg), Cd, &length))
}
// luaL_ref
func (L *State) Ref(t int) int {
return int(C.luaL_ref(L.s, C.int(t)))
}
// luaL_typename
func (L *State) LTypename(index int) string {
return C.GoString(C.lua_typename(L.s, C.lua_type(L.s, C.int(index))))
}
// luaL_unref
func (L *State) Unref(t int, ref int) {
C.luaL_unref(L.s, C.int(t), C.int(ref))
}
// luaL_where
func (L *State) Where(lvl int) {
C.luaL_where(L.s, C.int(lvl))
}
luaL_argcheck: Raise an error when condition is _not_ true
See http://www.lua.org/manual/5.1/manual.html#luaL_argcheck.
package lua
//#include <lua.h>
//#include <lauxlib.h>
//#include <lualib.h>
//#include <stdlib.h>
//#include "golua.h"
import "C"
import "unsafe"
type LuaError struct {
code int
message string
stackTrace []LuaStackEntry
}
func (err *LuaError) Error() string {
return err.message
}
func (err *LuaError) Code() int {
return err.code
}
func (err *LuaError) StackTrace() []LuaStackEntry {
return err.stackTrace
}
// luaL_argcheck
func (L *State) ArgCheck(cond bool, narg int, extramsg string) {
if !cond {
Cextramsg := C.CString(extramsg)
defer C.free(unsafe.Pointer(Cextramsg))
C.luaL_argerror(L.s, C.int(narg), Cextramsg)
}
}
// luaL_argerror
func (L *State) ArgError(narg int, extramsg string) int {
Cextramsg := C.CString(extramsg)
defer C.free(unsafe.Pointer(Cextramsg))
return int(C.luaL_argerror(L.s, C.int(narg), Cextramsg))
}
// luaL_callmeta
func (L *State) CallMeta(obj int, e string) int {
Ce := C.CString(e)
defer C.free(unsafe.Pointer(Ce))
return int(C.luaL_callmeta(L.s, C.int(obj), Ce))
}
// luaL_checkany
func (L *State) CheckAny(narg int) {
C.luaL_checkany(L.s, C.int(narg))
}
// luaL_checkinteger
func (L *State) CheckInteger(narg int) int {
return int(C.luaL_checkinteger(L.s, C.int(narg)))
}
// luaL_checknumber
func (L *State) CheckNumber(narg int) float64 {
return float64(C.luaL_checknumber(L.s, C.int(narg)))
}
// luaL_checkstring
func (L *State) CheckString(narg int) string {
var length C.size_t
return C.GoString(C.luaL_checklstring(L.s, C.int(narg), &length))
}
// luaL_checkoption
//
// BUG(everyone_involved): not implemented
func (L *State) CheckOption(narg int, def string, lst []string) int {
//TODO: complication: lst conversion to const char* lst[] from string slice
return 0
}
// luaL_checktype
func (L *State) CheckType(narg int, t LuaValType) {
C.luaL_checktype(L.s, C.int(narg), C.int(t))
}
// luaL_checkudata
func (L *State) CheckUdata(narg int, tname string) unsafe.Pointer {
Ctname := C.CString(tname)
defer C.free(unsafe.Pointer(Ctname))
return unsafe.Pointer(C.luaL_checkudata(L.s, C.int(narg), Ctname))
}
// DoFile loads and executes the Lua file at filename. It returns nil on
// success, or a *LuaError with the code, message and stack trace on failure.
func (L *State) DoFile(filename string) error {
	if r := L.LoadFile(filename); r != 0 {
		return &LuaError{r, L.ToString(-1), L.StackTrace()}
	}
	// Dropped the stray trailing semicolon, which is not gofmt-clean Go.
	return L.Call(0, LUA_MULTRET)
}
// Executes the string, returns nil for no errors or the lua error string on failure
func (L *State) DoString(str string) error {
if r := L.LoadString(str); r != 0 {
return &LuaError{r, L.ToString(-1), L.StackTrace()}
}
return L.Call(0, LUA_MULTRET)
}
// Like DoString but panics on error
func (L *State) MustDoString(str string) {
if err := L.DoString(str); err != nil {
panic(err)
}
}
// luaL_getmetafield
func (L *State) GetMetaField(obj int, e string) bool {
Ce := C.CString(e)
defer C.free(unsafe.Pointer(Ce))
return C.luaL_getmetafield(L.s, C.int(obj), Ce) != 0
}
// luaL_getmetatable
func (L *State) LGetMetaTable(tname string) {
Ctname := C.CString(tname)
defer C.free(unsafe.Pointer(Ctname))
C.lua_getfield(L.s, LUA_REGISTRYINDEX, Ctname)
}
// luaL_gsub
func (L *State) GSub(s string, p string, r string) string {
Cs := C.CString(s)
Cp := C.CString(p)
Cr := C.CString(r)
defer func() {
C.free(unsafe.Pointer(Cs))
C.free(unsafe.Pointer(Cp))
C.free(unsafe.Pointer(Cr))
}()
return C.GoString(C.luaL_gsub(L.s, Cs, Cp, Cr))
}
// luaL_loadfile
func (L *State) LoadFile(filename string) int {
Cfilename := C.CString(filename)
defer C.free(unsafe.Pointer(Cfilename))
return int(C.luaL_loadfile(L.s, Cfilename))
}
// luaL_loadstring
func (L *State) LoadString(s string) int {
Cs := C.CString(s)
defer C.free(unsafe.Pointer(Cs))
return int(C.luaL_loadstring(L.s, Cs))
}
// luaL_newmetatable
func (L *State) NewMetaTable(tname string) bool {
Ctname := C.CString(tname)
defer C.free(unsafe.Pointer(Ctname))
return C.luaL_newmetatable(L.s, Ctname) != 0
}
// luaL_newstate
func NewState() *State {
ls := (C.luaL_newstate())
L := newState(ls)
return L
}
// luaL_openlibs
func (L *State) OpenLibs() {
C.luaL_openlibs(L.s)
C.clua_hide_pcall(L.s)
}
// luaL_optinteger
func (L *State) OptInteger(narg int, d int) int {
return int(C.luaL_optinteger(L.s, C.int(narg), C.lua_Integer(d)))
}
// luaL_optnumber
func (L *State) OptNumber(narg int, d float64) float64 {
return float64(C.luaL_optnumber(L.s, C.int(narg), C.lua_Number(d)))
}
// luaL_optstring
func (L *State) OptString(narg int, d string) string {
var length C.size_t
Cd := C.CString(d)
defer C.free(unsafe.Pointer(Cd))
return C.GoString(C.luaL_optlstring(L.s, C.int(narg), Cd, &length))
}
// luaL_ref
func (L *State) Ref(t int) int {
return int(C.luaL_ref(L.s, C.int(t)))
}
// luaL_typename
func (L *State) LTypename(index int) string {
return C.GoString(C.lua_typename(L.s, C.lua_type(L.s, C.int(index))))
}
// luaL_unref
func (L *State) Unref(t int, ref int) {
C.luaL_unref(L.s, C.int(t), C.int(ref))
}
// luaL_where
func (L *State) Where(lvl int) {
C.luaL_where(L.s, C.int(lvl))
}
|
package main
import (
"encoding/json"
"fmt"
"os"
"gopkg.in/yaml.v2"
"github.com/lxc/lxd/lxc/config"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/gnuflag"
"github.com/lxc/lxd/shared/i18n"
"github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/logging"
)
// typeList accumulates repeatable --type flag values.
type typeList []string

// String renders the collected types for flag help output.
func (f *typeList) String() string {
	return fmt.Sprint(*f)
}

// Set appends one event type; the empty string is rejected.
//
// The former `if f == nil { *f = make(typeList, 1) }` branch was defective:
// dereferencing a nil receiver would panic, and even if it ran it discarded
// the value. append handles a nil slice, so a plain append is both correct
// and simpler.
func (f *typeList) Set(value string) error {
	if value == "" {
		return fmt.Errorf("Invalid type: %s", value)
	}

	*f = append(*f, value)
	return nil
}
// monitorCmd implements "lxc monitor": stream events from a LXD server.
type monitorCmd struct {
	typeArgs typeList // event types to subscribe to (--type, repeatable)
	pretty bool // render log events via log15 terminal formatting (--pretty)
	logLevel string // minimum log level to display (--loglevel)
}

// showByDefault hides this command from the default command listing.
func (c *monitorCmd) showByDefault() bool {
	return false
}

// usage returns the translated help text for "lxc monitor".
func (c *monitorCmd) usage() string {
	return i18n.G(
		`Usage: lxc monitor [<remote>:] [--type=TYPE...] [--pretty]
Monitor a local or remote LXD server.
By default the monitor will listen to all message types.
Message types to listen for can be specified with --type.
*Examples*
lxc monitor --type=logging
    Only show log messages.
lxc monitor --pretty --type=logging --loglevel=info
    Show a pretty log of messages with info level or higher.
lxc monitor --type=lifecycle
    Only show lifecycle events.
`)
}

// flags registers the command-line flags for "lxc monitor".
func (c *monitorCmd) flags() {
	gnuflag.BoolVar(&c.pretty, "pretty", false, i18n.G("Pretty rendering"))
	gnuflag.Var(&c.typeArgs, "type", i18n.G("Event type to listen for"))
	gnuflag.StringVar(&c.logLevel, "loglevel", "", i18n.G("Minimum level for log messages"))
}
// run connects to the given remote (or the default one), subscribes to its
// event stream, and prints every received event until the listener ends.
func (c *monitorCmd) run(conf *config.Config, args []string) error {
	var err error
	var remote string
	if len(args) > 1 {
		return errArgs
	}
	if len(args) == 0 {
		// No remote argument: fall back to the configured default remote.
		remote, _, err = conf.ParseRemote("")
		if err != nil {
			return err
		}
	} else {
		remote, _, err = conf.ParseRemote(args[0])
		if err != nil {
			return err
		}
	}
	d, err := conf.GetContainerServer(remote)
	if err != nil {
		return err
	}
	listener, err := d.GetEvents()
	if err != nil {
		return err
	}
	// Show everything down to debug level unless --loglevel narrows it.
	logLvl := log15.LvlDebug
	if c.logLevel != "" {
		logLvl, err = log15.LvlFromString(c.logLevel)
		if err != nil {
			return err
		}
	}
	handler := func(message interface{}) {
		// Special handling for logging only output
		if c.pretty && len(c.typeArgs) == 1 && shared.StringInSlice("logging", c.typeArgs) {
			// Round-trip through JSON to turn the untyped message into a
			// typed api.Event, then decode its metadata as a log entry.
			render, err := json.Marshal(&message)
			if err != nil {
				fmt.Printf("error: %s\n", err)
				os.Exit(1)
			}
			event := api.Event{}
			err = json.Unmarshal(render, &event)
			if err != nil {
				fmt.Printf("error: %s\n", err)
				os.Exit(1)
			}
			logEntry := api.EventLogging{}
			err = json.Unmarshal(event.Metadata, &logEntry)
			if err != nil {
				fmt.Printf("error: %s\n", err)
				os.Exit(1)
			}
			lvl, err := log15.LvlFromString(logEntry.Level)
			if err != nil {
				fmt.Printf("error: %s\n", err)
				os.Exit(1)
			}
			// Drop entries beyond the requested level; log15 level values
			// appear to grow with verbosity — confirm against log15 docs.
			if lvl > logLvl {
				return
			}
			// Flatten the context map into log15's alternating
			// key/value slice representation.
			ctx := []interface{}{}
			for k, v := range logEntry.Context {
				ctx = append(ctx, k)
				ctx = append(ctx, v)
			}
			record := log15.Record{
				Time: event.Timestamp,
				Lvl: lvl,
				Msg: logEntry.Message,
				Ctx: ctx,
			}
			format := logging.TerminalFormat()
			fmt.Printf("%s", format.Format(&record))
			return
		}
		// Default output: dump the whole event as YAML.
		render, err := yaml.Marshal(&message)
		if err != nil {
			fmt.Printf("error: %s\n", err)
			os.Exit(1)
		}
		fmt.Printf("%s\n\n", render)
	}
	_, err = listener.AddHandler(c.typeArgs, handler)
	if err != nil {
		return err
	}
	// Block until the event listener disconnects or fails.
	return listener.Wait()
}
lxc/monitor: Fix formatting
Signed-off-by: Thomas Hipp <5f82c492b3b00e427412d216ce820707a10c51ce@canonical.com>
package main
import (
"encoding/json"
"fmt"
"os"
"gopkg.in/yaml.v2"
"github.com/lxc/lxd/lxc/config"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/gnuflag"
"github.com/lxc/lxd/shared/i18n"
"github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/logging"
)
// typeList accumulates repeatable --type flag values.
type typeList []string

// String renders the collected types for flag help output.
func (f *typeList) String() string {
	return fmt.Sprint(*f)
}

// Set appends one event type; the empty string is rejected.
//
// The former `if f == nil { *f = make(typeList, 1) }` branch was defective:
// dereferencing a nil receiver would panic, and even if it ran it discarded
// the value. append handles a nil slice, so a plain append is both correct
// and simpler.
func (f *typeList) Set(value string) error {
	if value == "" {
		return fmt.Errorf("Invalid type: %s", value)
	}

	*f = append(*f, value)
	return nil
}
type monitorCmd struct {
typeArgs typeList
pretty bool
logLevel string
}
func (c *monitorCmd) showByDefault() bool {
return false
}
func (c *monitorCmd) usage() string {
return i18n.G(
`Usage: lxc monitor [<remote>:] [--type=TYPE...] [--pretty]
Monitor a local or remote LXD server.
By default the monitor will listen to all message types.
Message types to listen for can be specified with --type.
*Examples*
lxc monitor --type=logging
Only show log messages.
lxc monitor --pretty --type=logging --loglevel=info
Show a pretty log of messages with info level or higher.
lxc monitor --type=lifecycle
Only show lifecycle events.
`)
}
func (c *monitorCmd) flags() {
gnuflag.BoolVar(&c.pretty, "pretty", false, i18n.G("Pretty rendering"))
gnuflag.Var(&c.typeArgs, "type", i18n.G("Event type to listen for"))
gnuflag.StringVar(&c.logLevel, "loglevel", "", i18n.G("Minimum level for log messages"))
}
func (c *monitorCmd) run(conf *config.Config, args []string) error {
var err error
var remote string
if len(args) > 1 {
return errArgs
}
if len(args) == 0 {
remote, _, err = conf.ParseRemote("")
if err != nil {
return err
}
} else {
remote, _, err = conf.ParseRemote(args[0])
if err != nil {
return err
}
}
d, err := conf.GetContainerServer(remote)
if err != nil {
return err
}
listener, err := d.GetEvents()
if err != nil {
return err
}
logLvl := log15.LvlDebug
if c.logLevel != "" {
logLvl, err = log15.LvlFromString(c.logLevel)
if err != nil {
return err
}
}
handler := func(message interface{}) {
// Special handling for logging only output
if c.pretty && len(c.typeArgs) == 1 && shared.StringInSlice("logging", c.typeArgs) {
render, err := json.Marshal(&message)
if err != nil {
fmt.Printf("error: %s\n", err)
os.Exit(1)
}
event := api.Event{}
err = json.Unmarshal(render, &event)
if err != nil {
fmt.Printf("error: %s\n", err)
os.Exit(1)
}
logEntry := api.EventLogging{}
err = json.Unmarshal(event.Metadata, &logEntry)
if err != nil {
fmt.Printf("error: %s\n", err)
os.Exit(1)
}
lvl, err := log15.LvlFromString(logEntry.Level)
if err != nil {
fmt.Printf("error: %s\n", err)
os.Exit(1)
}
if lvl > logLvl {
return
}
ctx := []interface{}{}
for k, v := range logEntry.Context {
ctx = append(ctx, k)
ctx = append(ctx, v)
}
record := log15.Record{
Time: event.Timestamp,
Lvl: lvl,
Msg: logEntry.Message,
Ctx: ctx,
}
format := logging.TerminalFormat()
fmt.Printf("%s", format.Format(&record))
return
}
render, err := yaml.Marshal(&message)
if err != nil {
fmt.Printf("error: %s\n", err)
os.Exit(1)
}
fmt.Printf("%s\n\n", render)
}
_, err = listener.AddHandler(c.typeArgs, handler)
if err != nil {
return err
}
return listener.Wait()
}
|
package main
import (
"fmt"
"io/ioutil"
"os"
"sort"
"strings"
"syscall"
"github.com/olekukonko/tablewriter"
"gopkg.in/yaml.v2"
"github.com/lxc/lxd"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/i18n"
"github.com/lxc/lxd/shared/termios"
)
// networkCmd implements the "lxc network" subcommand family; it is stateless.
type networkCmd struct {
}

// showByDefault lists this command in the default help output.
func (c *networkCmd) showByDefault() bool {
	return true
}
// networkEditHelp returns the comment header prepended to the YAML document
// opened in the external editor by "lxc network edit".
func (c *networkCmd) networkEditHelp() string {
	// Fixed the help text: the '#' was missing its closing quote.
	return i18n.G(
		`### This is a yaml representation of the network.
### Any line starting with a '#' will be ignored.
###
### A network consists of a set of configuration items.
###
### An example would look like:
### name: lxdbr0
### config:
### ipv4.address: 10.62.42.1/24
### ipv4.nat: true
### ipv6.address: fd00:56ad:9f7a:9800::1/64
### ipv6.nat: true
### managed: true
### type: bridge
###
### Note that only the configuration can be changed.`)
}
// usage returns the translated help text listing every "lxc network"
// subcommand and its arguments.
func (c *networkCmd) usage() string {
	return i18n.G(
		`Manage networks.
lxc network list [<remote>:] List available networks.
lxc network show [<remote>:]<network> Show details of a network.
lxc network create [<remote>:]<network> [key=value...] Create a network.
lxc network get [<remote>:]<network> <key> Get network configuration.
lxc network set [<remote>:]<network> <key> <value> Set network configuration.
lxc network unset [<remote>:]<network> <key> Unset network configuration.
lxc network delete [<remote>:]<network> Delete a network.
lxc network edit [<remote>:]<network>
Edit network, either by launching external editor or reading STDIN.
Example: lxc network edit <network> # launch editor
cat network.yaml | lxc network edit <network> # read from network.yaml
lxc network attach [<remote>:]<network> <container> [device name] [interface name]
lxc network attach-profile [<remote>:]<network> <profile> [device name] [interface name]
lxc network detach [<remote>:]<network> <container> [device name]
lxc network detach-profile [<remote>:]<network> <container> [device name]`)
}

// flags registers command-line flags; "lxc network" has none.
func (c *networkCmd) flags() {}
// run dispatches "lxc network <subcommand> ..."; args[0] is the subcommand
// and args[1] (when required) names the remote-qualified network.
func (c *networkCmd) run(config *lxd.Config, args []string) error {
	if len(args) < 1 {
		return errArgs
	}

	if args[0] == "list" {
		return c.doNetworkList(config, args)
	}

	if len(args) < 2 {
		return errArgs
	}

	remote, network := config.ParseRemoteAndContainer(args[1])
	client, err := lxd.NewClient(config, remote)
	if err != nil {
		return err
	}

	switch args[0] {
	case "attach":
		return c.doNetworkAttach(client, network, args[2:])
	case "attach-profile":
		return c.doNetworkAttachProfile(client, network, args[2:])
	case "create":
		return c.doNetworkCreate(client, network, args[2:])
	case "delete":
		return c.doNetworkDelete(client, network)
	case "detach":
		return c.doNetworkDetach(client, network, args[2:])
	case "detach-profile":
		return c.doNetworkDetachProfile(client, network, args[2:])
	case "edit":
		return c.doNetworkEdit(client, network)
	case "get":
		return c.doNetworkGet(client, network, args[2:])
	case "set":
		return c.doNetworkSet(client, network, args[2:])
	case "unset":
		// "unset <key>" is "set <key> ''": previously unset forwarded
		// its arguments verbatim and behaved exactly like set. Append
		// an empty value so doNetworkSet receives a key/value pair.
		if len(args) < 3 {
			return errArgs
		}
		args = append(args, "")
		return c.doNetworkSet(client, network, args[2:])
	case "show":
		return c.doNetworkShow(client, network)
	default:
		return errArgs
	}
}
// doNetworkAttach attaches a network to a container as a nic device.
// args: <container> [device name] [interface name].
func (c *networkCmd) doNetworkAttach(client *lxd.Client, name string, args []string) error {
	if len(args) < 1 || len(args) > 3 {
		return errArgs
	}
	container := args[0]
	// The device name defaults to the network name itself.
	devName := name
	if len(args) > 1 {
		devName = args[1]
	}
	network, err := client.NetworkGet(name)
	if err != nil {
		return err
	}
	// Bridge networks get a "bridged" nic; anything else uses macvlan.
	nicType := "macvlan"
	if network.Type == "bridge" {
		nicType = "bridged"
	}
	props := []string{fmt.Sprintf("nictype=%s", nicType), fmt.Sprintf("parent=%s", name)}
	if len(args) > 2 {
		// Optional interface name as seen inside the container.
		props = append(props, fmt.Sprintf("name=%s", args[2]))
	}
	resp, err := client.ContainerDeviceAdd(container, devName, "nic", props)
	if err != nil {
		return err
	}
	// Wait for the server-side operation to complete.
	return client.WaitForSuccess(resp.Operation)
}
// doNetworkAttachProfile attaches a network to a profile as a nic device.
// args: <profile> [device name] [interface name].
func (c *networkCmd) doNetworkAttachProfile(client *lxd.Client, name string, args []string) error {
	if len(args) < 1 || len(args) > 3 {
		return errArgs
	}
	profile := args[0]
	// The device name defaults to the network name itself.
	devName := name
	if len(args) > 1 {
		devName = args[1]
	}
	network, err := client.NetworkGet(name)
	if err != nil {
		return err
	}
	// Bridge networks get a "bridged" nic; anything else uses macvlan.
	nicType := "macvlan"
	if network.Type == "bridge" {
		nicType = "bridged"
	}
	props := []string{fmt.Sprintf("nictype=%s", nicType), fmt.Sprintf("parent=%s", name)}
	if len(args) > 2 {
		// Optional interface name as seen inside the container.
		props = append(props, fmt.Sprintf("name=%s", args[2]))
	}
	_, err = client.ProfileDeviceAdd(profile, devName, "nic", props)
	return err
}
// doNetworkCreate creates the named network from the key=value pairs in args
// and prints a confirmation on success.
func (c *networkCmd) doNetworkCreate(client *lxd.Client, name string, args []string) error {
	config := map[string]string{}

	for _, arg := range args {
		fields := strings.SplitN(arg, "=", 2)
		if len(fields) < 2 {
			return errArgs
		}
		config[fields[0]] = fields[1]
	}

	err := client.NetworkCreate(name, config)
	if err == nil {
		fmt.Printf(i18n.G("Network %s created")+"\n", name)
	}
	return err
}
// doNetworkDetach removes the nic device connecting a container to this
// network. args: <container> [device name]; when no device name is given,
// the single matching nic is auto-detected (ambiguity is an error).
func (c *networkCmd) doNetworkDetach(client *lxd.Client, name string, args []string) error {
	if len(args) < 1 || len(args) > 2 {
		return errArgs
	}
	containerName := args[0]
	devName := ""
	if len(args) > 1 {
		devName = args[1]
	}
	container, err := client.ContainerInfo(containerName)
	if err != nil {
		return err
	}
	if devName == "" {
		// Look for the unique nic whose parent is this network.
		for n, d := range container.Devices {
			if d["type"] == "nic" && d["parent"] == name {
				if devName != "" {
					return fmt.Errorf(i18n.G("More than one device matches, specify the device name."))
				}
				devName = n
			}
		}
	}
	if devName == "" {
		return fmt.Errorf(i18n.G("No device found for this network"))
	}
	// Validate an explicitly supplied device name as well.
	device, ok := container.Devices[devName]
	if !ok {
		return fmt.Errorf(i18n.G("The specified device doesn't exist"))
	}
	if device["type"] != "nic" || device["parent"] != name {
		return fmt.Errorf(i18n.G("The specified device doesn't match the network"))
	}
	resp, err := client.ContainerDeviceDelete(containerName, devName)
	if err != nil {
		return err
	}
	// Wait for the server-side operation to complete.
	return client.WaitForSuccess(resp.Operation)
}
// doNetworkDetachProfile removes the nic device connecting a profile to
// this network, auto-detecting the device when none is named (refusing to
// guess between several matches).
func (c *networkCmd) doNetworkDetachProfile(client *lxd.Client, name string, args []string) error {
	if len(args) < 1 || len(args) > 2 {
		return errArgs
	}
	profileName := args[0]
	devName := ""
	if len(args) > 1 {
		devName = args[1]
	}
	profile, err := client.ProfileConfig(profileName)
	if err != nil {
		return err
	}
	// No device given: scan for a single nic whose parent is this network.
	if devName == "" {
		for n, d := range profile.Devices {
			if d["type"] == "nic" && d["parent"] == name {
				if devName != "" {
					return fmt.Errorf(i18n.G("More than one device matches, specify the device name."))
				}
				devName = n
			}
		}
	}
	if devName == "" {
		return fmt.Errorf(i18n.G("No device found for this network"))
	}
	// Validate the (possibly user-supplied) device before deleting it.
	device, ok := profile.Devices[devName]
	if !ok {
		return fmt.Errorf(i18n.G("The specified device doesn't exist"))
	}
	if device["type"] != "nic" || device["parent"] != name {
		return fmt.Errorf(i18n.G("The specified device doesn't match the network"))
	}
	_, err = client.ProfileDeviceDelete(profileName, devName)
	return err
}
// doNetworkDelete removes the named network and reports success on stdout.
func (c *networkCmd) doNetworkDelete(client *lxd.Client, name string) error {
	if err := client.NetworkDelete(name); err != nil {
		return err
	}

	fmt.Printf(i18n.G("Network %s deleted")+"\n", name)
	return nil
}
// doNetworkEdit edits a network's configuration as YAML. When stdin is
// piped the content is applied directly; otherwise an interactive editor
// is spawned and re-spawned until the config parses and applies cleanly.
func (c *networkCmd) doNetworkEdit(client *lxd.Client, name string) error {
	// If stdin isn't a terminal, read text from it
	if !termios.IsTerminal(int(syscall.Stdin)) {
		contents, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			return err
		}
		newdata := api.NetworkPut{}
		err = yaml.Unmarshal(contents, &newdata)
		if err != nil {
			return err
		}
		return client.NetworkPut(name, newdata)
	}
	// Extract the current value
	network, err := client.NetworkGet(name)
	if err != nil {
		return err
	}
	if !network.Managed {
		return fmt.Errorf(i18n.G("Only managed networks can be modified."))
	}
	data, err := yaml.Marshal(&network)
	if err != nil {
		return err
	}
	// Spawn the editor
	content, err := shared.TextEditor("", []byte(c.networkEditHelp()+"\n\n"+string(data)))
	if err != nil {
		return err
	}
	// Retry loop: on parse/apply failure, let the user re-edit the same
	// buffer rather than losing their changes.
	for {
		// Parse the text received from the editor
		newdata := api.NetworkPut{}
		err = yaml.Unmarshal(content, &newdata)
		if err == nil {
			err = client.NetworkPut(name, newdata)
		}
		// Respawn the editor
		if err != nil {
			fmt.Fprintf(os.Stderr, i18n.G("Config parsing error: %s")+"\n", err)
			fmt.Println(i18n.G("Press enter to open the editor again"))
			// Note: this err deliberately shadows the outer one.
			_, err := os.Stdin.Read(make([]byte, 1))
			if err != nil {
				return err
			}
			content, err = shared.TextEditor("", content)
			if err != nil {
				return err
			}
			continue
		}
		break
	}
	return nil
}
// doNetworkGet prints the value of a single network configuration key.
func (c *networkCmd) doNetworkGet(client *lxd.Client, name string, args []string) error {
	// args was shifted by the caller, so it should hold exactly "<key>".
	if len(args) != 1 {
		return errArgs
	}

	network, err := client.NetworkGet(name)
	if err != nil {
		return err
	}

	key := args[0]
	for k, v := range network.Config {
		if k == key {
			fmt.Printf("%s\n", v)
		}
	}

	return nil
}
// doNetworkList prints a table of all attachable networks on the target
// remote with their type, managed flag and usage count.
func (c *networkCmd) doNetworkList(config *lxd.Config, args []string) error {
	var remote string
	if len(args) > 1 {
		var name string
		// args[1] may carry a "remote:" prefix; a container name is invalid here.
		remote, name = config.ParseRemoteAndContainer(args[1])
		if name != "" {
			return fmt.Errorf(i18n.G("Cannot provide container name to list"))
		}
	} else {
		remote = config.DefaultRemote
	}
	client, err := lxd.NewClient(config, remote)
	if err != nil {
		return err
	}
	networks, err := client.ListNetworks()
	if err != nil {
		return err
	}
	data := [][]string{}
	for _, network := range networks {
		// Hide interfaces that can't be attached to.
		if shared.StringInSlice(network.Type, []string{"loopback", "unknown"}) {
			continue
		}
		strManaged := i18n.G("NO")
		if network.Managed {
			strManaged = i18n.G("YES")
		}
		strUsedBy := fmt.Sprintf("%d", len(network.UsedBy))
		data = append(data, []string{network.Name, network.Type, strManaged, strUsedBy})
	}
	table := tablewriter.NewWriter(os.Stdout)
	table.SetAutoWrapText(false)
	table.SetAlignment(tablewriter.ALIGN_LEFT)
	table.SetRowLine(true)
	table.SetHeader([]string{
		i18n.G("NAME"),
		i18n.G("TYPE"),
		i18n.G("MANAGED"),
		i18n.G("USED BY")})
	sort.Sort(byName(data))
	table.AppendBulk(data)
	table.Render()
	return nil
}
// doNetworkSet sets (or clears, when no value is given) a configuration
// key on a managed network. A value of "-" with piped stdin reads the
// value from stdin.
func (c *networkCmd) doNetworkSet(client *lxd.Client, name string, args []string) error {
	// args was shifted by the caller, so it should hold "<key> [<value>]".
	if len(args) == 0 {
		return errArgs
	}

	network, err := client.NetworkGet(name)
	if err != nil {
		return err
	}

	if !network.Managed {
		return fmt.Errorf(i18n.G("Only managed networks can be modified."))
	}

	key := args[0]
	value := ""
	if len(args) > 1 {
		value = args[1]
	}

	// Allow piping the value in through stdin.
	if value == "-" && !termios.IsTerminal(int(syscall.Stdin)) {
		buf, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			return fmt.Errorf(i18n.G("Can't read from stdin: %s"), err)
		}
		value = string(buf)
	}

	network.Config[key] = value
	return client.NetworkPut(name, network.Writable())
}
// doNetworkShow dumps the network's state as YAML on stdout, with the
// UsedBy list sorted for stable output.
func (c *networkCmd) doNetworkShow(client *lxd.Client, name string) error {
	network, err := client.NetworkGet(name)
	if err != nil {
		return err
	}

	sort.Strings(network.UsedBy)

	data, err := yaml.Marshal(&network)
	if err != nil {
		return err
	}

	fmt.Print(string(data))
	return nil
}
lxc/network: Rework usage to be parsable by help2man
Signed-off-by: Stéphane Graber <089afc6d81f66f1168a9849e15660feae286e024@ubuntu.com>
package main
import (
"fmt"
"io/ioutil"
"os"
"sort"
"strings"
"syscall"
"github.com/olekukonko/tablewriter"
"gopkg.in/yaml.v2"
"github.com/lxc/lxd"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/i18n"
"github.com/lxc/lxd/shared/termios"
)
// networkCmd implements the "lxc network" command and its subcommands.
type networkCmd struct {
}
// showByDefault marks the command as visible in the top-level help output.
func (c *networkCmd) showByDefault() bool {
	return true
}
// networkEditHelp returns the comment header prepended to the YAML buffer
// handed to the interactive editor by doNetworkEdit.
// Fix: the help text had an unbalanced quote ("a '#") — closed it.
func (c *networkCmd) networkEditHelp() string {
	return i18n.G(
		`### This is a yaml representation of the network.
### Any line starting with a '#' will be ignored.
###
### A network consists of a set of configuration items.
###
### An example would look like:
### name: lxdbr0
### config:
### ipv4.address: 10.62.42.1/24
### ipv4.nat: true
### ipv6.address: fd00:56ad:9f7a:9800::1/64
### ipv6.nat: true
### managed: true
### type: bridge
###
### Note that only the configuration can be changed.`)
}
// usage returns the help2man-parsable usage text for "lxc network".
// Fix: the detach-profile line advertised a <container> argument, but the
// subcommand (see run/doNetworkDetachProfile) takes a profile name.
func (c *networkCmd) usage() string {
	return i18n.G(
		`Usage: lxc network <subcommand> [options]
Manage and attach containers to networks.
lxc network list [<remote>:]
List available networks.
lxc network show [<remote>:]<network>
Show details of a network.
lxc network create [<remote>:]<network> [key=value...]
Create a network.
lxc network get [<remote>:]<network> <key>
Get network configuration.
lxc network set [<remote>:]<network> <key> <value>
Set network configuration.
lxc network unset [<remote>:]<network> <key>
Unset network configuration.
lxc network delete [<remote>:]<network>
Delete a network.
lxc network edit [<remote>:]<network>
Edit network, either by launching external editor or reading STDIN.
lxc network attach [<remote>:]<network> <container> [device name] [interface name]
Attach a network interface connecting the network to a specified container.
lxc network attach-profile [<remote>:]<network> <profile> [device name] [interface name]
Attach a network interface connecting the network to a specified profile.
lxc network detach [<remote>:]<network> <container> [device name]
Remove a network interface connecting the network to a specified container.
lxc network detach-profile [<remote>:]<network> <profile> [device name]
Remove a network interface connecting the network to a specified profile.
*Examples*
cat network.yaml | lxc network edit <network>
Update a network using the content of network.yaml`)
}
// flags registers command-line flags; the network command has none.
func (c *networkCmd) flags() {}
// run dispatches "lxc network <subcommand>" to the matching handler.
// Every subcommand except "list" requires a network name as the second
// argument.
func (c *networkCmd) run(config *lxd.Config, args []string) error {
	if len(args) < 1 {
		return errUsage
	}
	if args[0] == "list" {
		return c.doNetworkList(config, args)
	}
	if len(args) < 2 {
		return errArgs
	}
	remote, network := config.ParseRemoteAndContainer(args[1])
	client, err := lxd.NewClient(config, remote)
	if err != nil {
		return err
	}
	switch args[0] {
	case "attach":
		return c.doNetworkAttach(client, network, args[2:])
	case "attach-profile":
		return c.doNetworkAttachProfile(client, network, args[2:])
	case "create":
		return c.doNetworkCreate(client, network, args[2:])
	case "delete":
		return c.doNetworkDelete(client, network)
	case "detach":
		return c.doNetworkDetach(client, network, args[2:])
	case "detach-profile":
		return c.doNetworkDetachProfile(client, network, args[2:])
	case "edit":
		return c.doNetworkEdit(client, network)
	case "get":
		return c.doNetworkGet(client, network, args[2:])
	case "set":
		return c.doNetworkSet(client, network, args[2:])
	case "unset":
		// "unset" is "set" with no value: doNetworkSet clears the key.
		return c.doNetworkSet(client, network, args[2:])
	case "show":
		return c.doNetworkShow(client, network)
	default:
		return errArgs
	}
}
// doNetworkAttach adds a nic device to a container, connecting it to the
// given network, and waits for the operation to finish. Optional args
// choose the device name and the interface name inside the container.
func (c *networkCmd) doNetworkAttach(client *lxd.Client, name string, args []string) error {
	if len(args) == 0 || len(args) > 3 {
		return errArgs
	}

	// The device name defaults to the network name.
	devName := name
	if len(args) >= 2 {
		devName = args[1]
	}

	network, err := client.NetworkGet(name)
	if err != nil {
		return err
	}

	// Bridges get the "bridged" nic type, anything else uses macvlan.
	nicType := "macvlan"
	if network.Type == "bridge" {
		nicType = "bridged"
	}

	props := []string{
		fmt.Sprintf("nictype=%s", nicType),
		fmt.Sprintf("parent=%s", name),
	}
	if len(args) == 3 {
		props = append(props, fmt.Sprintf("name=%s", args[2]))
	}

	resp, err := client.ContainerDeviceAdd(args[0], devName, "nic", props)
	if err != nil {
		return err
	}

	return client.WaitForSuccess(resp.Operation)
}
// doNetworkAttachProfile adds a nic device to a profile, connecting it to
// the given network. Optional args choose the device name and, for a third
// argument, the interface name inside the container.
func (c *networkCmd) doNetworkAttachProfile(client *lxd.Client, name string, args []string) error {
	if len(args) == 0 || len(args) > 3 {
		return errArgs
	}

	// The device name defaults to the network name.
	devName := name
	if len(args) >= 2 {
		devName = args[1]
	}

	network, err := client.NetworkGet(name)
	if err != nil {
		return err
	}

	// Bridges get the "bridged" nic type, anything else uses macvlan.
	nicType := "macvlan"
	if network.Type == "bridge" {
		nicType = "bridged"
	}

	props := []string{
		fmt.Sprintf("nictype=%s", nicType),
		fmt.Sprintf("parent=%s", name),
	}
	if len(args) == 3 {
		props = append(props, fmt.Sprintf("name=%s", args[2]))
	}

	_, err = client.ProfileDeviceAdd(args[0], devName, "nic", props)
	return err
}
// doNetworkCreate creates a new network from "key=value" config arguments.
func (c *networkCmd) doNetworkCreate(client *lxd.Client, name string, args []string) error {
	config := map[string]string{}
	for _, arg := range args {
		fields := strings.SplitN(arg, "=", 2)
		if len(fields) != 2 {
			return errArgs
		}
		config[fields[0]] = fields[1]
	}

	err := client.NetworkCreate(name, config)
	if err != nil {
		return err
	}

	fmt.Printf(i18n.G("Network %s created")+"\n", name)
	return nil
}
// doNetworkDetach removes the nic device connecting a container to this
// network. With no explicit device name it auto-detects the device, but
// refuses to guess when more than one nic matches the network.
func (c *networkCmd) doNetworkDetach(client *lxd.Client, name string, args []string) error {
	if len(args) < 1 || len(args) > 2 {
		return errArgs
	}
	containerName := args[0]
	devName := ""
	if len(args) > 1 {
		devName = args[1]
	}
	container, err := client.ContainerInfo(containerName)
	if err != nil {
		return err
	}
	// No device given: scan for a single nic whose parent is this network.
	if devName == "" {
		for n, d := range container.Devices {
			if d["type"] == "nic" && d["parent"] == name {
				if devName != "" {
					return fmt.Errorf(i18n.G("More than one device matches, specify the device name."))
				}
				devName = n
			}
		}
	}
	if devName == "" {
		return fmt.Errorf(i18n.G("No device found for this network"))
	}
	// Validate the (possibly user-supplied) device before deleting it.
	device, ok := container.Devices[devName]
	if !ok {
		return fmt.Errorf(i18n.G("The specified device doesn't exist"))
	}
	if device["type"] != "nic" || device["parent"] != name {
		return fmt.Errorf(i18n.G("The specified device doesn't match the network"))
	}
	resp, err := client.ContainerDeviceDelete(containerName, devName)
	if err != nil {
		return err
	}
	return client.WaitForSuccess(resp.Operation)
}
// doNetworkDetachProfile removes the nic device connecting a profile to
// this network, auto-detecting the device when none is named (refusing to
// guess between several matches).
func (c *networkCmd) doNetworkDetachProfile(client *lxd.Client, name string, args []string) error {
	if len(args) < 1 || len(args) > 2 {
		return errArgs
	}
	profileName := args[0]
	devName := ""
	if len(args) > 1 {
		devName = args[1]
	}
	profile, err := client.ProfileConfig(profileName)
	if err != nil {
		return err
	}
	// No device given: scan for a single nic whose parent is this network.
	if devName == "" {
		for n, d := range profile.Devices {
			if d["type"] == "nic" && d["parent"] == name {
				if devName != "" {
					return fmt.Errorf(i18n.G("More than one device matches, specify the device name."))
				}
				devName = n
			}
		}
	}
	if devName == "" {
		return fmt.Errorf(i18n.G("No device found for this network"))
	}
	// Validate the (possibly user-supplied) device before deleting it.
	device, ok := profile.Devices[devName]
	if !ok {
		return fmt.Errorf(i18n.G("The specified device doesn't exist"))
	}
	if device["type"] != "nic" || device["parent"] != name {
		return fmt.Errorf(i18n.G("The specified device doesn't match the network"))
	}
	_, err = client.ProfileDeviceDelete(profileName, devName)
	return err
}
// doNetworkDelete removes the named network and reports success on stdout.
func (c *networkCmd) doNetworkDelete(client *lxd.Client, name string) error {
	if err := client.NetworkDelete(name); err != nil {
		return err
	}

	fmt.Printf(i18n.G("Network %s deleted")+"\n", name)
	return nil
}
// doNetworkEdit edits a network's configuration as YAML. When stdin is
// piped the content is applied directly; otherwise an interactive editor
// is spawned and re-spawned until the config parses and applies cleanly.
func (c *networkCmd) doNetworkEdit(client *lxd.Client, name string) error {
	// If stdin isn't a terminal, read text from it
	if !termios.IsTerminal(int(syscall.Stdin)) {
		contents, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			return err
		}
		newdata := api.NetworkPut{}
		err = yaml.Unmarshal(contents, &newdata)
		if err != nil {
			return err
		}
		return client.NetworkPut(name, newdata)
	}
	// Extract the current value
	network, err := client.NetworkGet(name)
	if err != nil {
		return err
	}
	if !network.Managed {
		return fmt.Errorf(i18n.G("Only managed networks can be modified."))
	}
	data, err := yaml.Marshal(&network)
	if err != nil {
		return err
	}
	// Spawn the editor
	content, err := shared.TextEditor("", []byte(c.networkEditHelp()+"\n\n"+string(data)))
	if err != nil {
		return err
	}
	// Retry loop: on parse/apply failure, let the user re-edit the same
	// buffer rather than losing their changes.
	for {
		// Parse the text received from the editor
		newdata := api.NetworkPut{}
		err = yaml.Unmarshal(content, &newdata)
		if err == nil {
			err = client.NetworkPut(name, newdata)
		}
		// Respawn the editor
		if err != nil {
			fmt.Fprintf(os.Stderr, i18n.G("Config parsing error: %s")+"\n", err)
			fmt.Println(i18n.G("Press enter to open the editor again"))
			// Note: this err deliberately shadows the outer one.
			_, err := os.Stdin.Read(make([]byte, 1))
			if err != nil {
				return err
			}
			content, err = shared.TextEditor("", content)
			if err != nil {
				return err
			}
			continue
		}
		break
	}
	return nil
}
// doNetworkGet prints the value of a single network configuration key.
func (c *networkCmd) doNetworkGet(client *lxd.Client, name string, args []string) error {
	// args was shifted by the caller, so it should hold exactly "<key>".
	if len(args) != 1 {
		return errArgs
	}

	network, err := client.NetworkGet(name)
	if err != nil {
		return err
	}

	key := args[0]
	for k, v := range network.Config {
		if k == key {
			fmt.Printf("%s\n", v)
		}
	}

	return nil
}
// doNetworkList prints a table of all attachable networks on the target
// remote with their type, managed flag and usage count.
func (c *networkCmd) doNetworkList(config *lxd.Config, args []string) error {
	var remote string
	if len(args) > 1 {
		var name string
		// args[1] may carry a "remote:" prefix; a container name is invalid here.
		remote, name = config.ParseRemoteAndContainer(args[1])
		if name != "" {
			return fmt.Errorf(i18n.G("Cannot provide container name to list"))
		}
	} else {
		remote = config.DefaultRemote
	}
	client, err := lxd.NewClient(config, remote)
	if err != nil {
		return err
	}
	networks, err := client.ListNetworks()
	if err != nil {
		return err
	}
	data := [][]string{}
	for _, network := range networks {
		// Hide interfaces that can't be attached to.
		if shared.StringInSlice(network.Type, []string{"loopback", "unknown"}) {
			continue
		}
		strManaged := i18n.G("NO")
		if network.Managed {
			strManaged = i18n.G("YES")
		}
		strUsedBy := fmt.Sprintf("%d", len(network.UsedBy))
		data = append(data, []string{network.Name, network.Type, strManaged, strUsedBy})
	}
	table := tablewriter.NewWriter(os.Stdout)
	table.SetAutoWrapText(false)
	table.SetAlignment(tablewriter.ALIGN_LEFT)
	table.SetRowLine(true)
	table.SetHeader([]string{
		i18n.G("NAME"),
		i18n.G("TYPE"),
		i18n.G("MANAGED"),
		i18n.G("USED BY")})
	sort.Sort(byName(data))
	table.AppendBulk(data)
	table.Render()
	return nil
}
// doNetworkSet sets (or clears, when no value is given) a configuration
// key on a managed network. A value of "-" with piped stdin reads the
// value from stdin.
func (c *networkCmd) doNetworkSet(client *lxd.Client, name string, args []string) error {
	// args was shifted by the caller, so it should hold "<key> [<value>]".
	if len(args) == 0 {
		return errArgs
	}

	network, err := client.NetworkGet(name)
	if err != nil {
		return err
	}

	if !network.Managed {
		return fmt.Errorf(i18n.G("Only managed networks can be modified."))
	}

	key := args[0]
	value := ""
	if len(args) > 1 {
		value = args[1]
	}

	// Allow piping the value in through stdin.
	if value == "-" && !termios.IsTerminal(int(syscall.Stdin)) {
		buf, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			return fmt.Errorf(i18n.G("Can't read from stdin: %s"), err)
		}
		value = string(buf)
	}

	network.Config[key] = value
	return client.NetworkPut(name, network.Writable())
}
// doNetworkShow dumps the network's state as YAML on stdout, with the
// UsedBy list sorted for stable output.
func (c *networkCmd) doNetworkShow(client *lxd.Client, name string) error {
	network, err := client.NetworkGet(name)
	if err != nil {
		return err
	}

	sort.Strings(network.UsedBy)

	data, err := yaml.Marshal(&network)
	if err != nil {
		return err
	}

	fmt.Print(string(data))
	return nil
}
|
package main
import (
"fmt"
"io/ioutil"
"os"
"sort"
"strings"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"github.com/lxc/lxd/lxc/utils"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
cli "github.com/lxc/lxd/shared/cmd"
"github.com/lxc/lxd/shared/i18n"
"github.com/lxc/lxd/shared/termios"
)
// cmdProfile implements "lxc profile" and holds the shared global state
// passed down to every subcommand.
type cmdProfile struct {
	global *cmdGlobal
}
// Command builds the "lxc profile" cobra command and registers all of its
// subcommands.
func (c *cmdProfile) Command() *cobra.Command {
	cmd := &cobra.Command{}
	cmd.Use = usage("profile")
	cmd.Short = i18n.G("Manage profiles")
	cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
		`Manage profiles`))
	// Add
	profileAddCmd := cmdProfileAdd{global: c.global, profile: c}
	cmd.AddCommand(profileAddCmd.Command())
	// Assign
	profileAssignCmd := cmdProfileAssign{global: c.global, profile: c}
	cmd.AddCommand(profileAssignCmd.Command())
	// Copy
	profileCopyCmd := cmdProfileCopy{global: c.global, profile: c}
	cmd.AddCommand(profileCopyCmd.Command())
	// Create
	profileCreateCmd := cmdProfileCreate{global: c.global, profile: c}
	cmd.AddCommand(profileCreateCmd.Command())
	// Delete
	profileDeleteCmd := cmdProfileDelete{global: c.global, profile: c}
	cmd.AddCommand(profileDeleteCmd.Command())
	// Device (shared implementation with "lxc config device")
	profileDeviceCmd := cmdConfigDevice{global: c.global, profile: c}
	cmd.AddCommand(profileDeviceCmd.Command())
	// Edit
	profileEditCmd := cmdProfileEdit{global: c.global, profile: c}
	cmd.AddCommand(profileEditCmd.Command())
	// Get
	profileGetCmd := cmdProfileGet{global: c.global, profile: c}
	cmd.AddCommand(profileGetCmd.Command())
	// List
	profileListCmd := cmdProfileList{global: c.global, profile: c}
	cmd.AddCommand(profileListCmd.Command())
	// Remove
	profileRemoveCmd := cmdProfileRemove{global: c.global, profile: c}
	cmd.AddCommand(profileRemoveCmd.Command())
	// Rename
	profileRenameCmd := cmdProfileRename{global: c.global, profile: c}
	cmd.AddCommand(profileRenameCmd.Command())
	// Set
	profileSetCmd := cmdProfileSet{global: c.global, profile: c}
	cmd.AddCommand(profileSetCmd.Command())
	// Show
	profileShowCmd := cmdProfileShow{global: c.global, profile: c}
	cmd.AddCommand(profileShowCmd.Command())
	// Unset (reuses the set implementation)
	profileUnsetCmd := cmdProfileUnset{global: c.global, profile: c, profileSet: &profileSetCmd}
	cmd.AddCommand(profileUnsetCmd.Command())
	// Workaround for subcommand usage errors. See: https://github.com/spf13/cobra/issues/706
	cmd.Args = cobra.NoArgs
	cmd.Run = func(cmd *cobra.Command, args []string) { cmd.Usage() }
	return cmd
}
// Add
// cmdProfileAdd implements "lxc profile add".
type cmdProfileAdd struct {
	global  *cmdGlobal
	profile *cmdProfile
}
// Command builds the "lxc profile add" subcommand.
func (c *cmdProfileAdd) Command() *cobra.Command {
	cmd := &cobra.Command{
		Use:   usage("add", i18n.G("[<remote>:]<instance> <profile>")),
		Short: i18n.G("Add profiles to instances"),
		Long: cli.FormatSection(i18n.G("Description"), i18n.G(
			`Add profiles to instances`)),
		RunE: c.Run,
	}

	return cmd
}
// Run appends the given profile to the instance's profile list and waits
// for the resulting update operation to complete.
func (c *cmdProfileAdd) Run(cmd *cobra.Command, args []string) error {
	// Quick checks.
	exit, err := c.global.CheckArgs(cmd, args, 2, 2)
	if exit {
		return err
	}
	// Parse remote
	resources, err := c.global.ParseServers(args[0])
	if err != nil {
		return err
	}
	resource := resources[0]
	if resource.name == "" {
		return fmt.Errorf(i18n.G("Missing instance name"))
	}
	// Add the profile
	inst, etag, err := resource.server.GetInstance(resource.name)
	if err != nil {
		return err
	}
	inst.Profiles = append(inst.Profiles, args[1])
	op, err := resource.server.UpdateInstance(resource.name, inst.Writable(), etag)
	if err != nil {
		return err
	}
	err = op.Wait()
	if err != nil {
		return err
	}
	if !c.global.flagQuiet {
		fmt.Printf(i18n.G("Profile %s added to %s")+"\n", args[1], resource.name)
	}
	return nil
}
// Assign
// cmdProfileAssign implements "lxc profile assign" (alias "apply").
type cmdProfileAssign struct {
	global  *cmdGlobal
	profile *cmdProfile
}
// Command builds the "lxc profile assign" subcommand, which replaces an
// instance's full profile list in one shot.
func (c *cmdProfileAssign) Command() *cobra.Command {
	cmd := &cobra.Command{}
	cmd.Use = usage("assign", i18n.G("[<remote>:]<instance> <profiles>"))
	cmd.Aliases = []string{"apply"}
	cmd.Short = i18n.G("Assign sets of profiles to instances")
	cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
		`Assign sets of profiles to instances`))
	cmd.Example = cli.FormatSection("", i18n.G(
		`lxc profile assign foo default,bar
Set the profiles for "foo" to "default" and "bar".
lxc profile assign foo default
Reset "foo" to only using the "default" profile.
lxc profile assign foo ''
Remove all profile from "foo"`))
	cmd.RunE = c.Run
	return cmd
}
// Run replaces the instance's profile list with the comma-separated set in
// args[1]; an empty string clears all profiles.
func (c *cmdProfileAssign) Run(cmd *cobra.Command, args []string) error {
	// Quick checks.
	exit, err := c.global.CheckArgs(cmd, args, 2, 2)
	if exit {
		return err
	}
	// Parse remote
	resources, err := c.global.ParseServers(args[0])
	if err != nil {
		return err
	}
	resource := resources[0]
	// Assign the profiles
	if resource.name == "" {
		return fmt.Errorf(i18n.G("Missing instance name"))
	}
	inst, etag, err := resource.server.GetInstance(resource.name)
	if err != nil {
		return err
	}
	if args[1] != "" {
		inst.Profiles = strings.Split(args[1], ",")
	} else {
		inst.Profiles = nil
	}
	op, err := resource.server.UpdateInstance(resource.name, inst.Writable(), etag)
	if err != nil {
		return err
	}
	err = op.Wait()
	if err != nil {
		return err
	}
	// Display "(none)" instead of an empty string in the confirmation.
	if args[1] == "" {
		args[1] = i18n.G("(none)")
	}
	if !c.global.flagQuiet {
		fmt.Printf(i18n.G("Profiles %s applied to %s")+"\n", args[1], resource.name)
	}
	return nil
}
// Copy
// cmdProfileCopy implements "lxc profile copy" (alias "cp").
type cmdProfileCopy struct {
	global  *cmdGlobal
	profile *cmdProfile
}
// Command builds the "lxc profile copy" subcommand.
func (c *cmdProfileCopy) Command() *cobra.Command {
	cmd := &cobra.Command{
		Use:     usage("copy", i18n.G("[<remote>:]<profile> [<remote>:]<profile>")),
		Aliases: []string{"cp"},
		Short:   i18n.G("Copy profiles"),
		Long: cli.FormatSection(i18n.G("Description"), i18n.G(
			`Copy profiles`)),
		RunE: c.Run,
	}

	return cmd
}
// Run copies a profile's writable config to a new profile, possibly on a
// different remote. The destination name defaults to the source name.
func (c *cmdProfileCopy) Run(cmd *cobra.Command, args []string) error {
	// Quick checks.
	exit, err := c.global.CheckArgs(cmd, args, 2, 2)
	if exit {
		return err
	}
	// Parse remote
	resources, err := c.global.ParseServers(args...)
	if err != nil {
		return err
	}
	source := resources[0]
	dest := resources[1]
	if source.name == "" {
		return fmt.Errorf(i18n.G("Missing source profile name"))
	}
	// Destination name defaults to the source's.
	if dest.name == "" {
		dest.name = source.name
	}
	// Copy the profile
	profile, _, err := source.server.GetProfile(source.name)
	if err != nil {
		return err
	}
	newProfile := api.ProfilesPost{
		ProfilePut: profile.Writable(),
		Name:       dest.name,
	}
	return dest.server.CreateProfile(newProfile)
}
// Create
// cmdProfileCreate implements "lxc profile create".
type cmdProfileCreate struct {
	global  *cmdGlobal
	profile *cmdProfile
}
// Command builds the "lxc profile create" subcommand.
func (c *cmdProfileCreate) Command() *cobra.Command {
	cmd := &cobra.Command{
		Use:   usage("create", i18n.G("[<remote>:]<profile>")),
		Short: i18n.G("Create profiles"),
		Long: cli.FormatSection(i18n.G("Description"), i18n.G(
			`Create profiles`)),
		RunE: c.Run,
	}

	return cmd
}
// Run creates an empty profile with the given name on the target remote.
func (c *cmdProfileCreate) Run(cmd *cobra.Command, args []string) error {
	// Quick checks.
	exit, err := c.global.CheckArgs(cmd, args, 1, 1)
	if exit {
		return err
	}
	// Parse remote
	resources, err := c.global.ParseServers(args[0])
	if err != nil {
		return err
	}
	resource := resources[0]
	if resource.name == "" {
		return fmt.Errorf(i18n.G("Missing profile name"))
	}
	// Create the profile
	profile := api.ProfilesPost{}
	profile.Name = resource.name
	err = resource.server.CreateProfile(profile)
	if err != nil {
		return err
	}
	if !c.global.flagQuiet {
		fmt.Printf(i18n.G("Profile %s created")+"\n", resource.name)
	}
	return nil
}
// Delete
// cmdProfileDelete implements "lxc profile delete" (alias "rm").
type cmdProfileDelete struct {
	global  *cmdGlobal
	profile *cmdProfile
}
// Command builds the "lxc profile delete" subcommand.
func (c *cmdProfileDelete) Command() *cobra.Command {
	cmd := &cobra.Command{
		Use:     usage("delete", i18n.G("[<remote>:]<profile>")),
		Aliases: []string{"rm"},
		Short:   i18n.G("Delete profiles"),
		Long: cli.FormatSection(i18n.G("Description"), i18n.G(
			`Delete profiles`)),
		RunE: c.Run,
	}

	return cmd
}
// Run deletes the named profile from the target remote.
func (c *cmdProfileDelete) Run(cmd *cobra.Command, args []string) error {
	// Quick checks.
	exit, err := c.global.CheckArgs(cmd, args, 1, 1)
	if exit {
		return err
	}
	// Parse remote
	resources, err := c.global.ParseServers(args[0])
	if err != nil {
		return err
	}
	resource := resources[0]
	if resource.name == "" {
		return fmt.Errorf(i18n.G("Missing profile name"))
	}
	// Delete the profile
	err = resource.server.DeleteProfile(resource.name)
	if err != nil {
		return err
	}
	if !c.global.flagQuiet {
		fmt.Printf(i18n.G("Profile %s deleted")+"\n", resource.name)
	}
	return nil
}
// Edit
// cmdProfileEdit implements "lxc profile edit".
type cmdProfileEdit struct {
	global  *cmdGlobal
	profile *cmdProfile
}
// Command builds the "lxc profile edit" subcommand.
func (c *cmdProfileEdit) Command() *cobra.Command {
	cmd := &cobra.Command{}
	cmd.Use = usage("edit", i18n.G("[<remote>:]<profile>"))
	cmd.Short = i18n.G("Edit profile configurations as YAML")
	cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
		`Edit profile configurations as YAML`))
	cmd.Example = cli.FormatSection("", i18n.G(
		`lxc profile edit <profile> < profile.yaml
Update a profile using the content of profile.yaml`))
	cmd.RunE = c.Run
	return cmd
}
// helpTemplate returns the comment header prepended to the YAML buffer
// handed to the interactive editor by Run.
// Fix: the help text had an unbalanced quote ("a '#") — closed it.
func (c *cmdProfileEdit) helpTemplate() string {
	return i18n.G(
		`### This is a YAML representation of the profile.
### Any line starting with a '#' will be ignored.
###
### A profile consists of a set of configuration items followed by a set of
### devices.
###
### An example would look like:
### name: onenic
### config:
### raw.lxc: lxc.aa_profile=unconfined
### devices:
### eth0:
### nictype: bridged
### parent: lxdbr0
### type: nic
###
### Note that the name is shown but cannot be changed`)
}
// Run edits a profile's configuration as YAML. Piped stdin is applied
// directly; otherwise an editor is spawned and re-spawned until the config
// parses and applies cleanly.
func (c *cmdProfileEdit) Run(cmd *cobra.Command, args []string) error {
	// Quick checks.
	exit, err := c.global.CheckArgs(cmd, args, 1, 1)
	if exit {
		return err
	}
	// Parse remote
	resources, err := c.global.ParseServers(args[0])
	if err != nil {
		return err
	}
	resource := resources[0]
	if resource.name == "" {
		return fmt.Errorf(i18n.G("Missing profile name"))
	}
	// If stdin isn't a terminal, read text from it
	if !termios.IsTerminal(getStdinFd()) {
		contents, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			return err
		}
		newdata := api.ProfilePut{}
		err = yaml.Unmarshal(contents, &newdata)
		if err != nil {
			return err
		}
		// Empty ETag: overwrite unconditionally when piped.
		return resource.server.UpdateProfile(resource.name, newdata, "")
	}
	// Extract the current value
	profile, etag, err := resource.server.GetProfile(resource.name)
	if err != nil {
		return err
	}
	data, err := yaml.Marshal(&profile)
	if err != nil {
		return err
	}
	// Spawn the editor
	content, err := shared.TextEditor("", []byte(c.helpTemplate()+"\n\n"+string(data)))
	if err != nil {
		return err
	}
	// Retry loop: on parse/apply failure, let the user re-edit the same
	// buffer rather than losing their changes.
	for {
		// Parse the text received from the editor
		newdata := api.ProfilePut{}
		err = yaml.Unmarshal(content, &newdata)
		if err == nil {
			err = resource.server.UpdateProfile(resource.name, newdata, etag)
		}
		// Respawn the editor
		if err != nil {
			fmt.Fprintf(os.Stderr, i18n.G("Config parsing error: %s")+"\n", err)
			fmt.Println(i18n.G("Press enter to open the editor again or ctrl+c to abort change"))
			// Note: this err deliberately shadows the outer one.
			_, err := os.Stdin.Read(make([]byte, 1))
			if err != nil {
				return err
			}
			content, err = shared.TextEditor("", content)
			if err != nil {
				return err
			}
			continue
		}
		break
	}
	return nil
}
// Get
// cmdProfileGet implements "lxc profile get".
type cmdProfileGet struct {
	global  *cmdGlobal
	profile *cmdProfile
}
// Command builds the "lxc profile get" subcommand.
func (c *cmdProfileGet) Command() *cobra.Command {
	cmd := &cobra.Command{
		Use:   usage("get", i18n.G("[<remote>:]<profile> <key>")),
		Short: i18n.G("Get values for profile configuration keys"),
		Long: cli.FormatSection(i18n.G("Description"), i18n.G(
			`Get values for profile configuration keys`)),
		RunE: c.Run,
	}

	return cmd
}
// Run prints the value of a single profile config key (an empty line when
// the key is unset).
func (c *cmdProfileGet) Run(cmd *cobra.Command, args []string) error {
	// Quick checks.
	exit, err := c.global.CheckArgs(cmd, args, 2, 2)
	if exit {
		return err
	}
	// Parse remote
	resources, err := c.global.ParseServers(args[0])
	if err != nil {
		return err
	}
	resource := resources[0]
	if resource.name == "" {
		return fmt.Errorf(i18n.G("Missing profile name"))
	}
	// Get the configuration key
	profile, _, err := resource.server.GetProfile(resource.name)
	if err != nil {
		return err
	}
	fmt.Printf("%s\n", profile.Config[args[1]])
	return nil
}
// List
// cmdProfileList implements "lxc profile list" (alias "ls").
type cmdProfileList struct {
	global  *cmdGlobal
	profile *cmdProfile
	// flagFormat selects the output renderer (csv|json|table|yaml|compact).
	flagFormat string
}
// Command builds the "lxc profile list" subcommand and its --format flag.
func (c *cmdProfileList) Command() *cobra.Command {
	cmd := &cobra.Command{}
	cmd.Use = usage("list", i18n.G("[<remote>:]"))
	cmd.Aliases = []string{"ls"}
	cmd.Short = i18n.G("List profiles")
	cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
		`List profiles`))
	cmd.RunE = c.Run
	cmd.Flags().StringVarP(&c.flagFormat, "format", "f", "table", i18n.G("Format (csv|json|table|yaml|compact)")+"``")
	return cmd
}
// Run lists all profiles on the target remote (default remote when none is
// given) in the requested output format.
func (c *cmdProfileList) Run(cmd *cobra.Command, args []string) error {
	// Quick checks.
	exit, err := c.global.CheckArgs(cmd, args, 0, 1)
	if exit {
		return err
	}
	// Parse remote
	remote := ""
	if len(args) > 0 {
		remote = args[0]
	}
	resources, err := c.global.ParseServers(remote)
	if err != nil {
		return err
	}
	resource := resources[0]
	// List profiles
	profiles, err := resource.server.GetProfiles()
	if err != nil {
		return err
	}
	data := [][]string{}
	for _, profile := range profiles {
		strUsedBy := fmt.Sprintf("%d", len(profile.UsedBy))
		data = append(data, []string{profile.Name, profile.Description, strUsedBy})
	}
	sort.Sort(utils.ByName(data))
	header := []string{
		i18n.G("NAME"),
		i18n.G("DESCRIPTION"),
		i18n.G("USED BY")}
	return utils.RenderTable(c.flagFormat, header, data, profiles)
}
// Remove
// cmdProfileRemove implements "lxc profile remove".
type cmdProfileRemove struct {
	global  *cmdGlobal
	profile *cmdProfile
}
// Command builds the "lxc profile remove" subcommand.
func (c *cmdProfileRemove) Command() *cobra.Command {
	cmd := &cobra.Command{
		Use:   usage("remove", i18n.G("[<remote>:]<instance> <profile>")),
		Short: i18n.G("Remove profiles from instances"),
		Long: cli.FormatSection(i18n.G("Description"), i18n.G(
			`Remove profiles from instances`)),
		RunE: c.Run,
	}

	return cmd
}
// Run removes a single profile from the instance's profile list, failing
// when the profile isn't currently applied.
func (c *cmdProfileRemove) Run(cmd *cobra.Command, args []string) error {
	// Quick checks.
	exit, err := c.global.CheckArgs(cmd, args, 2, 2)
	if exit {
		return err
	}
	// Parse remote
	resources, err := c.global.ParseServers(args[0])
	if err != nil {
		return err
	}
	resource := resources[0]
	if resource.name == "" {
		return fmt.Errorf(i18n.G("Missing instance name"))
	}
	// Remove the profile
	inst, etag, err := resource.server.GetInstance(resource.name)
	if err != nil {
		return err
	}
	if !shared.StringInSlice(args[1], inst.Profiles) {
		return fmt.Errorf(i18n.G("Profile %s isn't currently applied to %s"), args[1], resource.name)
	}
	// Rebuild the list without the removed profile.
	profiles := []string{}
	for _, profile := range inst.Profiles {
		if profile == args[1] {
			continue
		}
		profiles = append(profiles, profile)
	}
	inst.Profiles = profiles
	op, err := resource.server.UpdateInstance(resource.name, inst.Writable(), etag)
	if err != nil {
		return err
	}
	err = op.Wait()
	if err != nil {
		return err
	}
	if !c.global.flagQuiet {
		fmt.Printf(i18n.G("Profile %s removed from %s")+"\n", args[1], resource.name)
	}
	return nil
}
// Rename
// cmdProfileRename implements "lxc profile rename" (alias "mv").
type cmdProfileRename struct {
	global  *cmdGlobal
	profile *cmdProfile
}
// Command builds the "lxc profile rename" subcommand.
func (c *cmdProfileRename) Command() *cobra.Command {
	cmd := &cobra.Command{
		Use:     usage("rename", i18n.G("[<remote>:]<profile> <new-name>")),
		Aliases: []string{"mv"},
		Short:   i18n.G("Rename profiles"),
		Long: cli.FormatSection(i18n.G("Description"), i18n.G(
			`Rename profiles`)),
		RunE: c.Run,
	}

	return cmd
}
// Run renames the profile to the name given in args[1].
func (c *cmdProfileRename) Run(cmd *cobra.Command, args []string) error {
	// Quick checks.
	exit, err := c.global.CheckArgs(cmd, args, 2, 2)
	if exit {
		return err
	}
	// Parse remote
	resources, err := c.global.ParseServers(args[0])
	if err != nil {
		return err
	}
	resource := resources[0]
	if resource.name == "" {
		return fmt.Errorf(i18n.G("Missing profile name"))
	}
	// Rename the profile
	err = resource.server.RenameProfile(resource.name, api.ProfilePost{Name: args[1]})
	if err != nil {
		return err
	}
	if !c.global.flagQuiet {
		fmt.Printf(i18n.G("Profile %s renamed to %s")+"\n", resource.name, args[1])
	}
	return nil
}
// Set
// cmdProfileSet implements "lxc profile set" (also backing "unset").
type cmdProfileSet struct {
	global  *cmdGlobal
	profile *cmdProfile
}
// Command builds the "lxc profile set" subcommand.
// Fix: the usage string read "<key><value>..." — Run parses key=value
// pairs via getConfig, so the separator belongs in the usage text.
func (c *cmdProfileSet) Command() *cobra.Command {
	cmd := &cobra.Command{}
	cmd.Use = usage("set", i18n.G("[<remote>:]<profile> <key>=<value>..."))
	cmd.Short = i18n.G("Set profile configuration keys")
	cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
		`Set profile configuration keys
For backward compatibility, a single configuration key may still be set with:
lxc profile set [<remote>:]<profile> <key> <value>`))
	cmd.RunE = c.Run
	return cmd
}
// Run merges one or more key=value pairs into the profile's configuration
// and pushes the update with the fetched ETag.
func (c *cmdProfileSet) Run(cmd *cobra.Command, args []string) error {
	// Quick checks.
	exit, err := c.global.CheckArgs(cmd, args, 2, -1)
	if exit {
		return err
	}
	// Parse remote
	resources, err := c.global.ParseServers(args[0])
	if err != nil {
		return err
	}
	resource := resources[0]
	if resource.name == "" {
		return fmt.Errorf(i18n.G("Missing profile name"))
	}
	// Get the profile
	profile, etag, err := resource.server.GetProfile(resource.name)
	if err != nil {
		return err
	}
	// Set the configuration key
	keys, err := getConfig(args[1:]...)
	if err != nil {
		return err
	}
	for k, v := range keys {
		profile.Config[k] = v
	}
	return resource.server.UpdateProfile(resource.name, profile.Writable(), etag)
}
// Show
type cmdProfileShow struct {
global *cmdGlobal
profile *cmdProfile
}
// Command builds the cobra definition for "lxc profile show".
func (c *cmdProfileShow) Command() *cobra.Command {
	cmd := &cobra.Command{
		Short: i18n.G("Show profile configurations"),
	}
	cmd.Use = usage("show", i18n.G("[<remote>:]<profile>"))
	cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
		`Show profile configurations`))
	cmd.RunE = c.Run

	return cmd
}
// Run implements "lxc profile show": prints the profile as YAML on stdout.
func (c *cmdProfileShow) Run(cmd *cobra.Command, args []string) error {
	// Validate the argument count.
	exit, err := c.global.CheckArgs(cmd, args, 1, 1)
	if exit {
		return err
	}

	// Resolve the remote server and profile name.
	resources, err := c.global.ParseServers(args[0])
	if err != nil {
		return err
	}

	res := resources[0]
	if res.name == "" {
		return fmt.Errorf(i18n.G("Missing profile name"))
	}

	// Fetch the profile and render it as YAML.
	profile, _, err := res.server.GetProfile(res.name)
	if err != nil {
		return err
	}

	out, err := yaml.Marshal(&profile)
	if err != nil {
		return err
	}

	fmt.Printf("%s", out)

	return nil
}
// Unset
type cmdProfileUnset struct {
global *cmdGlobal
profile *cmdProfile
profileSet *cmdProfileSet
}
func (c *cmdProfileUnset) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("unset", i18n.G("[<remote>:]<profile> <key>"))
cmd.Short = i18n.G("Unset profile configuration keys")
cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
`Unset profile configuration keys`))
cmd.RunE = c.Run
return cmd
}
// Run implements "lxc profile unset". It delegates to the "set" handler with
// an empty value appended for the key, reusing its fetch/update logic.
func (c *cmdProfileUnset) Run(cmd *cobra.Command, args []string) error {
	// Quick checks.
	exit, err := c.global.CheckArgs(cmd, args, 2, 2)
	if exit {
		return err
	}

	// Turn "<profile> <key>" into "<profile> <key> <empty>" so set's
	// parser sees a value; presumably an empty value clears the key on the
	// server side — confirm against getConfig/UpdateProfile semantics.
	args = append(args, "")
	return c.profileSet.Run(cmd, args)
}
lxc/profile: Add cross-project copy
Closes #10340
Signed-off-by: Stéphane Graber <089afc6d81f66f1168a9849e15660feae286e024@ubuntu.com>
package main
import (
"fmt"
"io/ioutil"
"os"
"sort"
"strings"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"github.com/lxc/lxd/lxc/utils"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
cli "github.com/lxc/lxd/shared/cmd"
"github.com/lxc/lxd/shared/i18n"
"github.com/lxc/lxd/shared/termios"
)
type cmdProfile struct {
global *cmdGlobal
}
// Command returns the top-level "profile" command with every subcommand
// (add/assign/copy/create/delete/device/edit/get/list/remove/rename/set/
// show/unset) registered on it.
func (c *cmdProfile) Command() *cobra.Command {
	cmd := &cobra.Command{}
	cmd.Use = usage("profile")
	cmd.Short = i18n.G("Manage profiles")
	cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
		`Manage profiles`))

	// Add
	profileAddCmd := cmdProfileAdd{global: c.global, profile: c}
	cmd.AddCommand(profileAddCmd.Command())

	// Assign
	profileAssignCmd := cmdProfileAssign{global: c.global, profile: c}
	cmd.AddCommand(profileAssignCmd.Command())

	// Copy
	profileCopyCmd := cmdProfileCopy{global: c.global, profile: c}
	cmd.AddCommand(profileCopyCmd.Command())

	// Create
	profileCreateCmd := cmdProfileCreate{global: c.global, profile: c}
	cmd.AddCommand(profileCreateCmd.Command())

	// Delete
	profileDeleteCmd := cmdProfileDelete{global: c.global, profile: c}
	cmd.AddCommand(profileDeleteCmd.Command())

	// Device (shared with "lxc config device")
	profileDeviceCmd := cmdConfigDevice{global: c.global, profile: c}
	cmd.AddCommand(profileDeviceCmd.Command())

	// Edit
	profileEditCmd := cmdProfileEdit{global: c.global, profile: c}
	cmd.AddCommand(profileEditCmd.Command())

	// Get
	profileGetCmd := cmdProfileGet{global: c.global, profile: c}
	cmd.AddCommand(profileGetCmd.Command())

	// List
	profileListCmd := cmdProfileList{global: c.global, profile: c}
	cmd.AddCommand(profileListCmd.Command())

	// Remove
	profileRemoveCmd := cmdProfileRemove{global: c.global, profile: c}
	cmd.AddCommand(profileRemoveCmd.Command())

	// Rename
	profileRenameCmd := cmdProfileRename{global: c.global, profile: c}
	cmd.AddCommand(profileRenameCmd.Command())

	// Set
	profileSetCmd := cmdProfileSet{global: c.global, profile: c}
	cmd.AddCommand(profileSetCmd.Command())

	// Show
	profileShowCmd := cmdProfileShow{global: c.global, profile: c}
	cmd.AddCommand(profileShowCmd.Command())

	// Unset (delegates to the Set handler above, hence the pointer)
	profileUnsetCmd := cmdProfileUnset{global: c.global, profile: c, profileSet: &profileSetCmd}
	cmd.AddCommand(profileUnsetCmd.Command())

	// Workaround for subcommand usage errors. See: https://github.com/spf13/cobra/issues/706
	cmd.Args = cobra.NoArgs
	cmd.Run = func(cmd *cobra.Command, args []string) { cmd.Usage() }
	return cmd
}
// Add
type cmdProfileAdd struct {
global *cmdGlobal
profile *cmdProfile
}
func (c *cmdProfileAdd) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("add", i18n.G("[<remote>:]<instance> <profile>"))
cmd.Short = i18n.G("Add profiles to instances")
cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
`Add profiles to instances`))
cmd.RunE = c.Run
return cmd
}
func (c *cmdProfileAdd) Run(cmd *cobra.Command, args []string) error {
// Quick checks.
exit, err := c.global.CheckArgs(cmd, args, 2, 2)
if exit {
return err
}
// Parse remote
resources, err := c.global.ParseServers(args[0])
if err != nil {
return err
}
resource := resources[0]
if resource.name == "" {
return fmt.Errorf(i18n.G("Missing instance name"))
}
// Add the profile
inst, etag, err := resource.server.GetInstance(resource.name)
if err != nil {
return err
}
inst.Profiles = append(inst.Profiles, args[1])
op, err := resource.server.UpdateInstance(resource.name, inst.Writable(), etag)
if err != nil {
return err
}
err = op.Wait()
if err != nil {
return err
}
if !c.global.flagQuiet {
fmt.Printf(i18n.G("Profile %s added to %s")+"\n", args[1], resource.name)
}
return nil
}
// Assign
type cmdProfileAssign struct {
global *cmdGlobal
profile *cmdProfile
}
func (c *cmdProfileAssign) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("assign", i18n.G("[<remote>:]<instance> <profiles>"))
cmd.Aliases = []string{"apply"}
cmd.Short = i18n.G("Assign sets of profiles to instances")
cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
`Assign sets of profiles to instances`))
cmd.Example = cli.FormatSection("", i18n.G(
`lxc profile assign foo default,bar
Set the profiles for "foo" to "default" and "bar".
lxc profile assign foo default
Reset "foo" to only using the "default" profile.
lxc profile assign foo ''
Remove all profile from "foo"`))
cmd.RunE = c.Run
return cmd
}
func (c *cmdProfileAssign) Run(cmd *cobra.Command, args []string) error {
// Quick checks.
exit, err := c.global.CheckArgs(cmd, args, 2, 2)
if exit {
return err
}
// Parse remote
resources, err := c.global.ParseServers(args[0])
if err != nil {
return err
}
resource := resources[0]
// Assign the profiles
if resource.name == "" {
return fmt.Errorf(i18n.G("Missing instance name"))
}
inst, etag, err := resource.server.GetInstance(resource.name)
if err != nil {
return err
}
if args[1] != "" {
inst.Profiles = strings.Split(args[1], ",")
} else {
inst.Profiles = nil
}
op, err := resource.server.UpdateInstance(resource.name, inst.Writable(), etag)
if err != nil {
return err
}
err = op.Wait()
if err != nil {
return err
}
if args[1] == "" {
args[1] = i18n.G("(none)")
}
if !c.global.flagQuiet {
fmt.Printf(i18n.G("Profiles %s applied to %s")+"\n", args[1], resource.name)
}
return nil
}
// Copy
type cmdProfileCopy struct {
global *cmdGlobal
profile *cmdProfile
flagTargetProject string
}
func (c *cmdProfileCopy) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("copy", i18n.G("[<remote>:]<profile> [<remote>:]<profile>"))
cmd.Aliases = []string{"cp"}
cmd.Short = i18n.G("Copy profiles")
cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
`Copy profiles`))
cmd.Flags().StringVar(&c.flagTargetProject, "target-project", "", i18n.G("Copy to a project different from the source")+"``")
cmd.RunE = c.Run
return cmd
}
// Run implements "lxc profile copy": reads the source profile and creates it
// on the destination server (optionally in a different project).
func (c *cmdProfileCopy) Run(cmd *cobra.Command, args []string) error {
	// Quick checks.
	exit, err := c.global.CheckArgs(cmd, args, 2, 2)
	if exit {
		return err
	}

	// Parse remote
	resources, err := c.global.ParseServers(args...)
	if err != nil {
		return err
	}

	source := resources[0]
	dest := resources[1]

	if source.name == "" {
		return fmt.Errorf(i18n.G("Missing source profile name"))
	}

	// Default the destination name to the source name.
	if dest.name == "" {
		dest.name = source.name
	}

	// Copy the profile
	profile, _, err := source.server.GetProfile(source.name)
	if err != nil {
		return err
	}

	newProfile := api.ProfilesPost{
		ProfilePut: profile.Writable(),
		Name:       dest.name,
	}

	// BUG FIX: project switching must use UseProject (UseTarget selects a
	// cluster member, not a project) and its return value must be captured;
	// the original discarded the returned server, so --target-project had no
	// effect.
	if c.flagTargetProject != "" {
		dest.server = dest.server.UseProject(c.flagTargetProject)
	}

	return dest.server.CreateProfile(newProfile)
}
// Create
type cmdProfileCreate struct {
global *cmdGlobal
profile *cmdProfile
}
func (c *cmdProfileCreate) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("create", i18n.G("[<remote>:]<profile>"))
cmd.Short = i18n.G("Create profiles")
cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
`Create profiles`))
cmd.RunE = c.Run
return cmd
}
func (c *cmdProfileCreate) Run(cmd *cobra.Command, args []string) error {
// Quick checks.
exit, err := c.global.CheckArgs(cmd, args, 1, 1)
if exit {
return err
}
// Parse remote
resources, err := c.global.ParseServers(args[0])
if err != nil {
return err
}
resource := resources[0]
if resource.name == "" {
return fmt.Errorf(i18n.G("Missing profile name"))
}
// Create the profile
profile := api.ProfilesPost{}
profile.Name = resource.name
err = resource.server.CreateProfile(profile)
if err != nil {
return err
}
if !c.global.flagQuiet {
fmt.Printf(i18n.G("Profile %s created")+"\n", resource.name)
}
return nil
}
// Delete
type cmdProfileDelete struct {
global *cmdGlobal
profile *cmdProfile
}
func (c *cmdProfileDelete) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("delete", i18n.G("[<remote>:]<profile>"))
cmd.Aliases = []string{"rm"}
cmd.Short = i18n.G("Delete profiles")
cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
`Delete profiles`))
cmd.RunE = c.Run
return cmd
}
func (c *cmdProfileDelete) Run(cmd *cobra.Command, args []string) error {
// Quick checks.
exit, err := c.global.CheckArgs(cmd, args, 1, 1)
if exit {
return err
}
// Parse remote
resources, err := c.global.ParseServers(args[0])
if err != nil {
return err
}
resource := resources[0]
if resource.name == "" {
return fmt.Errorf(i18n.G("Missing profile name"))
}
// Delete the profile
err = resource.server.DeleteProfile(resource.name)
if err != nil {
return err
}
if !c.global.flagQuiet {
fmt.Printf(i18n.G("Profile %s deleted")+"\n", resource.name)
}
return nil
}
// Edit
type cmdProfileEdit struct {
global *cmdGlobal
profile *cmdProfile
}
func (c *cmdProfileEdit) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("edit", i18n.G("[<remote>:]<profile>"))
cmd.Short = i18n.G("Edit profile configurations as YAML")
cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
`Edit profile configurations as YAML`))
cmd.Example = cli.FormatSection("", i18n.G(
`lxc profile edit <profile> < profile.yaml
Update a profile using the content of profile.yaml`))
cmd.RunE = c.Run
return cmd
}
func (c *cmdProfileEdit) helpTemplate() string {
return i18n.G(
`### This is a YAML representation of the profile.
### Any line starting with a '# will be ignored.
###
### A profile consists of a set of configuration items followed by a set of
### devices.
###
### An example would look like:
### name: onenic
### config:
### raw.lxc: lxc.aa_profile=unconfined
### devices:
### eth0:
### nictype: bridged
### parent: lxdbr0
### type: nic
###
### Note that the name is shown but cannot be changed`)
}
func (c *cmdProfileEdit) Run(cmd *cobra.Command, args []string) error {
// Quick checks.
exit, err := c.global.CheckArgs(cmd, args, 1, 1)
if exit {
return err
}
// Parse remote
resources, err := c.global.ParseServers(args[0])
if err != nil {
return err
}
resource := resources[0]
if resource.name == "" {
return fmt.Errorf(i18n.G("Missing profile name"))
}
// If stdin isn't a terminal, read text from it
if !termios.IsTerminal(getStdinFd()) {
contents, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return err
}
newdata := api.ProfilePut{}
err = yaml.Unmarshal(contents, &newdata)
if err != nil {
return err
}
return resource.server.UpdateProfile(resource.name, newdata, "")
}
// Extract the current value
profile, etag, err := resource.server.GetProfile(resource.name)
if err != nil {
return err
}
data, err := yaml.Marshal(&profile)
if err != nil {
return err
}
// Spawn the editor
content, err := shared.TextEditor("", []byte(c.helpTemplate()+"\n\n"+string(data)))
if err != nil {
return err
}
for {
// Parse the text received from the editor
newdata := api.ProfilePut{}
err = yaml.Unmarshal(content, &newdata)
if err == nil {
err = resource.server.UpdateProfile(resource.name, newdata, etag)
}
// Respawn the editor
if err != nil {
fmt.Fprintf(os.Stderr, i18n.G("Config parsing error: %s")+"\n", err)
fmt.Println(i18n.G("Press enter to open the editor again or ctrl+c to abort change"))
_, err := os.Stdin.Read(make([]byte, 1))
if err != nil {
return err
}
content, err = shared.TextEditor("", content)
if err != nil {
return err
}
continue
}
break
}
return nil
}
// Get
type cmdProfileGet struct {
global *cmdGlobal
profile *cmdProfile
}
func (c *cmdProfileGet) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("get", i18n.G("[<remote>:]<profile> <key>"))
cmd.Short = i18n.G("Get values for profile configuration keys")
cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
`Get values for profile configuration keys`))
cmd.RunE = c.Run
return cmd
}
func (c *cmdProfileGet) Run(cmd *cobra.Command, args []string) error {
// Quick checks.
exit, err := c.global.CheckArgs(cmd, args, 2, 2)
if exit {
return err
}
// Parse remote
resources, err := c.global.ParseServers(args[0])
if err != nil {
return err
}
resource := resources[0]
if resource.name == "" {
return fmt.Errorf(i18n.G("Missing profile name"))
}
// Get the configuration key
profile, _, err := resource.server.GetProfile(resource.name)
if err != nil {
return err
}
fmt.Printf("%s\n", profile.Config[args[1]])
return nil
}
// List
type cmdProfileList struct {
global *cmdGlobal
profile *cmdProfile
flagFormat string
}
func (c *cmdProfileList) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("list", i18n.G("[<remote>:]"))
cmd.Aliases = []string{"ls"}
cmd.Short = i18n.G("List profiles")
cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
`List profiles`))
cmd.RunE = c.Run
cmd.Flags().StringVarP(&c.flagFormat, "format", "f", "table", i18n.G("Format (csv|json|table|yaml|compact)")+"``")
return cmd
}
func (c *cmdProfileList) Run(cmd *cobra.Command, args []string) error {
// Quick checks.
exit, err := c.global.CheckArgs(cmd, args, 0, 1)
if exit {
return err
}
// Parse remote
remote := ""
if len(args) > 0 {
remote = args[0]
}
resources, err := c.global.ParseServers(remote)
if err != nil {
return err
}
resource := resources[0]
// List profiles
profiles, err := resource.server.GetProfiles()
if err != nil {
return err
}
data := [][]string{}
for _, profile := range profiles {
strUsedBy := fmt.Sprintf("%d", len(profile.UsedBy))
data = append(data, []string{profile.Name, profile.Description, strUsedBy})
}
sort.Sort(utils.ByName(data))
header := []string{
i18n.G("NAME"),
i18n.G("DESCRIPTION"),
i18n.G("USED BY")}
return utils.RenderTable(c.flagFormat, header, data, profiles)
}
// Remove
type cmdProfileRemove struct {
global *cmdGlobal
profile *cmdProfile
}
func (c *cmdProfileRemove) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("remove", i18n.G("[<remote>:]<instance> <profile>"))
cmd.Short = i18n.G("Remove profiles from instances")
cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
`Remove profiles from instances`))
cmd.RunE = c.Run
return cmd
}
func (c *cmdProfileRemove) Run(cmd *cobra.Command, args []string) error {
// Quick checks.
exit, err := c.global.CheckArgs(cmd, args, 2, 2)
if exit {
return err
}
// Parse remote
resources, err := c.global.ParseServers(args[0])
if err != nil {
return err
}
resource := resources[0]
if resource.name == "" {
return fmt.Errorf(i18n.G("Missing instance name"))
}
// Remove the profile
inst, etag, err := resource.server.GetInstance(resource.name)
if err != nil {
return err
}
if !shared.StringInSlice(args[1], inst.Profiles) {
return fmt.Errorf(i18n.G("Profile %s isn't currently applied to %s"), args[1], resource.name)
}
profiles := []string{}
for _, profile := range inst.Profiles {
if profile == args[1] {
continue
}
profiles = append(profiles, profile)
}
inst.Profiles = profiles
op, err := resource.server.UpdateInstance(resource.name, inst.Writable(), etag)
if err != nil {
return err
}
err = op.Wait()
if err != nil {
return err
}
if !c.global.flagQuiet {
fmt.Printf(i18n.G("Profile %s removed from %s")+"\n", args[1], resource.name)
}
return nil
}
// Rename
type cmdProfileRename struct {
global *cmdGlobal
profile *cmdProfile
}
func (c *cmdProfileRename) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("rename", i18n.G("[<remote>:]<profile> <new-name>"))
cmd.Aliases = []string{"mv"}
cmd.Short = i18n.G("Rename profiles")
cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
`Rename profiles`))
cmd.RunE = c.Run
return cmd
}
func (c *cmdProfileRename) Run(cmd *cobra.Command, args []string) error {
// Quick checks.
exit, err := c.global.CheckArgs(cmd, args, 2, 2)
if exit {
return err
}
// Parse remote
resources, err := c.global.ParseServers(args[0])
if err != nil {
return err
}
resource := resources[0]
if resource.name == "" {
return fmt.Errorf(i18n.G("Missing profile name"))
}
// Rename the profile
err = resource.server.RenameProfile(resource.name, api.ProfilePost{Name: args[1]})
if err != nil {
return err
}
if !c.global.flagQuiet {
fmt.Printf(i18n.G("Profile %s renamed to %s")+"\n", resource.name, args[1])
}
return nil
}
// Set
type cmdProfileSet struct {
global *cmdGlobal
profile *cmdProfile
}
// Command builds the cobra definition for "lxc profile set".
func (c *cmdProfileSet) Command() *cobra.Command {
	cmd := &cobra.Command{}
	// BUG FIX: the usage line previously read "<key><value>..." with no "="
	// separator; keys are actually supplied as key=value pairs.
	cmd.Use = usage("set", i18n.G("[<remote>:]<profile> <key>=<value>..."))
	cmd.Short = i18n.G("Set profile configuration keys")
	cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
		`Set profile configuration keys
For backward compatibility, a single configuration key may still be set with:
lxc profile set [<remote>:]<profile> <key> <value>`))
	cmd.RunE = c.Run

	return cmd
}
func (c *cmdProfileSet) Run(cmd *cobra.Command, args []string) error {
// Quick checks.
exit, err := c.global.CheckArgs(cmd, args, 2, -1)
if exit {
return err
}
// Parse remote
resources, err := c.global.ParseServers(args[0])
if err != nil {
return err
}
resource := resources[0]
if resource.name == "" {
return fmt.Errorf(i18n.G("Missing profile name"))
}
// Get the profile
profile, etag, err := resource.server.GetProfile(resource.name)
if err != nil {
return err
}
// Set the configuration key
keys, err := getConfig(args[1:]...)
if err != nil {
return err
}
for k, v := range keys {
profile.Config[k] = v
}
return resource.server.UpdateProfile(resource.name, profile.Writable(), etag)
}
// Show
type cmdProfileShow struct {
global *cmdGlobal
profile *cmdProfile
}
func (c *cmdProfileShow) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("show", i18n.G("[<remote>:]<profile>"))
cmd.Short = i18n.G("Show profile configurations")
cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
`Show profile configurations`))
cmd.RunE = c.Run
return cmd
}
func (c *cmdProfileShow) Run(cmd *cobra.Command, args []string) error {
// Quick checks.
exit, err := c.global.CheckArgs(cmd, args, 1, 1)
if exit {
return err
}
// Parse remote
resources, err := c.global.ParseServers(args[0])
if err != nil {
return err
}
resource := resources[0]
if resource.name == "" {
return fmt.Errorf(i18n.G("Missing profile name"))
}
// Show the profile
profile, _, err := resource.server.GetProfile(resource.name)
if err != nil {
return err
}
data, err := yaml.Marshal(&profile)
if err != nil {
return err
}
fmt.Printf("%s", data)
return nil
}
// Unset
type cmdProfileUnset struct {
global *cmdGlobal
profile *cmdProfile
profileSet *cmdProfileSet
}
func (c *cmdProfileUnset) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("unset", i18n.G("[<remote>:]<profile> <key>"))
cmd.Short = i18n.G("Unset profile configuration keys")
cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(
`Unset profile configuration keys`))
cmd.RunE = c.Run
return cmd
}
func (c *cmdProfileUnset) Run(cmd *cobra.Command, args []string) error {
// Quick checks.
exit, err := c.global.CheckArgs(cmd, args, 2, 2)
if exit {
return err
}
args = append(args, "")
return c.profileSet.Run(cmd, args)
}
|
package main
import (
"fmt"
"io/ioutil"
"os"
"sort"
"strconv"
"strings"
"syscall"
"github.com/olekukonko/tablewriter"
"gopkg.in/yaml.v2"
"github.com/lxc/lxd"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/i18n"
"github.com/lxc/lxd/shared/termios"
)
// byNameAndType orders table rows by the name column (index 0) first, then by
// the type column (index 1); rows whose type column is empty sort after rows
// with a non-empty type.
type byNameAndType [][]string

func (a byNameAndType) Len() int { return len(a) }

func (a byNameAndType) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

func (a byNameAndType) Less(i, j int) bool {
	left, right := a[i], a[j]

	// Primary key: the name column.
	if left[0] != right[0] {
		return left[0] < right[0]
	}

	// Empty type strings sort last.
	switch {
	case left[1] == "":
		return false
	case right[1] == "":
		return true
	default:
		return left[1] < right[1]
	}
}
type storageCmd struct {
}
// showByDefault reports that the "storage" command should be listed in the
// top-level help output.
func (c *storageCmd) showByDefault() bool {
	return true
}
func (c *storageCmd) storagePoolEditHelp() string {
return i18n.G(
`### This is a yaml representation of a storage pool.
### Any line starting with a '# will be ignored.
###
### A storage pool consists of a set of configuration items.
###
### An example would look like:
### name: default
### driver: zfs
### used_by: []
### config:
### size: "61203283968"
### source: /home/chb/mnt/lxd_test/default.img
### zfs.pool_name: default`)
}
func (c *storageCmd) storagePoolVolumeEditHelp() string {
return i18n.G(
`### This is a yaml representation of a storage volume.
### Any line starting with a '# will be ignored.
###
### A storage volume consists of a set of configuration items.
###
### name: vol1
### type: custom
### used_by: []
### config:
### size: "61203283968"`)
}
func (c *storageCmd) usage() string {
return i18n.G(
`Manage storage.
lxc storage list [<remote>:] List available storage pools.
lxc storage show [<remote>:]<pool> Show details of a storage pool.
lxc storage create [<remote>:]<pool> <driver> [key=value]... Create a storage pool.
lxc storage get [<remote>:]<pool> <key> Get storage pool configuration.
lxc storage set [<remote>:]<pool> <key> <value> Set storage pool configuration.
lxc storage unset [<remote>:]<pool> <key> Unset storage pool configuration.
lxc storage delete [<remote>:]<pool> Delete a storage pool.
lxc storage edit [<remote>:]<pool>
Edit storage pool, either by launching external editor or reading STDIN.
Example: lxc storage edit [<remote>:]<pool> # launch editor
cat pool.yaml | lxc storage edit [<remote>:]<pool> # read from pool.yaml
lxc storage volume list [<remote>:]<pool> List available storage volumes on a storage pool.
lxc storage volume show [<remote>:]<pool> <volume> Show details of a storage volume on a storage pool.
lxc storage volume create [<remote>:]<pool> <volume> [key=value]... Create a storage volume on a storage pool.
lxc storage volume get [<remote>:]<pool> <volume> <key> Get storage volume configuration on a storage pool.
lxc storage volume set [<remote>:]<pool> <volume> <key> <value> Set storage volume configuration on a storage pool.
lxc storage volume unset [<remote>:]<pool> <volume> <key> Unset storage volume configuration on a storage pool.
lxc storage volume delete [<remote>:]<pool> <volume> Delete a storage volume on a storage pool.
lxc storage volume edit [<remote>:]<pool> <volume>
Edit storage pool, either by launching external editor or reading STDIN.
Example: lxc storage volume edit [<remote>:]<pool> <volume> # launch editor
cat pool.yaml | lxc storage volume edit [<remote>:]<pool> <volume> # read from pool.yaml
lxc storage volume attach [<remote>:]<pool> <volume> <container> [device name] <path>
lxc storage volume attach-profile [<remote:>]<pool> <volume> <profile> [device name] <path>
lxc storage volume detach [<remote>:]<pool> <volume> <container> [device name]
lxc storage volume detach-profile [<remote:>]<pool> <volume> <profile> [device name]
Unless specified through a prefix, all volume operations affect "custom" (user created) volumes.
Examples:
To show the properties of a custom volume called "data" in the "default" pool:
lxc storage volume show default data
To show the properties of the filesystem for a container called "data" in the "default" pool:
lxc storage volume show default container/data
`)
}
func (c *storageCmd) flags() {}
// run dispatches the "lxc storage" subcommands. args[0] selects either a
// pool-level action or the "volume" sub-namespace; each case validates its
// own argument count before forwarding to the matching doStorage* helper.
func (c *storageCmd) run(config *lxd.Config, args []string) error {
	if len(args) < 1 {
		return errArgs
	}

	if args[0] == "list" {
		return c.doStoragePoolsList(config, args)
	}

	if len(args) < 2 {
		return errArgs
	}

	// NOTE(review): for "volume" commands args[1] is the action word (e.g.
	// "attach"), so the remote/name parsed here come from that word rather
	// than from a pool argument — confirm this matches the intended syntax.
	remote, sub := config.ParseRemoteAndContainer(args[1])
	client, err := lxd.NewClient(config, remote)
	if err != nil {
		return err
	}

	if args[0] == "volume" {
		// Volume sub-namespace: args[1] is the action, args[2] the pool and
		// (where applicable) args[3] the volume.
		switch args[1] {
		case "attach":
			if len(args) < 5 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			return c.doStoragePoolVolumeAttach(client, pool, volume, args[4:])
		case "attach-profile":
			if len(args) < 5 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			return c.doStoragePoolVolumeAttachProfile(client, pool, volume, args[4:])
		case "create":
			if len(args) < 4 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			return c.doStoragePoolVolumeCreate(client, pool, volume, args[4:])
		case "delete":
			if len(args) != 4 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			return c.doStoragePoolVolumeDelete(client, pool, volume)
		case "detach":
			if len(args) < 4 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			return c.doStoragePoolVolumeDetach(client, pool, volume, args[4:])
		case "detach-profile":
			if len(args) < 5 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			return c.doStoragePoolVolumeDetachProfile(client, pool, volume, args[4:])
		case "edit":
			if len(args) != 4 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			return c.doStoragePoolVolumeEdit(client, pool, volume)
		case "get":
			if len(args) < 4 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			// NOTE(review): args[3:] still includes the volume name itself;
			// presumably the helper shifts past it — verify.
			return c.doStoragePoolVolumeGet(client, pool, volume, args[3:])
		case "list":
			if len(args) != 3 {
				return errArgs
			}
			pool := args[2]
			return c.doStoragePoolVolumesList(config, remote, pool, args)
		case "set":
			if len(args) < 4 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			return c.doStoragePoolVolumeSet(client, pool, volume, args[3:])
		case "unset":
			if len(args) < 4 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			// NOTE(review): "unset" reuses the set helper with identical
			// arguments — confirm the helper clears keys given no value.
			return c.doStoragePoolVolumeSet(client, pool, volume, args[3:])
		case "show":
			if len(args) != 4 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			return c.doStoragePoolVolumeShow(client, pool, volume)
		default:
			return errArgs
		}
	} else {
		// Pool-level commands: the pool name came from ParseRemoteAndContainer.
		pool := sub
		switch args[0] {
		case "create":
			if len(args) < 3 {
				return errArgs
			}
			driver := args[2]
			return c.doStoragePoolCreate(client, pool, driver, args[3:])
		case "delete":
			return c.doStoragePoolDelete(client, pool)
		case "edit":
			return c.doStoragePoolEdit(client, pool)
		case "get":
			if len(args) < 2 {
				return errArgs
			}
			return c.doStoragePoolGet(client, pool, args[2:])
		case "set":
			if len(args) < 2 {
				return errArgs
			}
			return c.doStoragePoolSet(client, pool, args[2:])
		case "unset":
			if len(args) < 2 {
				return errArgs
			}
			// NOTE(review): pool "unset" also reuses doStoragePoolSet with
			// the same arguments — verify this is intended.
			return c.doStoragePoolSet(client, pool, args[2:])
		case "show":
			if len(args) < 2 {
				return errArgs
			}
			return c.doStoragePoolShow(client, pool)
		default:
			return errArgs
		}
	}
}
// parseVolume splits a volume reference of the form "[type/]name" into its
// name and type, defaulting the type to "custom" when no prefix is present.
func (c *storageCmd) parseVolume(name string) (string, string) {
	parts := strings.SplitN(name, "/", 2)
	if len(parts) < 2 {
		// No type prefix: the whole string is the volume name.
		return parts[0], "custom"
	}

	// "type/name" form: type comes first, name second.
	return parts[1], parts[0]
}
// doStoragePoolVolumeAttach attaches a custom storage volume to a container
// by adding a "disk" device. args is: <container> [<device name>] <path>;
// when no device name is given, the volume reference is used as the name.
func (c *storageCmd) doStoragePoolVolumeAttach(client *lxd.Client, pool string, volume string, args []string) error {
	if len(args) < 2 || len(args) > 3 {
		return errArgs
	}

	container := args[0]

	devPath := ""
	devName := ""
	if len(args) == 2 {
		// Only the path has been given to us.
		devPath = args[1]
		devName = volume
	} else if len(args) == 3 {
		// Path and device name have been given to us.
		devName = args[1]
		devPath = args[2]
	}

	// Only "custom" (user-created) volumes may be attached.
	volName, volType := c.parseVolume(volume)
	if volType != "custom" {
		return fmt.Errorf(i18n.G("Only \"custom\" volumes can be attached to containers."))
	}

	// Check if the requested storage volume actually
	// exists on the requested storage pool.
	vol, err := client.StoragePoolVolumeTypeGet(pool, volName, volType)
	if err != nil {
		return err
	}

	// Build the disk device properties and add the device to the container.
	props := []string{fmt.Sprintf("pool=%s", pool), fmt.Sprintf("path=%s", devPath), fmt.Sprintf("source=%s", vol.Name)}

	resp, err := client.ContainerDeviceAdd(container, devName, "disk", props)
	if err != nil {
		return err
	}

	// Block until the server-side operation completes.
	return client.WaitForSuccess(resp.Operation)
}
// doStoragePoolVolumeDetach removes the disk device that attaches the given
// storage volume to a container. args is: <container> [<device name>].
func (c *storageCmd) doStoragePoolVolumeDetach(client *lxd.Client, pool string, volume string, args []string) error {
	if len(args) < 1 || len(args) > 2 {
		return errArgs
	}

	containerName := args[0]

	devName := ""
	if len(args) == 2 {
		devName = args[1]
	}

	container, err := client.ContainerInfo(containerName)
	if err != nil {
		return err
	}

	// No device name given: look for exactly one disk device pointing at
	// this pool/volume; refuse to guess when several match.
	if devName == "" {
		for n, d := range container.Devices {
			if d["type"] == "disk" && d["pool"] == pool && d["source"] == volume {
				if devName != "" {
					return fmt.Errorf(i18n.G("More than one device matches, specify the device name."))
				}

				devName = n
			}
		}
	}

	if devName == "" {
		return fmt.Errorf(i18n.G("No device found for this storage volume."))
	}

	// Sanity check that the (possibly user-supplied) device really exists.
	_, ok := container.Devices[devName]
	if !ok {
		return fmt.Errorf(i18n.G("The specified device doesn't exist"))
	}

	resp, err := client.ContainerDeviceDelete(containerName, devName)
	if err != nil {
		return err
	}

	// Block until the server-side operation completes.
	return client.WaitForSuccess(resp.Operation)
}
func (c *storageCmd) doStoragePoolVolumeAttachProfile(client *lxd.Client, pool string, volume string, args []string) error {
if len(args) < 2 || len(args) > 3 {
return errArgs
}
profile := args[0]
devPath := ""
devName := ""
if len(args) == 2 {
// Only the path has been given to us.
devPath = args[1]
devName = volume
} else if len(args) == 3 {
// Path and device name have been given to us.
devName = args[1]
devPath = args[2]
}
volName, volType := c.parseVolume(volume)
if volType != "custom" {
return fmt.Errorf(i18n.G("Only \"custom\" volumes can be attached to containers."))
}
// Check if the requested storage volume actually
// exists on the requested storage pool.
vol, err := client.StoragePoolVolumeTypeGet(pool, volName, volType)
if err != nil {
return err
}
props := []string{fmt.Sprintf("pool=%s", pool), fmt.Sprintf("path=%s", devPath), fmt.Sprintf("source=%s", vol.Name)}
_, err = client.ProfileDeviceAdd(profile, devName, "disk", props)
return err
}
// doStoragePoolCreate creates a storage pool with the given driver from a
// list of "key=value" configuration arguments, printing a confirmation on
// success.
func (c *storageCmd) doStoragePoolCreate(client *lxd.Client, name string, driver string, args []string) error {
	// Parse "key=value" pairs into the pool configuration map.
	config := map[string]string{}
	for _, arg := range args {
		kv := strings.SplitN(arg, "=", 2)
		if len(kv) != 2 {
			return errArgs
		}

		config[kv[0]] = kv[1]
	}

	err := client.StoragePoolCreate(name, driver, config)
	if err == nil {
		fmt.Printf(i18n.G("Storage pool %s created")+"\n", name)
	}

	return err
}
func (c *storageCmd) doStoragePoolVolumeDetachProfile(client *lxd.Client, pool string, volume string, args []string) error {
if len(args) < 1 || len(args) > 2 {
return errArgs
}
profileName := args[0]
devName := ""
if len(args) > 1 {
devName = args[1]
}
profile, err := client.ProfileConfig(profileName)
if err != nil {
return err
}
if devName == "" {
for n, d := range profile.Devices {
if d["type"] == "disk" && d["pool"] == pool && d["source"] == volume {
if devName != "" {
return fmt.Errorf(i18n.G("More than one device matches, specify the device name."))
}
devName = n
}
}
}
if devName == "" {
return fmt.Errorf(i18n.G("No device found for this storage volume."))
}
_, ok := profile.Devices[devName]
if !ok {
return fmt.Errorf(i18n.G("The specified device doesn't exist"))
}
_, err = client.ProfileDeviceDelete(profileName, devName)
return err
}
// doStoragePoolDelete removes the given storage pool and prints a
// confirmation message on success.
func (c *storageCmd) doStoragePoolDelete(client *lxd.Client, name string) error {
	if err := client.StoragePoolDelete(name); err != nil {
		return err
	}

	fmt.Printf(i18n.G("Storage pool %s deleted")+"\n", name)
	return nil
}
func (c *storageCmd) doStoragePoolEdit(client *lxd.Client, name string) error {
// If stdin isn't a terminal, read text from it
if !termios.IsTerminal(int(syscall.Stdin)) {
contents, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return err
}
newdata := api.StoragePool{}
err = yaml.Unmarshal(contents, &newdata)
if err != nil {
return err
}
return client.StoragePoolPut(name, newdata)
}
// Extract the current value
pool, err := client.StoragePoolGet(name)
if err != nil {
return err
}
data, err := yaml.Marshal(&pool)
if err != nil {
return err
}
// Spawn the editor
content, err := shared.TextEditor("", []byte(c.storagePoolEditHelp()+"\n\n"+string(data)))
if err != nil {
return err
}
for {
// Parse the text received from the editor
newdata := api.StoragePool{}
err = yaml.Unmarshal(content, &newdata)
if err == nil {
err = client.StoragePoolPut(name, newdata)
}
// Respawn the editor
if err != nil {
fmt.Fprintf(os.Stderr, i18n.G("Config parsing error: %s")+"\n", err)
fmt.Println(i18n.G("Press enter to open the editor again"))
_, err := os.Stdin.Read(make([]byte, 1))
if err != nil {
return err
}
content, err = shared.TextEditor("", content)
if err != nil {
return err
}
continue
}
break
}
return nil
}
// doStoragePoolGet prints the value of a single configuration key of a
// storage pool. After shifting, args should read "<key>".
func (c *storageCmd) doStoragePoolGet(client *lxd.Client, name string, args []string) error {
	if len(args) != 1 {
		return errArgs
	}

	resp, err := client.StoragePoolGet(name)
	if err != nil {
		return err
	}

	// Print only the requested key; unknown keys produce no output.
	for key, value := range resp.Config {
		if key == args[0] {
			fmt.Printf("%s\n", value)
		}
	}

	return nil
}
func (c *storageCmd) doStoragePoolsList(config *lxd.Config, args []string) error {
var remote string
if len(args) > 1 {
var name string
remote, name = config.ParseRemoteAndContainer(args[1])
if name != "" {
return fmt.Errorf(i18n.G("Cannot provide container name to list"))
}
} else {
remote = config.DefaultRemote
}
client, err := lxd.NewClient(config, remote)
if err != nil {
return err
}
pools, err := client.ListStoragePools()
if err != nil {
return err
}
data := [][]string{}
for _, pool := range pools {
usedby := strconv.Itoa(len(pool.UsedBy))
data = append(data, []string{pool.Name, pool.Driver, pool.Config["source"], usedby})
}
table := tablewriter.NewWriter(os.Stdout)
table.SetAutoWrapText(false)
table.SetAlignment(tablewriter.ALIGN_LEFT)
table.SetRowLine(true)
table.SetHeader([]string{
i18n.G("NAME"),
i18n.G("DRIVER"),
i18n.G("SOURCE"),
i18n.G("USED BY")})
sort.Sort(byName(data))
table.AppendBulk(data)
table.Render()
return nil
}
func (c *storageCmd) doStoragePoolSet(client *lxd.Client, name string, args []string) error {
// we shifted @args so so it should read "<key> [<value>]"
if len(args) < 1 {
return errArgs
}
pool, err := client.StoragePoolGet(name)
if err != nil {
return err
}
key := args[0]
var value string
if len(args) < 2 {
value = ""
} else {
value = args[1]
}
if !termios.IsTerminal(int(syscall.Stdin)) && value == "-" {
buf, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return fmt.Errorf("Can't read from stdin: %s", err)
}
value = string(buf[:])
}
pool.Config[key] = value
return client.StoragePoolPut(name, pool)
}
// doStoragePoolShow renders the full configuration of a storage pool as
// YAML on stdout, converting the raw byte count in the "size" key to a
// human readable string first.
func (c *storageCmd) doStoragePoolShow(client *lxd.Client, name string) error {
	pool, err := client.StoragePoolGet(name)
	if err != nil {
		return err
	}

	// Make the size human readable; leave it untouched if it isn't a
	// plain integer byte count.
	sz, err := strconv.ParseUint(pool.Config["size"], 10, 64)
	if err == nil {
		pool.Config["size"] = shared.GetByteSizeString(int64(sz), 0)
	}

	sort.Strings(pool.UsedBy)

	// Fix: the marshalling error was previously ignored, which could
	// silently print an empty document.
	data, err := yaml.Marshal(&pool)
	if err != nil {
		return err
	}

	fmt.Printf("%s", data)
	return nil
}
func (c *storageCmd) doStoragePoolVolumesList(config *lxd.Config, remote string, pool string, args []string) error {
client, err := lxd.NewClient(config, remote)
if err != nil {
return err
}
volumes, err := client.StoragePoolVolumesList(pool)
if err != nil {
return err
}
data := [][]string{}
for _, volume := range volumes {
usedby := strconv.Itoa(len(volume.UsedBy))
data = append(data, []string{volume.Type, volume.Name, usedby})
}
table := tablewriter.NewWriter(os.Stdout)
table.SetAutoWrapText(false)
table.SetAlignment(tablewriter.ALIGN_LEFT)
table.SetRowLine(true)
table.SetHeader([]string{
i18n.G("TYPE"),
i18n.G("NAME"),
i18n.G("USED BY")})
sort.Sort(byNameAndType(data))
table.AppendBulk(data)
table.Render()
return nil
}
// doStoragePoolVolumeCreate creates a new storage volume on the given
// pool from "key=value" configuration arguments.
func (c *storageCmd) doStoragePoolVolumeCreate(client *lxd.Client, pool string, volume string, args []string) error {
	// Parse the "key=value" configuration entries.
	config := map[string]string{}
	for _, arg := range args {
		fields := strings.SplitN(arg, "=", 2)
		if len(fields) < 2 {
			return errArgs
		}

		config[fields[0]] = fields[1]
	}

	volName, volType := c.parseVolume(volume)
	err := client.StoragePoolVolumeTypeCreate(pool, volName, volType, config)
	if err == nil {
		fmt.Printf(i18n.G("Storage volume %s created")+"\n", volume)
	}

	return err
}
// doStoragePoolVolumeDelete removes a storage volume from the given pool
// and prints a confirmation message on success.
func (c *storageCmd) doStoragePoolVolumeDelete(client *lxd.Client, pool string, volume string) error {
	volName, volType := c.parseVolume(volume)
	if err := client.StoragePoolVolumeTypeDelete(pool, volName, volType); err != nil {
		return err
	}

	fmt.Printf(i18n.G("Storage volume %s deleted")+"\n", volume)
	return nil
}
func (c *storageCmd) doStoragePoolVolumeGet(client *lxd.Client, pool string, volume string, args []string) error {
// we shifted @args so so it should read "<key>"
if len(args) != 2 {
return errArgs
}
volName, volType := c.parseVolume(volume)
resp, err := client.StoragePoolVolumeTypeGet(pool, volName, volType)
if err != nil {
return err
}
for k, v := range resp.Config {
if k == args[1] {
fmt.Printf("%s\n", v)
}
}
return nil
}
func (c *storageCmd) doStoragePoolVolumeSet(client *lxd.Client, pool string, volume string, args []string) error {
// we shifted @args so so it should read "<key> [<value>]"
if len(args) < 2 {
return errArgs
}
volName, volType := c.parseVolume(volume)
volumeConfig, err := client.StoragePoolVolumeTypeGet(pool, volName, volType)
if err != nil {
return err
}
key := args[1]
var value string
if len(args) < 3 {
value = ""
} else {
value = args[2]
}
if !termios.IsTerminal(int(syscall.Stdin)) && value == "-" {
buf, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return fmt.Errorf("Can't read from stdin: %s", err)
}
value = string(buf[:])
}
volumeConfig.Config[key] = value
return client.StoragePoolVolumeTypePut(pool, volName, volType, volumeConfig)
}
// doStoragePoolVolumeShow renders the full configuration of a storage
// volume as YAML on stdout, converting the raw byte count in the "size"
// key to a human readable string first.
func (c *storageCmd) doStoragePoolVolumeShow(client *lxd.Client, pool string, volume string) error {
	volName, volType := c.parseVolume(volume)
	volumeStruct, err := client.StoragePoolVolumeTypeGet(pool, volName, volType)
	if err != nil {
		return err
	}

	// Make the size human readable; leave it untouched if it isn't a
	// plain integer byte count.
	sz, err := strconv.ParseUint(volumeStruct.Config["size"], 10, 64)
	if err == nil {
		volumeStruct.Config["size"] = shared.GetByteSizeString(int64(sz), 0)
	}

	sort.Strings(volumeStruct.UsedBy)

	// Fix: the marshalling error was previously ignored, which could
	// silently print an empty document.
	data, err := yaml.Marshal(&volumeStruct)
	if err != nil {
		return err
	}

	fmt.Printf("%s", data)
	return nil
}
func (c *storageCmd) doStoragePoolVolumeEdit(client *lxd.Client, pool string, volume string) error {
volName, volType := c.parseVolume(volume)
// If stdin isn't a terminal, read text from it
if !termios.IsTerminal(int(syscall.Stdin)) {
contents, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return err
}
newdata := api.StorageVolume{}
err = yaml.Unmarshal(contents, &newdata)
if err != nil {
return err
}
return client.StoragePoolVolumeTypePut(pool, volName, volType, newdata)
}
// Extract the current value
vol, err := client.StoragePoolVolumeTypeGet(pool, volName, volType)
if err != nil {
return err
}
data, err := yaml.Marshal(&vol)
if err != nil {
return err
}
// Spawn the editor
content, err := shared.TextEditor("", []byte(c.storagePoolVolumeEditHelp()+"\n\n"+string(data)))
if err != nil {
return err
}
for {
// Parse the text received from the editor
newdata := api.StorageVolume{}
err = yaml.Unmarshal(content, &newdata)
if err == nil {
err = client.StoragePoolVolumeTypePut(pool, volName, volType, newdata)
}
// Respawn the editor
if err != nil {
fmt.Fprintf(os.Stderr, i18n.G("Config parsing error: %s")+"\n", err)
fmt.Println(i18n.G("Press enter to open the editor again"))
_, err := os.Stdin.Read(make([]byte, 1))
if err != nil {
return err
}
content, err = shared.TextEditor("", content)
if err != nil {
return err
}
continue
}
break
}
return nil
}
lxc/storage: make volume.size user friendly
Signed-off-by: Christian Brauner <48455ab3070520a2d174545c7239d6d0fabd9a83@ubuntu.com>
package main
import (
"fmt"
"io/ioutil"
"os"
"sort"
"strconv"
"strings"
"syscall"
"github.com/olekukonko/tablewriter"
"gopkg.in/yaml.v2"
"github.com/lxc/lxd"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/i18n"
"github.com/lxc/lxd/shared/termios"
)
// byNameAndType sorts a [][]string table of storage volumes first by the
// type column (index 0) and then by the name column (index 1), with empty
// names sorting after non-empty ones.
type byNameAndType [][]string

// Len implements sort.Interface.
func (a byNameAndType) Len() int {
	return len(a)
}

// Swap implements sort.Interface.
func (a byNameAndType) Swap(i, j int) {
	a[i], a[j] = a[j], a[i]
}

// Less implements sort.Interface: order by type, then by name with empty
// names last.
func (a byNameAndType) Less(i, j int) bool {
	if a[i][0] != a[j][0] {
		return a[i][0] < a[j][0]
	}

	switch {
	case a[i][1] == "":
		return false
	case a[j][1] == "":
		return true
	default:
		return a[i][1] < a[j][1]
	}
}
// storageCmd implements the "lxc storage" subcommand; it carries no state.
type storageCmd struct {
}

// showByDefault indicates that this command is listed in the main help output.
func (c *storageCmd) showByDefault() bool {
	return true
}
// storagePoolEditHelp returns the commented header that is prepended to
// the YAML document when a storage pool is edited interactively.
func (c *storageCmd) storagePoolEditHelp() string {
	return i18n.G(
		`### This is a yaml representation of a storage pool.
### Any line starting with a '# will be ignored.
###
### A storage pool consists of a set of configuration items.
###
### An example would look like:
### name: default
### driver: zfs
### used_by: []
### config:
### size: "61203283968"
### source: /home/chb/mnt/lxd_test/default.img
### zfs.pool_name: default`)
}
// storagePoolVolumeEditHelp returns the commented header that is
// prepended to the YAML document when a storage volume is edited
// interactively.
func (c *storageCmd) storagePoolVolumeEditHelp() string {
	return i18n.G(
		`### This is a yaml representation of a storage volume.
### Any line starting with a '# will be ignored.
###
### A storage volume consists of a set of configuration items.
###
### name: vol1
### type: custom
### used_by: []
### config:
### size: "61203283968"`)
}
// usage returns the full help text for the "lxc storage" command family,
// covering both pool-level and volume-level subcommands.
func (c *storageCmd) usage() string {
	return i18n.G(
		`Manage storage.
lxc storage list [<remote>:] List available storage pools.
lxc storage show [<remote>:]<pool> Show details of a storage pool.
lxc storage create [<remote>:]<pool> <driver> [key=value]... Create a storage pool.
lxc storage get [<remote>:]<pool> <key> Get storage pool configuration.
lxc storage set [<remote>:]<pool> <key> <value> Set storage pool configuration.
lxc storage unset [<remote>:]<pool> <key> Unset storage pool configuration.
lxc storage delete [<remote>:]<pool> Delete a storage pool.
lxc storage edit [<remote>:]<pool>
Edit storage pool, either by launching external editor or reading STDIN.
Example: lxc storage edit [<remote>:]<pool> # launch editor
cat pool.yaml | lxc storage edit [<remote>:]<pool> # read from pool.yaml
lxc storage volume list [<remote>:]<pool> List available storage volumes on a storage pool.
lxc storage volume show [<remote>:]<pool> <volume> Show details of a storage volume on a storage pool.
lxc storage volume create [<remote>:]<pool> <volume> [key=value]... Create a storage volume on a storage pool.
lxc storage volume get [<remote>:]<pool> <volume> <key> Get storage volume configuration on a storage pool.
lxc storage volume set [<remote>:]<pool> <volume> <key> <value> Set storage volume configuration on a storage pool.
lxc storage volume unset [<remote>:]<pool> <volume> <key> Unset storage volume configuration on a storage pool.
lxc storage volume delete [<remote>:]<pool> <volume> Delete a storage volume on a storage pool.
lxc storage volume edit [<remote>:]<pool> <volume>
Edit storage pool, either by launching external editor or reading STDIN.
Example: lxc storage volume edit [<remote>:]<pool> <volume> # launch editor
cat pool.yaml | lxc storage volume edit [<remote>:]<pool> <volume> # read from pool.yaml
lxc storage volume attach [<remote>:]<pool> <volume> <container> [device name] <path>
lxc storage volume attach-profile [<remote:>]<pool> <volume> <profile> [device name] <path>
lxc storage volume detach [<remote>:]<pool> <volume> <container> [device name]
lxc storage volume detach-profile [<remote:>]<pool> <volume> <profile> [device name]
Unless specified through a prefix, all volume operations affect "custom" (user created) volumes.
Examples:
To show the properties of a custom volume called "data" in the "default" pool:
lxc storage volume show default data
To show the properties of the filesystem for a container called "data" in the "default" pool:
lxc storage volume show default container/data
`)
}
// flags registers no command line flags for the storage command.
func (c *storageCmd) flags() {}
// run dispatches "lxc storage" invocations to the matching handler.
//
// args[0] is the top-level verb ("list", "create", "volume", ...). For
// the "volume" family args[1] is the nested verb and args[2]/args[3] are
// the pool and volume names; handlers receive the already-shifted
// argument tail.
func (c *storageCmd) run(config *lxd.Config, args []string) error {
	if len(args) < 1 {
		return errArgs
	}

	// "list" may be invoked without a pool argument.
	if args[0] == "list" {
		return c.doStoragePoolsList(config, args)
	}

	if len(args) < 2 {
		return errArgs
	}

	// NOTE(review): for "volume" subcommands args[1] is the nested verb,
	// so "sub" parsed here is not a pool name; the pool is taken from
	// args[2] inside the switch instead.
	remote, sub := config.ParseRemoteAndContainer(args[1])
	client, err := lxd.NewClient(config, remote)
	if err != nil {
		return err
	}

	if args[0] == "volume" {
		// Volume-level subcommands: args = ["volume", <verb>, <pool>, <volume>, ...].
		switch args[1] {
		case "attach":
			if len(args) < 5 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			return c.doStoragePoolVolumeAttach(client, pool, volume, args[4:])
		case "attach-profile":
			if len(args) < 5 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			return c.doStoragePoolVolumeAttachProfile(client, pool, volume, args[4:])
		case "create":
			if len(args) < 4 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			return c.doStoragePoolVolumeCreate(client, pool, volume, args[4:])
		case "delete":
			if len(args) != 4 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			return c.doStoragePoolVolumeDelete(client, pool, volume)
		case "detach":
			if len(args) < 4 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			return c.doStoragePoolVolumeDetach(client, pool, volume, args[4:])
		case "detach-profile":
			if len(args) < 5 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			return c.doStoragePoolVolumeDetachProfile(client, pool, volume, args[4:])
		case "edit":
			if len(args) != 4 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			return c.doStoragePoolVolumeEdit(client, pool, volume)
		case "get":
			if len(args) < 4 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			// args[3:] keeps the volume name in front of the key, as
			// expected by doStoragePoolVolumeGet.
			return c.doStoragePoolVolumeGet(client, pool, volume, args[3:])
		case "list":
			if len(args) != 3 {
				return errArgs
			}
			pool := args[2]
			return c.doStoragePoolVolumesList(config, remote, pool, args)
		case "set":
			if len(args) < 4 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			return c.doStoragePoolVolumeSet(client, pool, volume, args[3:])
		case "unset":
			// "unset" is "set" with an implicit empty value.
			if len(args) < 4 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			return c.doStoragePoolVolumeSet(client, pool, volume, args[3:])
		case "show":
			if len(args) != 4 {
				return errArgs
			}
			pool := args[2]
			volume := args[3]
			return c.doStoragePoolVolumeShow(client, pool, volume)
		default:
			return errArgs
		}
	} else {
		// Pool-level subcommands: args = [<verb>, <pool>, ...].
		pool := sub
		switch args[0] {
		case "create":
			if len(args) < 3 {
				return errArgs
			}
			driver := args[2]
			return c.doStoragePoolCreate(client, pool, driver, args[3:])
		case "delete":
			return c.doStoragePoolDelete(client, pool)
		case "edit":
			return c.doStoragePoolEdit(client, pool)
		case "get":
			if len(args) < 2 {
				return errArgs
			}
			return c.doStoragePoolGet(client, pool, args[2:])
		case "set":
			if len(args) < 2 {
				return errArgs
			}
			return c.doStoragePoolSet(client, pool, args[2:])
		case "unset":
			// "unset" is "set" with an implicit empty value.
			if len(args) < 2 {
				return errArgs
			}
			return c.doStoragePoolSet(client, pool, args[2:])
		case "show":
			if len(args) < 2 {
				return errArgs
			}
			return c.doStoragePoolShow(client, pool)
		default:
			return errArgs
		}
	}
}
// parseVolume splits a volume reference of the form "[type/]name" into
// its (name, type) components. References without a type prefix default
// to the "custom" volume type.
func (c *storageCmd) parseVolume(name string) (string, string) {
	fields := strings.SplitN(name, "/", 2)
	if len(fields) == 1 {
		return fields[0], "custom"
	}

	return fields[1], fields[0]
}
// doStoragePoolVolumeAttach attaches a custom storage volume to a
// container as a "disk" device and waits for the operation to complete.
// args is "<container> [<device name>] <path>"; when no device name is
// given the volume name is reused as the device name.
func (c *storageCmd) doStoragePoolVolumeAttach(client *lxd.Client, pool string, volume string, args []string) error {
	if len(args) < 2 || len(args) > 3 {
		return errArgs
	}

	container := args[0]
	devPath := ""
	devName := ""
	if len(args) == 2 {
		// Only the path has been given to us.
		devPath = args[1]
		devName = volume
	} else if len(args) == 3 {
		// Path and device name have been given to us.
		devName = args[1]
		devPath = args[2]
	}

	volName, volType := c.parseVolume(volume)
	if volType != "custom" {
		return fmt.Errorf(i18n.G("Only \"custom\" volumes can be attached to containers."))
	}

	// Check if the requested storage volume actually
	// exists on the requested storage pool.
	vol, err := client.StoragePoolVolumeTypeGet(pool, volName, volType)
	if err != nil {
		return err
	}

	// Add the disk device and wait for the background operation.
	props := []string{fmt.Sprintf("pool=%s", pool), fmt.Sprintf("path=%s", devPath), fmt.Sprintf("source=%s", vol.Name)}
	resp, err := client.ContainerDeviceAdd(container, devName, "disk", props)
	if err != nil {
		return err
	}

	return client.WaitForSuccess(resp.Operation)
}
// doStoragePoolVolumeDetach removes the disk device that attaches the
// given storage volume to a container and waits for the operation to
// complete. args is "<container> [<device name>]"; without an explicit
// device name the matching device is looked up by pool and source.
func (c *storageCmd) doStoragePoolVolumeDetach(client *lxd.Client, pool string, volume string, args []string) error {
	if len(args) < 1 || len(args) > 2 {
		return errArgs
	}

	containerName := args[0]
	devName := ""
	if len(args) == 2 {
		devName = args[1]
	}

	container, err := client.ContainerInfo(containerName)
	if err != nil {
		return err
	}

	// No device name given: find the single disk device referencing this
	// pool/volume pair, failing if more than one matches.
	if devName == "" {
		for n, d := range container.Devices {
			if d["type"] == "disk" && d["pool"] == pool && d["source"] == volume {
				if devName != "" {
					return fmt.Errorf(i18n.G("More than one device matches, specify the device name."))
				}
				devName = n
			}
		}
	}

	if devName == "" {
		return fmt.Errorf(i18n.G("No device found for this storage volume."))
	}

	_, ok := container.Devices[devName]
	if !ok {
		return fmt.Errorf(i18n.G("The specified device doesn't exist"))
	}

	// Remove the device and wait for the background operation.
	resp, err := client.ContainerDeviceDelete(containerName, devName)
	if err != nil {
		return err
	}

	return client.WaitForSuccess(resp.Operation)
}
// doStoragePoolVolumeAttachProfile adds a custom storage volume to a
// profile as a "disk" device.
// args is "<profile> [<device name>] <path>"; when no device name is
// given the volume name is reused as the device name.
func (c *storageCmd) doStoragePoolVolumeAttachProfile(client *lxd.Client, pool string, volume string, args []string) error {
	if len(args) < 2 || len(args) > 3 {
		return errArgs
	}

	profile := args[0]
	devPath := ""
	devName := ""
	if len(args) == 2 {
		// Only the path has been given to us.
		devPath = args[1]
		devName = volume
	} else if len(args) == 3 {
		// Path and device name have been given to us.
		devName = args[1]
		devPath = args[2]
	}

	volName, volType := c.parseVolume(volume)
	if volType != "custom" {
		// NOTE(review): this message mentions containers although we are
		// attaching to a profile; kept as-is because it is a translated string.
		return fmt.Errorf(i18n.G("Only \"custom\" volumes can be attached to containers."))
	}

	// Check if the requested storage volume actually
	// exists on the requested storage pool.
	vol, err := client.StoragePoolVolumeTypeGet(pool, volName, volType)
	if err != nil {
		return err
	}

	props := []string{fmt.Sprintf("pool=%s", pool), fmt.Sprintf("path=%s", devPath), fmt.Sprintf("source=%s", vol.Name)}
	_, err = client.ProfileDeviceAdd(profile, devName, "disk", props)
	return err
}
// doStoragePoolCreate creates a new storage pool with the given driver,
// parsing any trailing "key=value" arguments as pool configuration.
func (c *storageCmd) doStoragePoolCreate(client *lxd.Client, name string, driver string, args []string) error {
	// Parse the "key=value" configuration entries.
	config := map[string]string{}
	for _, arg := range args {
		fields := strings.SplitN(arg, "=", 2)
		if len(fields) < 2 {
			return errArgs
		}

		config[fields[0]] = fields[1]
	}

	err := client.StoragePoolCreate(name, driver, config)
	if err == nil {
		fmt.Printf(i18n.G("Storage pool %s created")+"\n", name)
	}

	return err
}
// doStoragePoolVolumeDetachProfile removes the disk device that attaches
// the given storage volume to a profile.
// args is "<profile> [<device name>]"; without an explicit device name
// the matching device is looked up by pool and source.
func (c *storageCmd) doStoragePoolVolumeDetachProfile(client *lxd.Client, pool string, volume string, args []string) error {
	if len(args) < 1 || len(args) > 2 {
		return errArgs
	}

	profileName := args[0]
	devName := ""
	if len(args) > 1 {
		devName = args[1]
	}

	profile, err := client.ProfileConfig(profileName)
	if err != nil {
		return err
	}

	// No device name given: find the single disk device referencing this
	// pool/volume pair, failing if more than one matches.
	if devName == "" {
		for n, d := range profile.Devices {
			if d["type"] == "disk" && d["pool"] == pool && d["source"] == volume {
				if devName != "" {
					return fmt.Errorf(i18n.G("More than one device matches, specify the device name."))
				}
				devName = n
			}
		}
	}

	if devName == "" {
		return fmt.Errorf(i18n.G("No device found for this storage volume."))
	}

	_, ok := profile.Devices[devName]
	if !ok {
		return fmt.Errorf(i18n.G("The specified device doesn't exist"))
	}

	_, err = client.ProfileDeviceDelete(profileName, devName)
	return err
}
// doStoragePoolDelete removes the given storage pool and prints a
// confirmation message on success.
func (c *storageCmd) doStoragePoolDelete(client *lxd.Client, name string) error {
	if err := client.StoragePoolDelete(name); err != nil {
		return err
	}

	fmt.Printf(i18n.G("Storage pool %s deleted")+"\n", name)
	return nil
}
// doStoragePoolEdit edits a storage pool's YAML representation, either
// from stdin (when piped) or interactively through an external editor,
// re-opening the editor until the document parses and applies cleanly.
func (c *storageCmd) doStoragePoolEdit(client *lxd.Client, name string) error {
	// If stdin isn't a terminal, read text from it
	if !termios.IsTerminal(int(syscall.Stdin)) {
		contents, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			return err
		}

		newdata := api.StoragePool{}
		err = yaml.Unmarshal(contents, &newdata)
		if err != nil {
			return err
		}
		return client.StoragePoolPut(name, newdata)
	}

	// Extract the current value
	pool, err := client.StoragePoolGet(name)
	if err != nil {
		return err
	}

	data, err := yaml.Marshal(&pool)
	if err != nil {
		return err
	}

	// Spawn the editor
	content, err := shared.TextEditor("", []byte(c.storagePoolEditHelp()+"\n\n"+string(data)))
	if err != nil {
		return err
	}

	for {
		// Parse the text received from the editor
		newdata := api.StoragePool{}
		err = yaml.Unmarshal(content, &newdata)
		if err == nil {
			err = client.StoragePoolPut(name, newdata)
		}

		// Respawn the editor so the user can fix the document.
		if err != nil {
			fmt.Fprintf(os.Stderr, i18n.G("Config parsing error: %s")+"\n", err)
			fmt.Println(i18n.G("Press enter to open the editor again"))

			// Wait for a key press before reopening the editor.
			_, err := os.Stdin.Read(make([]byte, 1))
			if err != nil {
				return err
			}

			content, err = shared.TextEditor("", content)
			if err != nil {
				return err
			}
			continue
		}
		break
	}
	return nil
}
// doStoragePoolGet prints the value of a single configuration key of a
// storage pool. After shifting, args should read "<key>".
func (c *storageCmd) doStoragePoolGet(client *lxd.Client, name string, args []string) error {
	if len(args) != 1 {
		return errArgs
	}

	resp, err := client.StoragePoolGet(name)
	if err != nil {
		return err
	}

	// Print only the requested key; unknown keys produce no output.
	for key, value := range resp.Config {
		if key == args[0] {
			fmt.Printf("%s\n", value)
		}
	}

	return nil
}
// doStoragePoolsList prints a sorted table of all storage pools on the
// remote given in args[1] (or the default remote), showing name, driver,
// source and the number of entities using each pool.
func (c *storageCmd) doStoragePoolsList(config *lxd.Config, args []string) error {
	var remote string
	if len(args) > 1 {
		var name string
		// Only a remote may be given here; a trailing name is an error.
		remote, name = config.ParseRemoteAndContainer(args[1])
		if name != "" {
			return fmt.Errorf(i18n.G("Cannot provide container name to list"))
		}
	} else {
		remote = config.DefaultRemote
	}

	client, err := lxd.NewClient(config, remote)
	if err != nil {
		return err
	}

	pools, err := client.ListStoragePools()
	if err != nil {
		return err
	}

	// One table row per pool.
	data := [][]string{}
	for _, pool := range pools {
		usedby := strconv.Itoa(len(pool.UsedBy))
		data = append(data, []string{pool.Name, pool.Driver, pool.Config["source"], usedby})
	}

	table := tablewriter.NewWriter(os.Stdout)
	table.SetAutoWrapText(false)
	table.SetAlignment(tablewriter.ALIGN_LEFT)
	table.SetRowLine(true)
	table.SetHeader([]string{
		i18n.G("NAME"),
		i18n.G("DRIVER"),
		i18n.G("SOURCE"),
		i18n.G("USED BY")})
	sort.Sort(byName(data))
	table.AppendBulk(data)
	table.Render()

	return nil
}
// doStoragePoolSet sets a single configuration key on a storage pool; an
// omitted value clears the key, and "-" reads the value from stdin when
// stdin is not a terminal.
func (c *storageCmd) doStoragePoolSet(client *lxd.Client, name string, args []string) error {
	// we shifted @args so it should read "<key> [<value>]"
	if len(args) < 1 {
		return errArgs
	}

	pool, err := client.StoragePoolGet(name)
	if err != nil {
		return err
	}

	key := args[0]
	var value string
	if len(args) < 2 {
		value = ""
	} else {
		value = args[1]
	}

	// A value of "-" on a piped stdin means "read the value from stdin".
	if !termios.IsTerminal(int(syscall.Stdin)) && value == "-" {
		buf, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			return fmt.Errorf("Can't read from stdin: %s", err)
		}
		value = string(buf[:])
	}

	pool.Config[key] = value
	return client.StoragePoolPut(name, pool)
}
// doStoragePoolShow renders the full configuration of a storage pool as
// YAML on stdout, converting the raw byte counts in the "size" and
// "volume.size" keys to human readable strings first.
func (c *storageCmd) doStoragePoolShow(client *lxd.Client, name string) error {
	pool, err := client.StoragePoolGet(name)
	if err != nil {
		return err
	}

	// Make the sizes human readable; leave them untouched if they aren't
	// plain integer byte counts.
	sz, err := strconv.ParseUint(pool.Config["size"], 10, 64)
	if err == nil {
		pool.Config["size"] = shared.GetByteSizeString(int64(sz), 0)
	}

	sz, err = strconv.ParseUint(pool.Config["volume.size"], 10, 64)
	if err == nil {
		pool.Config["volume.size"] = shared.GetByteSizeString(int64(sz), 0)
	}

	sort.Strings(pool.UsedBy)

	// Fix: the marshalling error was previously ignored, which could
	// silently print an empty document.
	data, err := yaml.Marshal(&pool)
	if err != nil {
		return err
	}

	fmt.Printf("%s", data)
	return nil
}
// doStoragePoolVolumesList prints a sorted table of all storage volumes
// on the given pool, showing each volume's type, name and usage count.
func (c *storageCmd) doStoragePoolVolumesList(config *lxd.Config, remote string, pool string, args []string) error {
	client, err := lxd.NewClient(config, remote)
	if err != nil {
		return err
	}

	volumes, err := client.StoragePoolVolumesList(pool)
	if err != nil {
		return err
	}

	// One table row per volume.
	data := [][]string{}
	for _, volume := range volumes {
		usedby := strconv.Itoa(len(volume.UsedBy))
		data = append(data, []string{volume.Type, volume.Name, usedby})
	}

	table := tablewriter.NewWriter(os.Stdout)
	table.SetAutoWrapText(false)
	table.SetAlignment(tablewriter.ALIGN_LEFT)
	table.SetRowLine(true)
	table.SetHeader([]string{
		i18n.G("TYPE"),
		i18n.G("NAME"),
		i18n.G("USED BY")})
	sort.Sort(byNameAndType(data))
	table.AppendBulk(data)
	table.Render()

	return nil
}
// doStoragePoolVolumeCreate creates a new storage volume on the given
// pool from "key=value" configuration arguments.
func (c *storageCmd) doStoragePoolVolumeCreate(client *lxd.Client, pool string, volume string, args []string) error {
	// Parse the "key=value" configuration entries.
	config := map[string]string{}
	for _, arg := range args {
		fields := strings.SplitN(arg, "=", 2)
		if len(fields) < 2 {
			return errArgs
		}

		config[fields[0]] = fields[1]
	}

	volName, volType := c.parseVolume(volume)
	err := client.StoragePoolVolumeTypeCreate(pool, volName, volType, config)
	if err == nil {
		fmt.Printf(i18n.G("Storage volume %s created")+"\n", volume)
	}

	return err
}
// doStoragePoolVolumeDelete removes a storage volume from the given pool
// and prints a confirmation message on success.
func (c *storageCmd) doStoragePoolVolumeDelete(client *lxd.Client, pool string, volume string) error {
	volName, volType := c.parseVolume(volume)
	if err := client.StoragePoolVolumeTypeDelete(pool, volName, volType); err != nil {
		return err
	}

	fmt.Printf(i18n.G("Storage volume %s deleted")+"\n", volume)
	return nil
}
// doStoragePoolVolumeGet prints the value of a single configuration key
// of a storage volume. After shifting, args should read "<volume> <key>".
func (c *storageCmd) doStoragePoolVolumeGet(client *lxd.Client, pool string, volume string, args []string) error {
	if len(args) != 2 {
		return errArgs
	}

	volName, volType := c.parseVolume(volume)
	resp, err := client.StoragePoolVolumeTypeGet(pool, volName, volType)
	if err != nil {
		return err
	}

	// Print only the requested key; unknown keys produce no output.
	for key, value := range resp.Config {
		if key == args[1] {
			fmt.Printf("%s\n", value)
		}
	}

	return nil
}
// doStoragePoolVolumeSet sets a single configuration key on a storage
// volume; an omitted value clears the key, and "-" reads the value from
// stdin when stdin is not a terminal.
func (c *storageCmd) doStoragePoolVolumeSet(client *lxd.Client, pool string, volume string, args []string) error {
	// we shifted @args so it should read "<volume> <key> [<value>]"
	if len(args) < 2 {
		return errArgs
	}

	volName, volType := c.parseVolume(volume)
	volumeConfig, err := client.StoragePoolVolumeTypeGet(pool, volName, volType)
	if err != nil {
		return err
	}

	key := args[1]
	var value string
	if len(args) < 3 {
		value = ""
	} else {
		value = args[2]
	}

	// A value of "-" on a piped stdin means "read the value from stdin".
	if !termios.IsTerminal(int(syscall.Stdin)) && value == "-" {
		buf, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			return fmt.Errorf("Can't read from stdin: %s", err)
		}
		value = string(buf[:])
	}

	volumeConfig.Config[key] = value
	return client.StoragePoolVolumeTypePut(pool, volName, volType, volumeConfig)
}
// doStoragePoolVolumeShow renders the full configuration of a storage
// volume as YAML on stdout, converting the raw byte count in the "size"
// key to a human readable string first.
func (c *storageCmd) doStoragePoolVolumeShow(client *lxd.Client, pool string, volume string) error {
	volName, volType := c.parseVolume(volume)
	volumeStruct, err := client.StoragePoolVolumeTypeGet(pool, volName, volType)
	if err != nil {
		return err
	}

	// Make the size human readable; leave it untouched if it isn't a
	// plain integer byte count.
	sz, err := strconv.ParseUint(volumeStruct.Config["size"], 10, 64)
	if err == nil {
		volumeStruct.Config["size"] = shared.GetByteSizeString(int64(sz), 0)
	}

	sort.Strings(volumeStruct.UsedBy)

	// Fix: the marshalling error was previously ignored, which could
	// silently print an empty document.
	data, err := yaml.Marshal(&volumeStruct)
	if err != nil {
		return err
	}

	fmt.Printf("%s", data)
	return nil
}
// doStoragePoolVolumeEdit edits a storage volume's YAML representation,
// either from stdin (when piped) or interactively through an external
// editor, re-opening the editor until the document parses and applies
// cleanly.
func (c *storageCmd) doStoragePoolVolumeEdit(client *lxd.Client, pool string, volume string) error {
	volName, volType := c.parseVolume(volume)

	// If stdin isn't a terminal, read text from it
	if !termios.IsTerminal(int(syscall.Stdin)) {
		contents, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			return err
		}

		newdata := api.StorageVolume{}
		err = yaml.Unmarshal(contents, &newdata)
		if err != nil {
			return err
		}
		return client.StoragePoolVolumeTypePut(pool, volName, volType, newdata)
	}

	// Extract the current value
	vol, err := client.StoragePoolVolumeTypeGet(pool, volName, volType)
	if err != nil {
		return err
	}

	data, err := yaml.Marshal(&vol)
	if err != nil {
		return err
	}

	// Spawn the editor
	content, err := shared.TextEditor("", []byte(c.storagePoolVolumeEditHelp()+"\n\n"+string(data)))
	if err != nil {
		return err
	}

	for {
		// Parse the text received from the editor
		newdata := api.StorageVolume{}
		err = yaml.Unmarshal(content, &newdata)
		if err == nil {
			err = client.StoragePoolVolumeTypePut(pool, volName, volType, newdata)
		}

		// Respawn the editor so the user can fix the document.
		if err != nil {
			fmt.Fprintf(os.Stderr, i18n.G("Config parsing error: %s")+"\n", err)
			fmt.Println(i18n.G("Press enter to open the editor again"))

			// Wait for a key press before reopening the editor.
			_, err := os.Stdin.Read(make([]byte, 1))
			if err != nil {
				return err
			}

			content, err = shared.TextEditor("", content)
			if err != nil {
				return err
			}
			continue
		}
		break
	}
	return nil
}
|
//go:build linux && cgo && !agent
// +build linux,cgo,!agent
package db
import (
"database/sql"
"fmt"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/lxc/lxd/lxd/db/cluster"
"github.com/lxc/lxd/lxd/db/query"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/osarch"
"github.com/lxc/lxd/shared/version"
)
// ClusterRole represents the role of a member in a cluster.
type ClusterRole string
// ClusterRoleDatabase represents the database role in a cluster.
const ClusterRoleDatabase = ClusterRole("database")
// ClusterRoles maps role ids into human-readable names.
//
// Note: the database role is currently stored directly in the raft
// configuration which acts as single source of truth for it. This map should
// only contain LXD-specific cluster roles.
var ClusterRoles = map[int]ClusterRole{}
// NodeInfo holds information about a single LXD instance in a cluster,
// as stored in the cluster database's "nodes" table.
type NodeInfo struct {
	ID            int64     // Stable node identifier
	Name          string    // User-assigned name of the node
	Address       string    // Network address of the node
	Description   string    // Node description (optional)
	Schema        int       // Schema version of the LXD code running the node
	APIExtensions int       // Number of API extensions of the LXD code running on the node
	Heartbeat     time.Time // Timestamp of the last heartbeat
	Roles         []string  // List of cluster roles
	Architecture  int       // Node architecture
}

// IsOffline returns true if the last successful heartbeat time of the node is
// older than the given threshold.
func (n NodeInfo) IsOffline(threshold time.Duration) bool {
	return nodeIsOffline(threshold, n.Heartbeat)
}
// ToAPI returns a LXD API entry describing this cluster member, combining
// data from the cluster database (offline threshold, failure domains, max
// version) and the local database (raft role) to fill in status, roles
// and failure domain.
func (n NodeInfo) ToAPI(cluster *Cluster, node *Node) (*api.ClusterMember, error) {
	// Load some needed data.
	var err error
	var offlineThreshold time.Duration
	var maxVersion [2]int
	var failureDomain string

	// From cluster database.
	err = cluster.Transaction(func(tx *ClusterTx) error {
		// Get offline threshold.
		offlineThreshold, err = tx.GetNodeOfflineThreshold()
		if err != nil {
			return errors.Wrap(err, "Load offline threshold config")
		}

		// Get failure domains.
		nodesDomains, err := tx.GetNodesFailureDomains()
		if err != nil {
			return errors.Wrap(err, "Load nodes failure domains")
		}

		domainsNames, err := tx.GetFailureDomainsNames()
		if err != nil {
			return errors.Wrap(err, "Load failure domains names")
		}

		domainID := nodesDomains[n.Address]
		failureDomain = domainsNames[domainID]

		// Get the highest schema and API versions.
		maxVersion, err = tx.GetNodeMaxVersion()
		if err != nil {
			return errors.Wrap(err, "Get max version")
		}

		return nil
	})
	if err != nil {
		return nil, err
	}

	// From local database: find this node's raft entry, if any.
	var raftNode *RaftNode
	err = node.Transaction(func(tx *NodeTx) error {
		nodes, err := tx.GetRaftNodes()
		if err != nil {
			// Fix: this wrap previously said "Load offline threshold
			// config", copy-pasted from the other transaction.
			return errors.Wrap(err, "Load raft nodes")
		}

		for i := range nodes {
			if nodes[i].Address != n.Address {
				continue
			}
			// Fix: point at the slice element rather than the loop
			// variable, which is rebound on each iteration.
			raftNode = &nodes[i]
			break
		}

		return nil
	})
	if err != nil {
		return nil, err
	}

	// Fill in the struct.
	result := api.ClusterMember{}
	result.Description = n.Description
	result.ServerName = n.Name
	result.URL = fmt.Sprintf("https://%s", n.Address)
	result.Database = raftNode != nil && raftNode.Role == RaftVoter
	result.Roles = n.Roles
	if result.Database {
		result.Roles = append(result.Roles, string(ClusterRoleDatabase))
	}

	result.Architecture, err = osarch.ArchitectureName(n.Architecture)
	if err != nil {
		return nil, err
	}
	result.FailureDomain = failureDomain

	if n.IsOffline(offlineThreshold) {
		result.Status = "Offline"
		result.Message = fmt.Sprintf("No heartbeat for %s (%s)", time.Since(n.Heartbeat), n.Heartbeat)
	} else {
		// Check if up to date (1 means the cluster max is newer than us;
		// renamed from "n" which shadowed the receiver).
		cmp, err := util.CompareVersions(maxVersion, n.Version())
		if err != nil {
			return nil, err
		}

		if cmp == 1 {
			result.Status = "Blocked"
			result.Message = "Needs updating to newer version"
		} else {
			result.Status = "Online"
			result.Message = "Fully operational"
		}
	}

	return &result, nil
}
// Version returns the node's version, composed by its schema level and
// number of extensions.
func (n NodeInfo) Version() [2]int {
	var v [2]int
	v[0] = n.Schema
	v[1] = n.APIExtensions
	return v
}
// GetNodeByAddress returns the node with the given network address.
func (c *ClusterTx) GetNodeByAddress(address string) (NodeInfo, error) {
	matches, err := c.nodes(false /* not pending */, "address=?", address)
	if err != nil {
		return NodeInfo{}, err
	}
	if len(matches) == 0 {
		return NodeInfo{}, ErrNoSuchObject
	}
	if len(matches) > 1 {
		return NodeInfo{}, fmt.Errorf("more than one node matches")
	}
	return matches[0], nil
}
// GetNodeMaxVersion returns the highest version possible on the cluster.
func (c *ClusterTx) GetNodeMaxVersion() ([2]int, error) {
	var maxSchema, maxAPI int

	// Get the maximum DB schema.
	if err := c.tx.QueryRow("SELECT MAX(schema) FROM nodes").Scan(&maxSchema); err != nil {
		return [2]int{}, err
	}

	// Get the maximum API extension.
	if err := c.tx.QueryRow("SELECT MAX(api_extensions) FROM nodes").Scan(&maxAPI); err != nil {
		return [2]int{}, err
	}

	// Compute the combined version.
	return [2]int{maxSchema, maxAPI}, nil
}
// GetNodeWithID returns the node with the given ID.
func (c *ClusterTx) GetNodeWithID(nodeID int) (NodeInfo, error) {
	matches, err := c.nodes(false /* not pending */, "id=?", nodeID)
	if err != nil {
		return NodeInfo{}, err
	}
	if len(matches) == 0 {
		return NodeInfo{}, ErrNoSuchObject
	}
	if len(matches) > 1 {
		return NodeInfo{}, fmt.Errorf("more than one node matches")
	}
	return matches[0], nil
}
// GetPendingNodeByAddress returns the pending node with the given network address.
func (c *ClusterTx) GetPendingNodeByAddress(address string) (NodeInfo, error) {
	matches, err := c.nodes(true /*pending */, "address=?", address)
	if err != nil {
		return NodeInfo{}, err
	}
	if len(matches) == 0 {
		return NodeInfo{}, ErrNoSuchObject
	}
	if len(matches) > 1 {
		return NodeInfo{}, fmt.Errorf("more than one node matches")
	}
	return matches[0], nil
}
// GetNodeByName returns the node with the given name.
func (c *ClusterTx) GetNodeByName(name string) (NodeInfo, error) {
	matches, err := c.nodes(false /* not pending */, "name=?", name)
	if err != nil {
		return NodeInfo{}, err
	}
	if len(matches) == 0 {
		return NodeInfo{}, ErrNoSuchObject
	}
	if len(matches) > 1 {
		return NodeInfo{}, fmt.Errorf("more than one node matches")
	}
	return matches[0], nil
}
// GetLocalNodeName returns the name of the node this method is invoked on.
// An empty string (and no error) is returned when the row is missing.
func (c *ClusterTx) GetLocalNodeName() (string, error) {
	names, err := query.SelectStrings(c.tx, "SELECT name FROM nodes WHERE id=?", c.nodeID)
	if err != nil {
		return "", err
	}
	if len(names) > 1 {
		return "", fmt.Errorf("inconsistency: non-unique node ID")
	}
	if len(names) == 0 {
		return "", nil
	}
	return names[0], nil
}
// GetLocalNodeAddress returns the address of the node this method is invoked on.
// An empty string (and no error) is returned when the row is missing.
func (c *ClusterTx) GetLocalNodeAddress() (string, error) {
	addresses, err := query.SelectStrings(c.tx, "SELECT address FROM nodes WHERE id=?", c.nodeID)
	if err != nil {
		return "", err
	}
	if len(addresses) > 1 {
		return "", fmt.Errorf("inconsistency: non-unique node ID")
	}
	if len(addresses) == 0 {
		return "", nil
	}
	return addresses[0], nil
}
// NodeIsOutdated returns true if there's some cluster node having an API or
// schema version greater than the node this method is invoked on.
func (c *ClusterTx) NodeIsOutdated() (bool, error) {
	nodes, err := c.nodes(false /* not pending */, "")
	if err != nil {
		return false, errors.Wrap(err, "Failed to fetch nodes")
	}

	// Figure our own version.
	version := [2]int{}
	for _, node := range nodes {
		if node.ID == c.nodeID {
			version = node.Version()
		}
	}
	if version[0] == 0 || version[1] == 0 {
		return false, fmt.Errorf("Inconsistency: local node not found")
	}

	// Check if any of the other nodes is greater than us.
	for _, node := range nodes {
		if node.ID == c.nodeID {
			continue
		}

		n, err := util.CompareVersions(node.Version(), version)
		if err != nil {
			// The wrapped error was previously discarded; propagate it.
			return false, errors.Wrapf(err, "Failed to compare with version of node %s", node.Name)
		}

		if n == 1 {
			// The other node's version is greater than ours.
			return true, nil
		}
	}

	return false, nil
}
// GetNodes returns all LXD nodes part of the cluster.
//
// If this LXD instance is not clustered, a list with a single node whose
// address is 0.0.0.0 is returned.
//
// Only non-pending (fully joined) members are included.
func (c *ClusterTx) GetNodes() ([]NodeInfo, error) {
	return c.nodes(false /* not pending */, "")
}
// GetNodesCount returns the number of nodes in the LXD cluster.
//
// Since there's always at least one node row, even when not-clustered, the
// return value is greater than zero
func (c *ClusterTx) GetNodesCount() (int, error) {
	n, err := query.Count(c.tx, "nodes", "")
	if err != nil {
		return 0, errors.Wrap(err, "failed to count existing nodes")
	}

	return n, nil
}
// RenameNode changes the name of an existing node.
//
// Return an error if a node with the same name already exists.
func (c *ClusterTx) RenameNode(old, new string) error {
	// Refuse to clash with an existing member name.
	dupes, err := query.Count(c.tx, "nodes", "name=?", new)
	if err != nil {
		return errors.Wrap(err, "failed to check existing nodes")
	}
	if dupes != 0 {
		return ErrAlreadyDefined
	}

	res, err := c.tx.Exec(`UPDATE nodes SET name=? WHERE name=?`, new, old)
	if err != nil {
		return errors.Wrap(err, "failed to update node name")
	}

	affected, err := res.RowsAffected()
	if err != nil {
		return errors.Wrap(err, "failed to get rows count")
	}
	if affected != 1 {
		return fmt.Errorf("expected to update one row, not %d", affected)
	}

	return nil
}
// SetDescription changes the description of the given node.
func (c *ClusterTx) SetDescription(id int64, description string) error {
	stmt := `UPDATE nodes SET description=? WHERE id=?`
	result, err := c.tx.Exec(stmt, description, id)
	if err != nil {
		// The message previously said "node name" — this updates the description.
		return errors.Wrap(err, "Failed to update node description")
	}

	n, err := result.RowsAffected()
	if err != nil {
		return errors.Wrap(err, "Failed to get rows count")
	}
	if n != 1 {
		return fmt.Errorf("Expected to update one row, not %d", n)
	}

	return nil
}
// nodes returns all LXD nodes part of the cluster matching the given WHERE
// filter, selecting either pending or fully-joined members.
func (c *ClusterTx) nodes(pending bool, where string, args ...interface{}) ([]NodeInfo, error) {
	// Get node roles.
	sql := "SELECT node_id, role FROM nodes_roles"
	nodeRoles := map[int64][]string{}
	rows, err := c.tx.Query(sql)
	if err != nil {
		// Don't fail on a missing table, we need to handle updates.
		if err.Error() != "no such table: nodes_roles" {
			return nil, err
		}
	} else {
		defer rows.Close()

		for rows.Next() {
			var nodeID int64
			var role int
			err := rows.Scan(&nodeID, &role)
			if err != nil {
				return nil, err
			}

			nodeRoles[nodeID] = append(nodeRoles[nodeID], string(ClusterRoles[role]))
		}

		// Only check iteration errors when the query succeeded: calling
		// rows.Err() on a nil *sql.Rows (the missing-table path) would panic.
		err = rows.Err()
		if err != nil {
			return nil, err
		}
	}

	// Process node entries.
	nodes := []NodeInfo{}
	dest := func(i int) []interface{} {
		nodes = append(nodes, NodeInfo{})
		return []interface{}{
			&nodes[i].ID,
			&nodes[i].Name,
			&nodes[i].Address,
			&nodes[i].Description,
			&nodes[i].Schema,
			&nodes[i].APIExtensions,
			&nodes[i].Heartbeat,
			&nodes[i].Architecture,
		}
	}

	// The first placeholder is the pending flag.
	if pending {
		args = append([]interface{}{1}, args...)
	} else {
		args = append([]interface{}{0}, args...)
	}

	// Get the node entries. Explicit leading spaces on the appended fragments
	// guarantee valid SQL regardless of trailing whitespace in the base query.
	sql = "SELECT id, name, address, description, schema, api_extensions, heartbeat, arch FROM nodes WHERE pending=?"
	if where != "" {
		sql += fmt.Sprintf(" AND %s", where)
	}
	sql += " ORDER BY id"

	stmt, err := c.tx.Prepare(sql)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()

	err = query.SelectObjects(stmt, dest, args...)
	if err != nil {
		return nil, errors.Wrap(err, "Failed to fetch nodes")
	}

	// Add the roles.
	for i, node := range nodes {
		roles, ok := nodeRoles[node.ID]
		if ok {
			nodes[i].Roles = roles
		}
	}

	return nodes, nil
}
// CreateNode adds a node to the current list of LXD nodes that are part of the
// cluster. The node's architecture will be the architecture of the machine the
// method is being run on. It returns the ID of the newly inserted row.
func (c *ClusterTx) CreateNode(name string, address string) (int64, error) {
	// Detect the architecture of the machine running this code.
	arch, err := osarch.ArchitectureGetLocalID()
	if err != nil {
		return -1, err
	}
	return c.CreateNodeWithArch(name, address, arch)
}
// CreateNodeWithArch is the same as CreateNode, but lets setting the node
// architecture explicitly.
//
// The new row is created with the current schema version and API extensions
// count of this LXD binary.
func (c *ClusterTx) CreateNodeWithArch(name string, address string, arch int) (int64, error) {
	columns := []string{"name", "address", "schema", "api_extensions", "arch"}
	values := []interface{}{name, address, cluster.SchemaVersion, version.APIExtensionsCount(), arch}
	return query.UpsertObject(c.tx, "nodes", columns, values)
}
// SetNodePendingFlag toggles the pending flag for the node. A node is pending when
// it's been accepted in the cluster, but has not yet actually joined it.
func (c *ClusterTx) SetNodePendingFlag(id int64, pending bool) error {
	var flag int
	if pending {
		flag = 1
	}

	res, err := c.tx.Exec("UPDATE nodes SET pending=? WHERE id=?", flag, id)
	if err != nil {
		return err
	}

	affected, err := res.RowsAffected()
	if err != nil {
		return err
	}
	if affected != 1 {
		return fmt.Errorf("query updated %d rows instead of 1", affected)
	}

	return nil
}
// UpdateNode updates the name an address of a node.
func (c *ClusterTx) UpdateNode(id int64, name string, address string) error {
	res, err := c.tx.Exec("UPDATE nodes SET name=?, address=? WHERE id=?", name, address, id)
	if err != nil {
		return err
	}

	affected, err := res.RowsAffected()
	if err != nil {
		return err
	}
	if affected != 1 {
		return fmt.Errorf("query updated %d rows instead of 1", affected)
	}

	return nil
}
// CreateNodeRole adds a role to the node.
func (c *ClusterTx) CreateNodeRole(id int64, role ClusterRole) error {
	// Translate the role name to its numeric ID.
	roleID := -1
	for candidate, name := range ClusterRoles {
		if name == role {
			roleID = candidate
			break
		}
	}
	if roleID < 0 {
		return fmt.Errorf("Invalid role: %v", role)
	}

	// Record the association.
	_, err := c.tx.Exec("INSERT INTO nodes_roles (node_id, role) VALUES (?, ?)", id, roleID)
	return err
}
// RemoveNodeRole removes a role from the node.
func (c *ClusterTx) RemoveNodeRole(id int64, role ClusterRole) error {
	// Translate the role name to its numeric ID.
	roleID := -1
	for candidate, name := range ClusterRoles {
		if name == role {
			roleID = candidate
			break
		}
	}
	if roleID < 0 {
		return fmt.Errorf("Invalid role: %v", role)
	}

	// Drop the association.
	_, err := c.tx.Exec("DELETE FROM nodes_roles WHERE node_id=? AND role=?", id, roleID)
	return err
}
// UpdateNodeRoles changes the list of roles on a member.
func (c *ClusterTx) UpdateNodeRoles(id int64, roles []ClusterRole) error {
	// Translate role names to ids, skipping internal-only roles.
	roleIDs := make([]int, 0, len(roles))
	for _, role := range roles {
		if role == ClusterRoleDatabase {
			continue
		}

		roleID := -1
		for candidate, name := range ClusterRoles {
			if name == role {
				roleID = candidate
				break
			}
		}
		if roleID < 0 {
			return fmt.Errorf("Invalid cluster role '%s'", role)
		}

		roleIDs = append(roleIDs, roleID)
	}

	// Replace the database records: wipe all roles, then re-insert.
	_, err := c.tx.Exec("DELETE FROM nodes_roles WHERE node_id=?", id)
	if err != nil {
		return err
	}

	for _, roleID := range roleIDs {
		_, err := c.tx.Exec("INSERT INTO nodes_roles (node_id, role) VALUES (?, ?)", id, roleID)
		if err != nil {
			return err
		}
	}

	return nil
}
// UpdateNodeFailureDomain changes the failure domain of a node.
//
// The special name "default" maps to a NULL failure_domain_id. Any other
// name is looked up in nodes_failure_domains and created on the fly if it
// doesn't exist yet.
func (c *ClusterTx) UpdateNodeFailureDomain(id int64, domain string) error {
	var domainID interface{}
	if domain == "" {
		return fmt.Errorf("Failure domain name can't be empty")
	}
	if domain == "default" {
		// NULL in the nodes table means the implicit default domain.
		domainID = nil
	} else {
		row := c.tx.QueryRow("SELECT id FROM nodes_failure_domains WHERE name=?", domain)
		err := row.Scan(&domainID)
		if err != nil {
			// Any error other than "no rows" is a real failure.
			if err != sql.ErrNoRows {
				return errors.Wrapf(err, "Load failure domain name")
			}
			// The domain doesn't exist yet: create it and use its new ID.
			result, err := c.tx.Exec("INSERT INTO nodes_failure_domains (name) VALUES (?)", domain)
			if err != nil {
				return errors.Wrapf(err, "Create new failure domain")
			}
			domainID, err = result.LastInsertId()
			if err != nil {
				return errors.Wrapf(err, "Get last inserted ID")
			}
		}
	}
	result, err := c.tx.Exec("UPDATE nodes SET failure_domain_id=? WHERE id=?", domainID, id)
	if err != nil {
		return err
	}
	n, err := result.RowsAffected()
	if err != nil {
		return err
	}
	// Exactly one node row must have matched the given ID.
	if n != 1 {
		return fmt.Errorf("Query updated %d rows instead of 1", n)
	}
	return nil
}
// GetNodeFailureDomain returns the failure domain associated with the node with the given ID.
//
// A node with no explicit domain (NULL failure_domain_id) yields "default".
func (c *ClusterTx) GetNodeFailureDomain(id int64) (string, error) {
	stmt := `
SELECT coalesce(nodes_failure_domains.name,'default')
FROM nodes LEFT JOIN nodes_failure_domains ON nodes.failure_domain_id = nodes_failure_domains.id
WHERE nodes.id=?
`
	var domain string
	err := c.tx.QueryRow(stmt, id).Scan(&domain)
	if err != nil {
		return "", err
	}
	return domain, nil
}
// GetNodesFailureDomains returns a map associating each node address with its
// failure domain code.
//
// Nodes without an explicit domain map to code 0 (the default domain).
func (c *ClusterTx) GetNodesFailureDomains() (map[string]uint64, error) {
	stmt, err := c.tx.Prepare("SELECT address, coalesce(failure_domain_id, 0) FROM nodes")
	if err != nil {
		return nil, err
	}
	// The prepared statement was previously leaked; make sure it's closed.
	defer stmt.Close()

	rows := []struct {
		Address         string
		FailureDomainID int64
	}{}

	dest := func(i int) []interface{} {
		rows = append(rows, struct {
			Address         string
			FailureDomainID int64
		}{})
		return []interface{}{&rows[len(rows)-1].Address, &rows[len(rows)-1].FailureDomainID}
	}

	err = query.SelectObjects(stmt, dest)
	if err != nil {
		return nil, err
	}

	domains := map[string]uint64{}
	for _, row := range rows {
		domains[row.Address] = uint64(row.FailureDomainID)
	}

	return domains, nil
}
// GetFailureDomainsNames return a map associating failure domain IDs to their
// names.
//
// ID 0 always maps to "default", the implicit domain used when none is set.
func (c *ClusterTx) GetFailureDomainsNames() (map[uint64]string, error) {
	stmt, err := c.tx.Prepare("SELECT id, name FROM nodes_failure_domains")
	if err != nil {
		return nil, err
	}
	// The prepared statement was previously leaked; make sure it's closed.
	defer stmt.Close()

	rows := []struct {
		ID   int64
		Name string
	}{}

	dest := func(i int) []interface{} {
		rows = append(rows, struct {
			ID   int64
			Name string
		}{})
		return []interface{}{&rows[len(rows)-1].ID, &rows[len(rows)-1].Name}
	}

	err = query.SelectObjects(stmt, dest)
	if err != nil {
		return nil, err
	}

	domains := map[uint64]string{
		0: "default", // Default failure domain, when not set
	}
	for _, row := range rows {
		domains[uint64(row.ID)] = row.Name
	}

	return domains, nil
}
// RemoveNode removes the node with the given id.
func (c *ClusterTx) RemoveNode(id int64) error {
	res, err := c.tx.Exec("DELETE FROM nodes WHERE id=?", id)
	if err != nil {
		return err
	}

	affected, err := res.RowsAffected()
	if err != nil {
		return err
	}
	if affected != 1 {
		return fmt.Errorf("query deleted %d rows instead of 1", affected)
	}

	return nil
}
// SetNodeHeartbeat updates the heartbeat column of the node with the given address.
//
// Returns ErrNoSuchObject if no node row matches the given address, so
// callers can detect a member that has disappeared.
func (c *ClusterTx) SetNodeHeartbeat(address string, heartbeat time.Time) error {
	stmt := "UPDATE nodes SET heartbeat=? WHERE address=?"
	result, err := c.tx.Exec(stmt, heartbeat, address)
	if err != nil {
		return err
	}

	n, err := result.RowsAffected()
	if err != nil {
		return err
	}

	if n < 1 {
		// No row matched: surface a typed error instead of a generic one.
		return ErrNoSuchObject
	} else if n > 1 {
		return fmt.Errorf("expected to update one row and not %d", n)
	}

	return nil
}
// NodeIsEmpty returns an empty string if the node with the given ID has no
// containers or images associated with it. Otherwise, it returns a message
// say what's left.
//
// Three checks are performed in order: instances on the node, images only
// available on the node, and custom (non-ceph) storage volumes on the node.
func (c *ClusterTx) NodeIsEmpty(id int64) (string, error) {
	// Check if the node has any instances.
	containers, err := query.SelectStrings(c.tx, "SELECT name FROM instances WHERE node_id=?", id)
	if err != nil {
		return "", errors.Wrapf(err, "Failed to get instances for node %d", id)
	}
	if len(containers) > 0 {
		message := fmt.Sprintf(
			"Node still has the following containers: %s", strings.Join(containers, ", "))
		return message, nil
	}
	// Check if the node has any images available only in it.
	images := []struct {
		fingerprint string
		nodeID      int64
	}{}
	dest := func(i int) []interface{} {
		images = append(images, struct {
			fingerprint string
			nodeID      int64
		}{})
		return []interface{}{&images[i].fingerprint, &images[i].nodeID}
	}
	// Fetch every (image, node) association across the whole cluster.
	stmt, err := c.tx.Prepare(`
SELECT fingerprint, node_id FROM images JOIN images_nodes ON images.id=images_nodes.image_id`)
	if err != nil {
		return "", err
	}
	defer stmt.Close()
	err = query.SelectObjects(stmt, dest)
	if err != nil {
		return "", errors.Wrapf(err, "Failed to get image list for node %d", id)
	}
	index := map[string][]int64{} // Map fingerprints to IDs of nodes
	for _, image := range images {
		index[image.fingerprint] = append(index[image.fingerprint], image.nodeID)
	}
	// Collect fingerprints hosted exclusively by this node.
	fingerprints := []string{}
	for fingerprint, ids := range index {
		if len(ids) > 1 {
			continue
		}
		if ids[0] == id {
			fingerprints = append(fingerprints, fingerprint)
		}
	}
	if len(fingerprints) > 0 {
		message := fmt.Sprintf(
			"Node still has the following images: %s", strings.Join(fingerprints, ", "))
		return message, nil
	}
	// Check if the node has any custom volumes. Ceph-backed volumes are
	// excluded since they are not tied to a single node.
	volumes, err := query.SelectStrings(
		c.tx, "SELECT storage_volumes.name FROM storage_volumes JOIN storage_pools ON storage_volumes.storage_pool_id=storage_pools.id WHERE storage_volumes.node_id=? AND storage_volumes.type=? AND storage_pools.driver NOT IN ('ceph', 'cephfs')",
		id, StoragePoolVolumeTypeCustom)
	if err != nil {
		return "", errors.Wrapf(err, "Failed to get custom volumes for node %d", id)
	}
	if len(volumes) > 0 {
		message := fmt.Sprintf(
			"Node still has the following custom volumes: %s", strings.Join(volumes, ", "))
		return message, nil
	}
	return "", nil
}
// ClearNode removes any instance or image associated with this node.
//
// Images that were only hosted on this node are deleted entirely; images
// also present on other nodes only lose their association with this node.
func (c *ClusterTx) ClearNode(id int64) error {
	_, err := c.tx.Exec("DELETE FROM instances WHERE node_id=?", id)
	if err != nil {
		return err
	}
	// Get the IDs of the images this node is hosting.
	ids, err := query.SelectIntegers(c.tx, "SELECT image_id FROM images_nodes WHERE node_id=?", id)
	if err != nil {
		return err
	}
	// Delete the association
	_, err = c.tx.Exec("DELETE FROM images_nodes WHERE node_id=?", id)
	if err != nil {
		return err
	}
	// Delete the image as well if this was the only node with it.
	// NOTE(review): the loop variable shadows the node id parameter here;
	// inside this loop "id" is an image ID.
	for _, id := range ids {
		// The node's own association was already removed above, so any
		// remaining count means other nodes still host the image.
		count, err := query.Count(c.tx, "images_nodes", "image_id=?", id)
		if err != nil {
			return err
		}
		if count > 0 {
			continue
		}
		_, err = c.tx.Exec("DELETE FROM images WHERE id=?", id)
		if err != nil {
			return err
		}
	}
	return nil
}
// GetNodeOfflineThreshold returns the amount of time that needs to elapse after
// which a series of unsuccessful heartbeat will make the node be considered
// offline.
func (c *ClusterTx) GetNodeOfflineThreshold() (time.Duration, error) {
	values, err := query.SelectStrings(
		c.tx, "SELECT value FROM config WHERE key='cluster.offline_threshold'")
	if err != nil {
		return -1, err
	}

	// Fall back to the compiled-in default when no value is configured.
	if len(values) == 0 {
		return time.Duration(DefaultOfflineThreshold) * time.Second, nil
	}

	seconds, err := strconv.Atoi(values[0])
	if err != nil {
		return -1, err
	}

	return time.Duration(seconds) * time.Second, nil
}
// GetNodeWithLeastInstances returns the name of the non-offline node with
// the least number of containers (either already created or being created with
// an operation). If archs is not empty, then return only nodes with an
// architecture in that list.
//
// Nodes natively supporting defaultArch are preferred over ones that don't.
func (c *ClusterTx) GetNodeWithLeastInstances(archs []int, defaultArch int) (string, error) {
	threshold, err := c.GetNodeOfflineThreshold()
	if err != nil {
		return "", errors.Wrap(err, "failed to get offline threshold")
	}

	nodes, err := c.GetNodes()
	if err != nil {
		return "", errors.Wrap(err, "failed to get current nodes")
	}

	name := ""
	containers := -1
	isDefaultArchChosen := false
	for _, node := range nodes {
		if node.IsOffline(threshold) {
			continue
		}

		// Get personalities too.
		personalities, err := osarch.ArchitecturePersonalities(node.Architecture)
		if err != nil {
			return "", err
		}

		supported := []int{node.Architecture}
		supported = append(supported, personalities...)

		match := false
		isDefaultArch := false
		for _, entry := range supported {
			if shared.IntInSlice(entry, archs) {
				match = true
			}

			if entry == defaultArch {
				isDefaultArch = true
			}
		}

		if len(archs) > 0 && !match {
			continue
		}

		// Never replace a default-arch candidate with a non-default one.
		if !isDefaultArch && isDefaultArchChosen {
			continue
		}

		// Fetch the number of containers already created on this node.
		created, err := query.Count(c.tx, "instances", "node_id=?", node.ID)
		if err != nil {
			return "", errors.Wrap(err, "Failed to get instances count")
		}

		// Fetch the number of containers currently being created on this node.
		pending, err := query.Count(
			c.tx, "operations", "node_id=? AND type=?", node.ID, OperationInstanceCreate)
		if err != nil {
			return "", errors.Wrap(err, "Failed to get pending instances count")
		}

		count := created + pending
		if containers == -1 || count < containers || (isDefaultArch && !isDefaultArchChosen) {
			containers = count
			name = node.Name
			if isDefaultArch {
				isDefaultArchChosen = true
			}
		}
	}

	return name, nil
}
// SetNodeVersion updates the schema and API version of the node with the
// given id. This is used only in tests.
func (c *ClusterTx) SetNodeVersion(id int64, version [2]int) error {
	res, err := c.tx.Exec("UPDATE nodes SET schema=?, api_extensions=? WHERE id=?", version[0], version[1], id)
	if err != nil {
		return errors.Wrap(err, "Failed to update nodes table")
	}

	affected, err := res.RowsAffected()
	if err != nil {
		return errors.Wrap(err, "Failed to get affected rows")
	}
	if affected != 1 {
		return fmt.Errorf("Expected exactly one row to be updated")
	}

	return nil
}
func nodeIsOffline(threshold time.Duration, heartbeat time.Time) bool {
return heartbeat.Before(time.Now().Add(-threshold))
}
// DefaultOfflineThreshold is the default value for the
// cluster.offline_threshold configuration key, expressed in seconds.
// It is used by GetNodeOfflineThreshold when no explicit value is configured.
const DefaultOfflineThreshold = 20
lxd/db/node: Update SetNodeHeartbeat to return ErrNoSuchObject if the row to be updated doesn't exist
Signed-off-by: Thomas Parrott <6b778ce645fb0e3dde76d79eccad490955b1ae74@canonical.com>
//go:build linux && cgo && !agent
// +build linux,cgo,!agent
package db
import (
"database/sql"
"fmt"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/lxc/lxd/lxd/db/cluster"
"github.com/lxc/lxd/lxd/db/query"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/osarch"
"github.com/lxc/lxd/shared/version"
)
// ClusterRole represents the role of a member in a cluster.
type ClusterRole string

// ClusterRoleDatabase represents the database role in a cluster.
// It is derived from the raft configuration rather than stored in this map.
const ClusterRoleDatabase = ClusterRole("database")

// ClusterRoles maps role ids into human-readable names.
//
// Note: the database role is currently stored directly in the raft
// configuration which acts as single source of truth for it. This map should
// only contain LXD-specific cluster roles. It is empty until such roles are
// defined.
var ClusterRoles = map[int]ClusterRole{}
// NodeInfo holds information about a single LXD instance in a cluster.
//
// It mirrors a row of the cluster "nodes" table, plus the roles attached to
// the member (populated separately from the nodes_roles table).
type NodeInfo struct {
	ID            int64     // Stable node identifier
	Name          string    // User-assigned name of the node
	Address       string    // Network address of the node
	Description   string    // Node description (optional)
	Schema        int       // Schema version of the LXD code running the node
	APIExtensions int       // Number of API extensions of the LXD code running on the node
	Heartbeat     time.Time // Timestamp of the last heartbeat
	Roles         []string  // List of cluster roles
	Architecture  int       // Node architecture
}
// IsOffline returns true if the last successful heartbeat time of the node is
// older than the given threshold.
//
// It delegates to nodeIsOffline, comparing Heartbeat against the current time.
func (n NodeInfo) IsOffline(threshold time.Duration) bool {
	return nodeIsOffline(threshold, n.Heartbeat)
}
// ToAPI returns a LXD API entry.
//
// It combines this member's row with cluster-wide data (offline threshold,
// failure domains, highest schema/API version) and the local raft table to
// fill in the member's database role and status.
func (n NodeInfo) ToAPI(cluster *Cluster, node *Node) (*api.ClusterMember, error) {
	// Load some needed data.
	var err error
	var offlineThreshold time.Duration
	var maxVersion [2]int
	var failureDomain string

	// From cluster database.
	err = cluster.Transaction(func(tx *ClusterTx) error {
		// Get offline threshold.
		offlineThreshold, err = tx.GetNodeOfflineThreshold()
		if err != nil {
			return errors.Wrap(err, "Load offline threshold config")
		}

		// Get failure domains.
		nodesDomains, err := tx.GetNodesFailureDomains()
		if err != nil {
			return errors.Wrap(err, "Load nodes failure domains")
		}

		domainsNames, err := tx.GetFailureDomainsNames()
		if err != nil {
			return errors.Wrap(err, "Load failure domains names")
		}

		// Missing map entries fall back to domain ID 0 ("default").
		domainID := nodesDomains[n.Address]
		failureDomain = domainsNames[domainID]

		// Get the highest schema and API versions.
		maxVersion, err = tx.GetNodeMaxVersion()
		if err != nil {
			return errors.Wrap(err, "Get max version")
		}

		return nil
	})
	if err != nil {
		return nil, err
	}

	// From local database.
	var raftNode *RaftNode
	err = node.Transaction(func(tx *NodeTx) error {
		nodes, err := tx.GetRaftNodes()
		if err != nil {
			return errors.Wrap(err, "Load raft nodes")
		}

		for _, raft := range nodes {
			if raft.Address != n.Address {
				continue
			}

			// Copy the loop variable before taking its address, so the
			// pointer stays valid regardless of Go version semantics.
			raft := raft
			raftNode = &raft
			break
		}

		return nil
	})
	if err != nil {
		return nil, err
	}

	// Fill in the struct.
	result := api.ClusterMember{}
	result.Description = n.Description
	result.ServerName = n.Name
	result.URL = fmt.Sprintf("https://%s", n.Address)
	// A member is a "database" one only if it's a raft voter.
	result.Database = raftNode != nil && raftNode.Role == RaftVoter
	result.Roles = n.Roles
	if result.Database {
		result.Roles = append(result.Roles, string(ClusterRoleDatabase))
	}

	result.Architecture, err = osarch.ArchitectureName(n.Architecture)
	if err != nil {
		return nil, err
	}

	result.FailureDomain = failureDomain

	if n.IsOffline(offlineThreshold) {
		result.Status = "Offline"
		result.Message = fmt.Sprintf("No heartbeat for %s (%s)", time.Since(n.Heartbeat), n.Heartbeat)
	} else {
		// Check if up to date.
		cmp, err := util.CompareVersions(maxVersion, n.Version())
		if err != nil {
			return nil, err
		}

		if cmp == 1 {
			result.Status = "Blocked"
			result.Message = "Needs updating to newer version"
		} else {
			result.Status = "Online"
			result.Message = "Fully operational"
		}
	}

	return &result, nil
}
// Version returns the node's version, composed by its schema level and
// number of extensions.
func (n NodeInfo) Version() [2]int {
	var v [2]int
	v[0] = n.Schema
	v[1] = n.APIExtensions
	return v
}
// GetNodeByAddress returns the node with the given network address.
func (c *ClusterTx) GetNodeByAddress(address string) (NodeInfo, error) {
	matches, err := c.nodes(false /* not pending */, "address=?", address)
	if err != nil {
		return NodeInfo{}, err
	}
	if len(matches) == 0 {
		return NodeInfo{}, ErrNoSuchObject
	}
	if len(matches) > 1 {
		return NodeInfo{}, fmt.Errorf("more than one node matches")
	}
	return matches[0], nil
}
// GetNodeMaxVersion returns the highest version possible on the cluster.
func (c *ClusterTx) GetNodeMaxVersion() ([2]int, error) {
	var maxSchema, maxAPI int

	// Get the maximum DB schema.
	if err := c.tx.QueryRow("SELECT MAX(schema) FROM nodes").Scan(&maxSchema); err != nil {
		return [2]int{}, err
	}

	// Get the maximum API extension.
	if err := c.tx.QueryRow("SELECT MAX(api_extensions) FROM nodes").Scan(&maxAPI); err != nil {
		return [2]int{}, err
	}

	// Compute the combined version.
	return [2]int{maxSchema, maxAPI}, nil
}
// GetNodeWithID returns the node with the given ID.
func (c *ClusterTx) GetNodeWithID(nodeID int) (NodeInfo, error) {
	matches, err := c.nodes(false /* not pending */, "id=?", nodeID)
	if err != nil {
		return NodeInfo{}, err
	}
	if len(matches) == 0 {
		return NodeInfo{}, ErrNoSuchObject
	}
	if len(matches) > 1 {
		return NodeInfo{}, fmt.Errorf("more than one node matches")
	}
	return matches[0], nil
}
// GetPendingNodeByAddress returns the pending node with the given network address.
func (c *ClusterTx) GetPendingNodeByAddress(address string) (NodeInfo, error) {
	matches, err := c.nodes(true /*pending */, "address=?", address)
	if err != nil {
		return NodeInfo{}, err
	}
	if len(matches) == 0 {
		return NodeInfo{}, ErrNoSuchObject
	}
	if len(matches) > 1 {
		return NodeInfo{}, fmt.Errorf("more than one node matches")
	}
	return matches[0], nil
}
// GetNodeByName returns the node with the given name.
func (c *ClusterTx) GetNodeByName(name string) (NodeInfo, error) {
	matches, err := c.nodes(false /* not pending */, "name=?", name)
	if err != nil {
		return NodeInfo{}, err
	}
	if len(matches) == 0 {
		return NodeInfo{}, ErrNoSuchObject
	}
	if len(matches) > 1 {
		return NodeInfo{}, fmt.Errorf("more than one node matches")
	}
	return matches[0], nil
}
// GetLocalNodeName returns the name of the node this method is invoked on.
// An empty string (and no error) is returned when the row is missing.
func (c *ClusterTx) GetLocalNodeName() (string, error) {
	names, err := query.SelectStrings(c.tx, "SELECT name FROM nodes WHERE id=?", c.nodeID)
	if err != nil {
		return "", err
	}
	if len(names) > 1 {
		return "", fmt.Errorf("inconsistency: non-unique node ID")
	}
	if len(names) == 0 {
		return "", nil
	}
	return names[0], nil
}
// GetLocalNodeAddress returns the address of the node this method is invoked on.
// An empty string (and no error) is returned when the row is missing.
func (c *ClusterTx) GetLocalNodeAddress() (string, error) {
	addresses, err := query.SelectStrings(c.tx, "SELECT address FROM nodes WHERE id=?", c.nodeID)
	if err != nil {
		return "", err
	}
	if len(addresses) > 1 {
		return "", fmt.Errorf("inconsistency: non-unique node ID")
	}
	if len(addresses) == 0 {
		return "", nil
	}
	return addresses[0], nil
}
// NodeIsOutdated returns true if there's some cluster node having an API or
// schema version greater than the node this method is invoked on.
func (c *ClusterTx) NodeIsOutdated() (bool, error) {
	nodes, err := c.nodes(false /* not pending */, "")
	if err != nil {
		return false, errors.Wrap(err, "Failed to fetch nodes")
	}

	// Figure our own version.
	version := [2]int{}
	for _, node := range nodes {
		if node.ID == c.nodeID {
			version = node.Version()
		}
	}
	if version[0] == 0 || version[1] == 0 {
		return false, fmt.Errorf("Inconsistency: local node not found")
	}

	// Check if any of the other nodes is greater than us.
	for _, node := range nodes {
		if node.ID == c.nodeID {
			continue
		}

		n, err := util.CompareVersions(node.Version(), version)
		if err != nil {
			// The wrapped error was previously discarded; propagate it.
			return false, errors.Wrapf(err, "Failed to compare with version of node %s", node.Name)
		}

		if n == 1 {
			// The other node's version is greater than ours.
			return true, nil
		}
	}

	return false, nil
}
// GetNodes returns all LXD nodes part of the cluster.
//
// If this LXD instance is not clustered, a list with a single node whose
// address is 0.0.0.0 is returned.
//
// Only non-pending (fully joined) members are included.
func (c *ClusterTx) GetNodes() ([]NodeInfo, error) {
	return c.nodes(false /* not pending */, "")
}
// GetNodesCount returns the number of nodes in the LXD cluster.
//
// Since there's always at least one node row, even when not-clustered, the
// return value is greater than zero
func (c *ClusterTx) GetNodesCount() (int, error) {
	n, err := query.Count(c.tx, "nodes", "")
	if err != nil {
		return 0, errors.Wrap(err, "failed to count existing nodes")
	}

	return n, nil
}
// RenameNode changes the name of an existing node.
//
// Return an error if a node with the same name already exists.
func (c *ClusterTx) RenameNode(old, new string) error {
	// Refuse to clash with an existing member name.
	dupes, err := query.Count(c.tx, "nodes", "name=?", new)
	if err != nil {
		return errors.Wrap(err, "failed to check existing nodes")
	}
	if dupes != 0 {
		return ErrAlreadyDefined
	}

	res, err := c.tx.Exec(`UPDATE nodes SET name=? WHERE name=?`, new, old)
	if err != nil {
		return errors.Wrap(err, "failed to update node name")
	}

	affected, err := res.RowsAffected()
	if err != nil {
		return errors.Wrap(err, "failed to get rows count")
	}
	if affected != 1 {
		return fmt.Errorf("expected to update one row, not %d", affected)
	}

	return nil
}
// SetDescription changes the description of the given node.
func (c *ClusterTx) SetDescription(id int64, description string) error {
	stmt := `UPDATE nodes SET description=? WHERE id=?`
	result, err := c.tx.Exec(stmt, description, id)
	if err != nil {
		// The message previously said "node name" — this updates the description.
		return errors.Wrap(err, "Failed to update node description")
	}

	n, err := result.RowsAffected()
	if err != nil {
		return errors.Wrap(err, "Failed to get rows count")
	}
	if n != 1 {
		return fmt.Errorf("Expected to update one row, not %d", n)
	}

	return nil
}
// nodes returns all LXD nodes part of the cluster matching the given WHERE
// filter, selecting either pending or fully-joined members.
func (c *ClusterTx) nodes(pending bool, where string, args ...interface{}) ([]NodeInfo, error) {
	// Get node roles.
	sql := "SELECT node_id, role FROM nodes_roles"
	nodeRoles := map[int64][]string{}
	rows, err := c.tx.Query(sql)
	if err != nil {
		// Don't fail on a missing table, we need to handle updates.
		if err.Error() != "no such table: nodes_roles" {
			return nil, err
		}
	} else {
		defer rows.Close()

		for rows.Next() {
			var nodeID int64
			var role int
			err := rows.Scan(&nodeID, &role)
			if err != nil {
				return nil, err
			}

			nodeRoles[nodeID] = append(nodeRoles[nodeID], string(ClusterRoles[role]))
		}

		// Only check iteration errors when the query succeeded: calling
		// rows.Err() on a nil *sql.Rows (the missing-table path) would panic.
		err = rows.Err()
		if err != nil {
			return nil, err
		}
	}

	// Process node entries.
	nodes := []NodeInfo{}
	dest := func(i int) []interface{} {
		nodes = append(nodes, NodeInfo{})
		return []interface{}{
			&nodes[i].ID,
			&nodes[i].Name,
			&nodes[i].Address,
			&nodes[i].Description,
			&nodes[i].Schema,
			&nodes[i].APIExtensions,
			&nodes[i].Heartbeat,
			&nodes[i].Architecture,
		}
	}

	// The first placeholder is the pending flag.
	if pending {
		args = append([]interface{}{1}, args...)
	} else {
		args = append([]interface{}{0}, args...)
	}

	// Get the node entries. Explicit leading spaces on the appended fragments
	// guarantee valid SQL regardless of trailing whitespace in the base query.
	sql = "SELECT id, name, address, description, schema, api_extensions, heartbeat, arch FROM nodes WHERE pending=?"
	if where != "" {
		sql += fmt.Sprintf(" AND %s", where)
	}
	sql += " ORDER BY id"

	stmt, err := c.tx.Prepare(sql)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()

	err = query.SelectObjects(stmt, dest, args...)
	if err != nil {
		return nil, errors.Wrap(err, "Failed to fetch nodes")
	}

	// Add the roles.
	for i, node := range nodes {
		roles, ok := nodeRoles[node.ID]
		if ok {
			nodes[i].Roles = roles
		}
	}

	return nodes, nil
}
// CreateNode adds a node to the current list of LXD nodes that are part of the
// cluster. The node's architecture will be the architecture of the machine the
// method is being run on. It returns the ID of the newly inserted row.
func (c *ClusterTx) CreateNode(name string, address string) (int64, error) {
	// Detect the local machine's architecture and delegate.
	localArch, err := osarch.ArchitectureGetLocalID()
	if err != nil {
		return -1, err
	}
	return c.CreateNodeWithArch(name, address, localArch)
}
// CreateNodeWithArch is the same as NodeAdd, but lets setting the node
// architecture explicitly.
func (c *ClusterTx) CreateNodeWithArch(name string, address string, arch int) (int64, error) {
	return query.UpsertObject(
		c.tx,
		"nodes",
		[]string{"name", "address", "schema", "api_extensions", "arch"},
		[]interface{}{name, address, cluster.SchemaVersion, version.APIExtensionsCount(), arch},
	)
}
// SetNodePendingFlag toggles the pending flag for the node. A node is pending when
// it's been accepted in the cluster, but has not yet actually joined it.
func (c *ClusterTx) SetNodePendingFlag(id int64, pending bool) error {
	// SQLite has no boolean column type; store 0 or 1.
	flag := 0
	if pending {
		flag = 1
	}
	result, err := c.tx.Exec("UPDATE nodes SET pending=? WHERE id=?", flag, id)
	if err != nil {
		return err
	}
	affected, err := result.RowsAffected()
	if err != nil {
		return err
	}
	if affected != 1 {
		return fmt.Errorf("query updated %d rows instead of 1", affected)
	}
	return nil
}
// UpdateNode updates the name and address of a node.
func (c *ClusterTx) UpdateNode(id int64, name string, address string) error {
	result, err := c.tx.Exec("UPDATE nodes SET name=?, address=? WHERE id=?", name, address, id)
	if err != nil {
		return err
	}
	affected, err := result.RowsAffected()
	if err != nil {
		return err
	}
	if affected != 1 {
		return fmt.Errorf("query updated %d rows instead of 1", affected)
	}
	return nil
}
// CreateNodeRole adds a role to the node.
func (c *ClusterTx) CreateNodeRole(id int64, role ClusterRole) error {
	// Translate the role name to its numeric ID.
	roleID := -1
	for candidate, name := range ClusterRoles {
		if name == role {
			roleID = candidate
			break
		}
	}
	if roleID < 0 {
		return fmt.Errorf("Invalid role: %v", role)
	}

	// Update the database record.
	_, err := c.tx.Exec("INSERT INTO nodes_roles (node_id, role) VALUES (?, ?)", id, roleID)
	return err
}
// RemoveNodeRole removes a role from the node.
func (c *ClusterTx) RemoveNodeRole(id int64, role ClusterRole) error {
	// Translate the role name to its numeric ID.
	roleID := -1
	for candidate, name := range ClusterRoles {
		if name == role {
			roleID = candidate
			break
		}
	}
	if roleID < 0 {
		return fmt.Errorf("Invalid role: %v", role)
	}

	// Update the database record.
	_, err := c.tx.Exec("DELETE FROM nodes_roles WHERE node_id=? AND role=?", id, roleID)
	return err
}
// UpdateNodeRoles changes the list of roles on a member.
func (c *ClusterTx) UpdateNodeRoles(id int64, roles []ClusterRole) error {
	// lookupRoleID maps a role name to its numeric ID.
	lookupRoleID := func(role ClusterRole) (int, error) {
		for roleID, name := range ClusterRoles {
			if name == role {
				return roleID, nil
			}
		}
		return -1, fmt.Errorf("Invalid cluster role '%s'", role)
	}

	// Translate role names to ids.
	roleIDs := make([]int, 0, len(roles))
	for _, role := range roles {
		// Skip internal-only roles.
		if role == ClusterRoleDatabase {
			continue
		}
		roleID, err := lookupRoleID(role)
		if err != nil {
			return err
		}
		roleIDs = append(roleIDs, roleID)
	}

	// Replace the existing role rows with the new set.
	_, err := c.tx.Exec("DELETE FROM nodes_roles WHERE node_id=?", id)
	if err != nil {
		return err
	}
	for _, roleID := range roleIDs {
		_, err := c.tx.Exec("INSERT INTO nodes_roles (node_id, role) VALUES (?, ?)", id, roleID)
		if err != nil {
			return err
		}
	}
	return nil
}
// UpdateNodeFailureDomain changes the failure domain of a node.
//
// Passing "default" clears the association (failure_domain_id becomes NULL);
// any other name is looked up in nodes_failure_domains and created on the fly
// if missing. An empty name is rejected.
func (c *ClusterTx) UpdateNodeFailureDomain(id int64, domain string) error {
	// interface{} so it can hold either an int64 row ID or nil (for "default").
	var domainID interface{}

	if domain == "" {
		return fmt.Errorf("Failure domain name can't be empty")
	}

	if domain == "default" {
		domainID = nil
	} else {
		row := c.tx.QueryRow("SELECT id FROM nodes_failure_domains WHERE name=?", domain)
		err := row.Scan(&domainID)
		if err != nil {
			if err != sql.ErrNoRows {
				return errors.Wrapf(err, "Load failure domain name")
			}
			// The domain doesn't exist yet: insert it and use the new row's ID.
			result, err := c.tx.Exec("INSERT INTO nodes_failure_domains (name) VALUES (?)", domain)
			if err != nil {
				return errors.Wrapf(err, "Create new failure domain")
			}
			domainID, err = result.LastInsertId()
			if err != nil {
				return errors.Wrapf(err, "Get last inserted ID")
			}
		}
	}

	result, err := c.tx.Exec("UPDATE nodes SET failure_domain_id=? WHERE id=?", domainID, id)
	if err != nil {
		return err
	}
	n, err := result.RowsAffected()
	if err != nil {
		return err
	}
	if n != 1 {
		return fmt.Errorf("Query updated %d rows instead of 1", n)
	}
	return nil
}
// GetNodeFailureDomain returns the failure domain associated with the node with the given ID.
// Nodes with no explicit failure domain fall back to "default".
func (c *ClusterTx) GetNodeFailureDomain(id int64) (string, error) {
	stmt := `
SELECT coalesce(nodes_failure_domains.name,'default')
FROM nodes LEFT JOIN nodes_failure_domains ON nodes.failure_domain_id = nodes_failure_domains.id
WHERE nodes.id=?
`
	var domain string
	if err := c.tx.QueryRow(stmt, id).Scan(&domain); err != nil {
		return "", err
	}
	return domain, nil
}
// GetNodesFailureDomains returns a map associating each node address with its
// failure domain code.
func (c *ClusterTx) GetNodesFailureDomains() (map[string]uint64, error) {
	stmt, err := c.tx.Prepare("SELECT address, coalesce(failure_domain_id, 0) FROM nodes")
	if err != nil {
		return nil, err
	}
	// Fixed: the prepared statement was previously leaked (never closed).
	defer stmt.Close()

	rows := []struct {
		Address         string
		FailureDomainID int64
	}{}
	dest := func(i int) []interface{} {
		rows = append(rows, struct {
			Address         string
			FailureDomainID int64
		}{})
		return []interface{}{&rows[len(rows)-1].Address, &rows[len(rows)-1].FailureDomainID}
	}
	err = query.SelectObjects(stmt, dest)
	if err != nil {
		return nil, err
	}

	domains := map[string]uint64{}
	for _, row := range rows {
		domains[row.Address] = uint64(row.FailureDomainID)
	}
	return domains, nil
}
// GetFailureDomainsNames return a map associating failure domain IDs to their
// names.
func (c *ClusterTx) GetFailureDomainsNames() (map[uint64]string, error) {
	stmt, err := c.tx.Prepare("SELECT id, name FROM nodes_failure_domains")
	if err != nil {
		return nil, err
	}
	// Fixed: the prepared statement was previously leaked (never closed).
	defer stmt.Close()

	rows := []struct {
		ID   int64
		Name string
	}{}
	dest := func(i int) []interface{} {
		rows = append(rows, struct {
			ID   int64
			Name string
		}{})
		return []interface{}{&rows[len(rows)-1].ID, &rows[len(rows)-1].Name}
	}
	err = query.SelectObjects(stmt, dest)
	if err != nil {
		return nil, err
	}

	domains := map[uint64]string{
		0: "default", // Default failure domain, when not set
	}
	for _, row := range rows {
		domains[uint64(row.ID)] = row.Name
	}
	return domains, nil
}
// RemoveNode removes the node with the given id.
func (c *ClusterTx) RemoveNode(id int64) error {
	result, err := c.tx.Exec("DELETE FROM nodes WHERE id=?", id)
	if err != nil {
		return err
	}
	affected, err := result.RowsAffected()
	if err != nil {
		return err
	}
	if affected != 1 {
		return fmt.Errorf("query deleted %d rows instead of 1", affected)
	}
	return nil
}
// SetNodeHeartbeat updates the heartbeat column of the node with the given address.
func (c *ClusterTx) SetNodeHeartbeat(address string, heartbeat time.Time) error {
	result, err := c.tx.Exec("UPDATE nodes SET heartbeat=? WHERE address=?", heartbeat, address)
	if err != nil {
		return err
	}
	affected, err := result.RowsAffected()
	if err != nil {
		return err
	}
	switch {
	case affected < 1:
		// No node registered with that address.
		return ErrNoSuchObject
	case affected > 1:
		return fmt.Errorf("Expected to update one row and not %d", affected)
	}
	return nil
}
// NodeIsEmpty returns an empty string if the node with the given ID has no
// containers or images associated with it. Otherwise, it returns a message
// saying what's left.
func (c *ClusterTx) NodeIsEmpty(id int64) (string, error) {
	// Check if the node has any instances.
	containers, err := query.SelectStrings(c.tx, "SELECT name FROM instances WHERE node_id=?", id)
	if err != nil {
		return "", errors.Wrapf(err, "Failed to get instances for node %d", id)
	}
	if len(containers) > 0 {
		message := fmt.Sprintf(
			"Node still has the following containers: %s", strings.Join(containers, ", "))
		return message, nil
	}

	// Check if the node has any images available only in it.
	images := []struct {
		fingerprint string
		nodeID      int64
	}{}
	dest := func(i int) []interface{} {
		images = append(images, struct {
			fingerprint string
			nodeID      int64
		}{})
		return []interface{}{&images[i].fingerprint, &images[i].nodeID}
	}
	stmt, err := c.tx.Prepare(`
SELECT fingerprint, node_id FROM images JOIN images_nodes ON images.id=images_nodes.image_id`)
	if err != nil {
		return "", err
	}
	defer stmt.Close()
	err = query.SelectObjects(stmt, dest)
	if err != nil {
		return "", errors.Wrapf(err, "Failed to get image list for node %d", id)
	}
	index := map[string][]int64{} // Map fingerprints to IDs of nodes
	for _, image := range images {
		index[image.fingerprint] = append(index[image.fingerprint], image.nodeID)
	}
	fingerprints := []string{}
	for fingerprint, ids := range index {
		// Images available on more than one node don't block removal.
		if len(ids) > 1 {
			continue
		}
		if ids[0] == id {
			fingerprints = append(fingerprints, fingerprint)
		}
	}
	if len(fingerprints) > 0 {
		message := fmt.Sprintf(
			"Node still has the following images: %s", strings.Join(fingerprints, ", "))
		return message, nil
	}

	// Check if the node has any custom volumes. Ceph-backed pools are excluded
	// since their volumes are not tied to a single node.
	volumes, err := query.SelectStrings(
		c.tx, "SELECT storage_volumes.name FROM storage_volumes JOIN storage_pools ON storage_volumes.storage_pool_id=storage_pools.id WHERE storage_volumes.node_id=? AND storage_volumes.type=? AND storage_pools.driver NOT IN ('ceph', 'cephfs')",
		id, StoragePoolVolumeTypeCustom)
	if err != nil {
		return "", errors.Wrapf(err, "Failed to get custom volumes for node %d", id)
	}
	if len(volumes) > 0 {
		message := fmt.Sprintf(
			"Node still has the following custom volumes: %s", strings.Join(volumes, ", "))
		return message, nil
	}
	return "", nil
}
// ClearNode removes any instance or image associated with this node.
func (c *ClusterTx) ClearNode(id int64) error {
	// Drop all instances on the node.
	_, err := c.tx.Exec("DELETE FROM instances WHERE node_id=?", id)
	if err != nil {
		return err
	}

	// Get the IDs of the images this node is hosting.
	ids, err := query.SelectIntegers(c.tx, "SELECT image_id FROM images_nodes WHERE node_id=?", id)
	if err != nil {
		return err
	}

	// Delete the association
	_, err = c.tx.Exec("DELETE FROM images_nodes WHERE node_id=?", id)
	if err != nil {
		return err
	}

	// Delete the image as well if this was the only node with it.
	// NOTE: the loop variable shadows the node id parameter; here it holds
	// an image ID.
	for _, id := range ids {
		count, err := query.Count(c.tx, "images_nodes", "image_id=?", id)
		if err != nil {
			return err
		}
		if count > 0 {
			// Some other node still references this image.
			continue
		}
		_, err = c.tx.Exec("DELETE FROM images WHERE id=?", id)
		if err != nil {
			return err
		}
	}
	return nil
}
// GetNodeOfflineThreshold returns the amount of time that needs to elapse after
// which a series of unsuccessful heartbeat will make the node be considered
// offline.
func (c *ClusterTx) GetNodeOfflineThreshold() (time.Duration, error) {
	values, err := query.SelectStrings(
		c.tx, "SELECT value FROM config WHERE key='cluster.offline_threshold'")
	if err != nil {
		return -1, err
	}
	// Fall back to the default when the config key is unset.
	if len(values) == 0 {
		return time.Duration(DefaultOfflineThreshold) * time.Second, nil
	}
	seconds, err := strconv.Atoi(values[0])
	if err != nil {
		return -1, err
	}
	return time.Duration(seconds) * time.Second, nil
}
// GetNodeWithLeastInstances returns the name of the non-offline node with
// the least number of containers (either already created or being created with
// an operation). If archs is not empty, then return only nodes with an
// architecture in that list.
func (c *ClusterTx) GetNodeWithLeastInstances(archs []int, defaultArch int) (string, error) {
	threshold, err := c.GetNodeOfflineThreshold()
	if err != nil {
		return "", errors.Wrap(err, "failed to get offline threshold")
	}
	nodes, err := c.GetNodes()
	if err != nil {
		return "", errors.Wrap(err, "failed to get current nodes")
	}
	name := ""
	containers := -1
	// Tracks whether the currently selected node supports defaultArch; such
	// nodes are preferred and never displaced by non-default-arch ones.
	isDefaultArchChosen := false
	for _, node := range nodes {
		if node.IsOffline(threshold) {
			continue
		}
		// Get personalities too.
		personalities, err := osarch.ArchitecturePersonalities(node.Architecture)
		if err != nil {
			return "", err
		}
		supported := []int{node.Architecture}
		supported = append(supported, personalities...)
		match := false
		isDefaultArch := false
		for _, entry := range supported {
			if shared.IntInSlice(entry, archs) {
				match = true
			}
			if entry == defaultArch {
				isDefaultArch = true
			}
		}
		// Skip nodes that can't run any of the requested architectures.
		if len(archs) > 0 && !match {
			continue
		}
		// Never displace an already chosen default-arch node with a
		// non-default-arch one.
		if !isDefaultArch && isDefaultArchChosen {
			continue
		}
		// Fetch the number of containers already created on this node.
		created, err := query.Count(c.tx, "instances", "node_id=?", node.ID)
		if err != nil {
			return "", errors.Wrap(err, "Failed to get instances count")
		}
		// Fetch the number of containers currently being created on this node.
		pending, err := query.Count(
			c.tx, "operations", "node_id=? AND type=?", node.ID, OperationInstanceCreate)
		if err != nil {
			return "", errors.Wrap(err, "Failed to get pending instances count")
		}
		count := created + pending
		// Pick this node if it's the emptiest so far, or if it's the first
		// default-arch candidate seen.
		if containers == -1 || count < containers || (isDefaultArch == true && isDefaultArchChosen == false) {
			containers = count
			name = node.Name
			if isDefaultArch {
				isDefaultArchChosen = true
			}
		}
	}
	return name, nil
}
// SetNodeVersion updates the schema and API version of the node with the
// given id. This is used only in tests.
func (c *ClusterTx) SetNodeVersion(id int64, version [2]int) error {
	result, err := c.tx.Exec(
		"UPDATE nodes SET schema=?, api_extensions=? WHERE id=?",
		version[0], version[1], id)
	if err != nil {
		return errors.Wrap(err, "Failed to update nodes table")
	}
	affected, err := result.RowsAffected()
	if err != nil {
		return errors.Wrap(err, "Failed to get affected rows")
	}
	if affected != 1 {
		return fmt.Errorf("Expected exactly one row to be updated")
	}
	return nil
}
func nodeIsOffline(threshold time.Duration, heartbeat time.Time) bool {
return heartbeat.Before(time.Now().Add(-threshold))
}
// DefaultOfflineThreshold is the default value for the
// cluster.offline_threshold configuration key, expressed in seconds.
const DefaultOfflineThreshold = 20
|
package models
import (
	"encoding/json"
	"errors"
	"io/ioutil"
	"net/http"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/fugazister/feeds"
)
// VKFeed wraps a VK wall URL that can be rendered as an RSS feed.
type VKFeed struct {
	FeedUrl string
}

// VKVideo is a video attachment as returned by the VK API.
type VKVideo struct {
	Id   int
	Link string
}

// VKPhoto is a photo attachment with its available resolutions.
type VKPhoto struct {
	Album_id   int
	Owner_id   int
	Photo_75   string
	Photo_130  string
	Photo_604  string
	Photo_807  string
	Photo_1280 string
}

// VKAudio is an audio attachment.
type VKAudio struct {
	Id  int
	Url string
}

// VKAttachment is a tagged union of the possible attachment kinds; Type
// selects which member is populated.
type VKAttachment struct {
	Type  string
	Photo VKPhoto
	Audio VKAudio
	Video VKVideo
}

// VKItem is a single wall post, possibly a repost (Copy_history).
type VKItem struct {
	Id           int
	From_id      int
	Owner_id     int
	Date         int
	Post_type    string
	Text         string
	Copy_history []VKItem
	Attachments  []VKAttachment
}

// VKProfile describes a user profile.
type VKProfile struct {
	Id          int
	First_name  string
	Last_name   string
	Screen_name string
	Photo_200   string
}

// VKGroup describes a community/group.
type VKGroup struct {
	Id          int
	Name        string
	Screen_name string
	Is_closed   int
	Type        string
	Photo_200   string
}

// VKResponseBody is the payload of a wall.get call.
type VKResponseBody struct {
	Count    int
	Items    []VKItem
	Profiles []VKProfile
	Groups   []VKGroup
}

// VKResponse is the API envelope around VKResponseBody.
type VKResponse struct {
	Response VKResponseBody
}

// SourceInfo holds the naming fields common to users and groups.
type SourceInfo struct {
	Name        string
	Screen_name string
	First_name  string
	Last_name   string
}

// SourceInfoContainer is the API envelope around SourceInfo entries.
type SourceInfoContainer struct {
	Response []SourceInfo
}

// VKAttachmentListItem is a flattened attachment: its URL and media type.
type VKAttachmentListItem struct {
	Url  string
	Type string
}

// VKAttachmentList collects flattened attachments of a post.
type VKAttachmentList struct {
	Items []*VKAttachmentListItem
}

// processAttachments flattens VK post attachments into URL/type pairs,
// keeping photos (at the highest available resolution) and audio tracks.
func processAttachments(attachments []VKAttachment) VKAttachmentList {
	var attachmentList VKAttachmentList
	for _, attachment := range attachments {
		switch attachment.Type {
		case "photo":
			// Pick the largest photo size that is present.
			candidates := []string{
				attachment.Photo.Photo_1280,
				attachment.Photo.Photo_807,
				attachment.Photo.Photo_604,
				attachment.Photo.Photo_130,
				attachment.Photo.Photo_75,
			}
			photo := ""
			for _, candidate := range candidates {
				if candidate != "" {
					photo = candidate
					break
				}
			}
			attachmentList.Items = append(attachmentList.Items, &VKAttachmentListItem{photo, "photo"})
		case "audio":
			attachmentList.Items = append(attachmentList.Items, &VKAttachmentListItem{attachment.Audio.Url, "audio/mpeg"})
		}
	}
	return attachmentList
}
// ResolvedScreenName is the payload of utils.resolveScreenName: the object
// type (e.g. "user") and its numeric ID (decoded as float64 by encoding/json).
type ResolvedScreenName struct {
	Type      string
	Object_id float64
}

// ResolvedScreenNameResponse is the API envelope around ResolvedScreenName.
type ResolvedScreenNameResponse struct {
	Response ResolvedScreenName
}
// resolveScreenName asks the VK API what kind of object a screen name refers
// to, returning its type and numeric ID.
//
// On any HTTP or decoding failure the zero value is returned. The previous
// version ignored every error and dereferenced a nil response body when the
// HTTP request itself failed.
func resolveScreenName(screenName string) ResolvedScreenName {
	var encoded ResolvedScreenNameResponse
	requestUrl := "https://api.vk.com/method/utils.resolveScreenName?v=5.12&screen_name=" + screenName
	resp, err := http.Get(requestUrl)
	if err != nil {
		return encoded.Response
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return encoded.Response
	}
	// Best effort: malformed JSON leaves the zero value in place.
	_ = json.Unmarshal(body, &encoded)
	return encoded.Response
}
// getSourceInfo fetches the display name and screen name for a VK feed ID.
// Negative IDs ("-123") denote groups, positive ones users.
//
// On failure it falls back to returning the raw feed ID for both values.
// The previous version ignored all errors, never closed the response body
// and panicked (index out of range) on empty API responses.
func getSourceInfo(feedId string) (string, string) {
	isGroup := strings.Contains(feedId, "-")
	var requestUrl string
	if isGroup {
		feedId = feedId[1:]
		requestUrl = "https://api.vk.com/method/groups.getById?group_id=" + feedId
	} else {
		requestUrl = "https://api.vk.com/method/users.get?user_ids=" + feedId
	}
	resp, err := http.Get(requestUrl)
	if err != nil {
		return feedId, feedId
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return feedId, feedId
	}
	var encoded SourceInfoContainer
	err = json.Unmarshal(body, &encoded)
	if err != nil || len(encoded.Response) == 0 {
		return feedId, feedId
	}
	info := encoded.Response[0]
	if len(info.Name) > 0 {
		// Groups carry a single Name field.
		return info.Name, info.Screen_name
	}
	// Users carry first/last names and no screen name in this response.
	return info.First_name + " " + info.Last_name, feedId
}
// getPosts fetches the wall of the given VK owner ID and renders it as an RSS
// document. Photos become inline <img> tags in the item description and audio
// attachments are added as RSS enclosures.
func getPosts(feedUrl string) (string, error) {
	var requestUrl string = "https://api.vk.com/method/wall.get?v=5.12&extended=1&owner_id=" + feedUrl
	resp, err := http.Get(requestUrl)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	// NOTE(review): the ReadAll error is not checked here; a failed read only
	// surfaces indirectly as a JSON unmarshal error below.
	body, err := ioutil.ReadAll(resp.Body)
	var encoded VKResponse
	err = json.Unmarshal(body, &encoded)
	if err != nil {
		return "", err
	}
	name, screenName := getSourceInfo(feedUrl)
	feed := &feeds.Feed{
		Title: name,
		Link:  &feeds.Link{Href: "https://vk.com/" + screenName},
	}
	for _, elem := range encoded.Response.Items {
		var description string = ""
		// These shadow the feed-level name/screenName and stay empty unless
		// the post is a repost.
		var screenName, name string = "", ""
		var photos string = ""
		attachmentList := processAttachments(elem.Attachments)
		description += elem.Text
		if len(elem.Copy_history) > 0 {
			// Repost: append the original text and use its attachments/author.
			description += elem.Copy_history[0].Text
			attachmentList = processAttachments(elem.Copy_history[0].Attachments)
			name, screenName = getSourceInfo(strconv.Itoa(elem.Copy_history[0].Owner_id))
		}
		for _, attachment := range attachmentList.Items {
			if attachment.Type == "photo" {
				photos += "<br/><img src='" + attachment.Url + "'/>"
			}
		}
		item := &feeds.Item{
			Author:      &feeds.Author{Name: name, Email: "https://vk.com/" + screenName},
			Title:       strings.Split(elem.Text, ".")[0] + "...",
			Link:        &feeds.Link{Href: "http://vk.com/wall" + strconv.Itoa(elem.Owner_id) + "_" + strconv.Itoa(elem.Id)},
			Description: description + photos,
			Created:     time.Unix(int64(elem.Date), int64(0)),
		}
		// Audio tracks are exposed as RSS enclosures on the item.
		for _, attachment := range attachmentList.Items {
			if attachment.Type == "audio/mpeg" {
				enclosure := &feeds.Enclosure{attachment.Url, attachment.Type}
				item.AddEnclosure(enclosure)
			}
		}
		feed.Add(item)
	}
	return feed.ToRss()
}
// vkScreenNameRe extracts the screen name component from a vk.com URL.
// Compiled once at package scope instead of on every call.
var vkScreenNameRe = regexp.MustCompile(`vk.com/(\w+)`)

// getPostsByUrl resolves a vk.com URL to a numeric owner ID (negated for
// groups) and returns the RSS rendering of that wall.
func getPostsByUrl(feedUrl string) (string, error) {
	matches := vkScreenNameRe.FindAllStringSubmatch(feedUrl, -1)
	if len(matches) == 0 {
		// Previously this indexed matches[0] unconditionally and panicked on
		// URLs without a vk.com/<name> component.
		return "", errors.New("no VK screen name found in URL: " + feedUrl)
	}
	screenName := resolveScreenName(matches[0][1])
	resolvedFeedId := strconv.Itoa(int(screenName.Object_id))
	if screenName.Type != "user" {
		// Group and public walls use negative owner IDs.
		resolvedFeedId = "-" + resolvedFeedId
	}
	return getPosts(resolvedFeedId)
}
// GetFeed renders the VK wall behind FeedUrl as an RSS document.
func (v VKFeed) GetFeed() (string, error) {
	return getPostsByUrl(v.FeedUrl)
}
Replace the RSS enclosure with a <source> tag embedded in the item description
package models
import (
	"encoding/json"
	"errors"
	"io/ioutil"
	"net/http"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/fugazister/feeds"
)
// VKFeed wraps a VK wall URL that can be rendered as an RSS feed.
type VKFeed struct {
	FeedUrl string
}

// VKVideo is a video attachment as returned by the VK API.
type VKVideo struct {
	Id   int
	Link string
}

// VKPhoto is a photo attachment with its available resolutions.
type VKPhoto struct {
	Album_id   int
	Owner_id   int
	Photo_75   string
	Photo_130  string
	Photo_604  string
	Photo_807  string
	Photo_1280 string
}

// VKAudio is an audio attachment.
type VKAudio struct {
	Id  int
	Url string
}

// VKAttachment is a tagged union of the possible attachment kinds; Type
// selects which member is populated.
type VKAttachment struct {
	Type  string
	Photo VKPhoto
	Audio VKAudio
	Video VKVideo
}

// VKItem is a single wall post, possibly a repost (Copy_history).
type VKItem struct {
	Id           int
	From_id      int
	Owner_id     int
	Date         int
	Post_type    string
	Text         string
	Copy_history []VKItem
	Attachments  []VKAttachment
}

// VKProfile describes a user profile.
type VKProfile struct {
	Id          int
	First_name  string
	Last_name   string
	Screen_name string
	Photo_200   string
}

// VKGroup describes a community/group.
type VKGroup struct {
	Id          int
	Name        string
	Screen_name string
	Is_closed   int
	Type        string
	Photo_200   string
}

// VKResponseBody is the payload of a wall.get call.
type VKResponseBody struct {
	Count    int
	Items    []VKItem
	Profiles []VKProfile
	Groups   []VKGroup
}

// VKResponse is the API envelope around VKResponseBody.
type VKResponse struct {
	Response VKResponseBody
}

// SourceInfo holds the naming fields common to users and groups.
type SourceInfo struct {
	Name        string
	Screen_name string
	First_name  string
	Last_name   string
}

// SourceInfoContainer is the API envelope around SourceInfo entries.
type SourceInfoContainer struct {
	Response []SourceInfo
}

// VKAttachmentListItem is a flattened attachment: its URL and media type.
type VKAttachmentListItem struct {
	Url  string
	Type string
}

// VKAttachmentList collects flattened attachments of a post.
type VKAttachmentList struct {
	Items []*VKAttachmentListItem
}

// processAttachments flattens VK post attachments into URL/type pairs,
// keeping photos (at the highest available resolution) and audio tracks.
func processAttachments(attachments []VKAttachment) VKAttachmentList {
	var attachmentList VKAttachmentList
	for _, attachment := range attachments {
		switch attachment.Type {
		case "photo":
			// Pick the largest photo size that is present.
			candidates := []string{
				attachment.Photo.Photo_1280,
				attachment.Photo.Photo_807,
				attachment.Photo.Photo_604,
				attachment.Photo.Photo_130,
				attachment.Photo.Photo_75,
			}
			photo := ""
			for _, candidate := range candidates {
				if candidate != "" {
					photo = candidate
					break
				}
			}
			attachmentList.Items = append(attachmentList.Items, &VKAttachmentListItem{photo, "photo"})
		case "audio":
			attachmentList.Items = append(attachmentList.Items, &VKAttachmentListItem{attachment.Audio.Url, "audio"})
		}
	}
	return attachmentList
}
// ResolvedScreenName is the payload of utils.resolveScreenName: the object
// type (e.g. "user") and its numeric ID (decoded as float64 by encoding/json).
type ResolvedScreenName struct {
	Type      string
	Object_id float64
}

// ResolvedScreenNameResponse is the API envelope around ResolvedScreenName.
type ResolvedScreenNameResponse struct {
	Response ResolvedScreenName
}
// resolveScreenName asks the VK API what kind of object a screen name refers
// to, returning its type and numeric ID.
//
// On any HTTP or decoding failure the zero value is returned. The previous
// version ignored every error and dereferenced a nil response body when the
// HTTP request itself failed.
func resolveScreenName(screenName string) ResolvedScreenName {
	var encoded ResolvedScreenNameResponse
	requestUrl := "https://api.vk.com/method/utils.resolveScreenName?v=5.12&screen_name=" + screenName
	resp, err := http.Get(requestUrl)
	if err != nil {
		return encoded.Response
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return encoded.Response
	}
	// Best effort: malformed JSON leaves the zero value in place.
	_ = json.Unmarshal(body, &encoded)
	return encoded.Response
}
// getSourceInfo fetches the display name and screen name for a VK feed ID.
// Negative IDs ("-123") denote groups, positive ones users.
//
// On failure it falls back to returning the raw feed ID for both values.
// The previous version ignored all errors, never closed the response body
// and panicked (index out of range) on empty API responses.
func getSourceInfo(feedId string) (string, string) {
	isGroup := strings.Contains(feedId, "-")
	var requestUrl string
	if isGroup {
		feedId = feedId[1:]
		requestUrl = "https://api.vk.com/method/groups.getById?group_id=" + feedId
	} else {
		requestUrl = "https://api.vk.com/method/users.get?user_ids=" + feedId
	}
	resp, err := http.Get(requestUrl)
	if err != nil {
		return feedId, feedId
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return feedId, feedId
	}
	var encoded SourceInfoContainer
	err = json.Unmarshal(body, &encoded)
	if err != nil || len(encoded.Response) == 0 {
		return feedId, feedId
	}
	info := encoded.Response[0]
	if len(info.Name) > 0 {
		// Groups carry a single Name field.
		return info.Name, info.Screen_name
	}
	// Users carry first/last names and no screen name in this response.
	return info.First_name + " " + info.Last_name, feedId
}
// getPosts fetches the wall of the given VK owner ID and renders it as an
// RSS document. Photo attachments are inlined as <img> tags and audio
// attachments as HTML5 <source> tags in the item description.
func getPosts(feedUrl string) (string, error) {
	var requestUrl string = "https://api.vk.com/method/wall.get?v=5.12&extended=1&owner_id=" + feedUrl
	resp, err := http.Get(requestUrl)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// Fixed: this read error was previously silently overwritten by the
		// unmarshal call below.
		return "", err
	}
	var encoded VKResponse
	err = json.Unmarshal(body, &encoded)
	if err != nil {
		return "", err
	}
	name, screenName := getSourceInfo(feedUrl)
	feed := &feeds.Feed{
		Title: name,
		Link:  &feeds.Link{Href: "https://vk.com/" + screenName},
	}
	for _, elem := range encoded.Response.Items {
		var description string = ""
		// These shadow the feed-level name/screenName and stay empty unless
		// the post is a repost.
		var screenName, name string = "", ""
		var attachments string = ""
		attachmentList := processAttachments(elem.Attachments)
		description += elem.Text
		if len(elem.Copy_history) > 0 {
			// Repost: append the original text and use its attachments/author.
			description += elem.Copy_history[0].Text
			attachmentList = processAttachments(elem.Copy_history[0].Attachments)
			name, screenName = getSourceInfo(strconv.Itoa(elem.Copy_history[0].Owner_id))
		}
		for _, attachment := range attachmentList.Items {
			if attachment.Type == "photo" {
				attachments += "<br/><img src='" + attachment.Url + "'/>"
			} else if attachment.Type == "audio" {
				// Fixed: the codecs parameter was previously wrapped in single
				// quotes inside an already single-quoted attribute
				// (type='audio/mpeg; codecs='mp3''), producing malformed HTML.
				attachments += "<br/><source src='" + attachment.Url + "' type='audio/mpeg; codecs=\"mp3\"' />"
			}
		}
		item := &feeds.Item{
			Author:      &feeds.Author{Name: name, Email: "https://vk.com/" + screenName},
			Title:       strings.Split(elem.Text, ".")[0] + "...",
			Link:        &feeds.Link{Href: "http://vk.com/wall" + strconv.Itoa(elem.Owner_id) + "_" + strconv.Itoa(elem.Id)},
			Description: description + attachments,
			Created:     time.Unix(int64(elem.Date), int64(0)),
		}
		feed.Add(item)
	}
	return feed.ToRss()
}
// vkScreenNameRe extracts the screen name component from a vk.com URL.
// Compiled once at package scope instead of on every call.
var vkScreenNameRe = regexp.MustCompile(`vk.com/(\w+)`)

// getPostsByUrl resolves a vk.com URL to a numeric owner ID (negated for
// groups) and returns the RSS rendering of that wall.
func getPostsByUrl(feedUrl string) (string, error) {
	matches := vkScreenNameRe.FindAllStringSubmatch(feedUrl, -1)
	if len(matches) == 0 {
		// Previously this indexed matches[0] unconditionally and panicked on
		// URLs without a vk.com/<name> component.
		return "", errors.New("no VK screen name found in URL: " + feedUrl)
	}
	screenName := resolveScreenName(matches[0][1])
	resolvedFeedId := strconv.Itoa(int(screenName.Object_id))
	if screenName.Type != "user" {
		// Group and public walls use negative owner IDs.
		resolvedFeedId = "-" + resolvedFeedId
	}
	return getPosts(resolvedFeedId)
}
// GetFeed renders the VK wall behind FeedUrl as an RSS document.
func (v VKFeed) GetFeed() (string, error) {
	return getPostsByUrl(v.FeedUrl)
}
|
package main
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"syscall"
"github.com/lxc/lxd/shared"
log "gopkg.in/inconshreveable/log15.v2"
)
/* Some interesting filesystems */
const (
	// Magic numbers as reported by statfs(2) in the Statfs_t.Type field.
	filesystemSuperMagicTmpfs = 0x01021994
	filesystemSuperMagicExt4  = 0xEF53
	filesystemSuperMagicXfs   = 0x58465342
	filesystemSuperMagicNfs   = 0x6969
)
/*
 * filesystemDetect returns the filesystem on which
 * the passed-in path sits
 */
func filesystemDetect(path string) (string, error) {
	fs := syscall.Statfs_t{}

	err := syscall.Statfs(path, &fs)
	if err != nil {
		return "", err
	}

	switch fs.Type {
	case filesystemSuperMagicBtrfs:
		return "btrfs", nil
	case filesystemSuperMagicTmpfs:
		return "tmpfs", nil
	case filesystemSuperMagicExt4:
		return "ext4", nil
	case filesystemSuperMagicXfs:
		return "xfs", nil
	case filesystemSuperMagicNfs:
		return "nfs", nil
	default:
		// Fixed: string(fs.Type) interpreted the magic number as a Unicode
		// code point (go vet flags this); format the number as decimal.
		return fmt.Sprintf("%d", fs.Type), nil
	}
}
// storageRsyncCopy copies a directory using rsync (with the --devices option).
func storageRsyncCopy(source string, dest string) (string, error) {
	if err := os.MkdirAll(dest, 0755); err != nil {
		return "", err
	}

	// Be quiet unless the daemon runs in debug mode.
	verbosity := "-q"
	if *debug {
		verbosity = "-vi"
	}

	cmd := exec.Command(
		"rsync",
		"-a",
		"--checksum", // TODO: Not sure we need this option
		"-HAX",
		"--devices",
		"--delete",
		verbosity,
		shared.AddSlash(source),
		dest)
	output, err := cmd.CombinedOutput()
	return string(output), err
}
// storageUnprivUserAclSet grants the container's mapped root uid read/execute
// access to dpath via a POSIX ACL, when an ID mapping exists.
func storageUnprivUserAclSet(c container, dpath string) error {
	idmapset, err := c.IdmapSetGet()
	if err != nil {
		return err
	}
	// Nothing to do for containers without an ID mapping.
	if idmapset == nil {
		return nil
	}
	uid, _ := idmapset.ShiftIntoNs(0, 0)
	if uid == -1 {
		shared.Debugf("storageUnprivUserAclSet: no root id mapping")
		return nil
	}
	if uid == 0 {
		// Root already maps to root: no ACL needed.
		return nil
	}
	acl := fmt.Sprintf("%d:rx", uid)
	output, err := exec.Command("setfacl", "-m", acl, dpath).CombinedOutput()
	if err != nil {
		shared.Debugf("storageUnprivUserAclSet: setfacl failed:\n%s", output)
	}
	return err
}
// storageType defines the type of a storage
type storageType int

const (
	storageTypeBtrfs storageType = iota
	storageTypeLvm
	storageTypeDir
)

// storageTypeToString maps a storageType constant to its configuration name;
// anything unrecognized is treated as plain directory storage.
func storageTypeToString(sType storageType) string {
	switch sType {
	case storageTypeBtrfs:
		return "btrfs"
	case storageTypeLvm:
		return "lvm"
	default:
		return "dir"
	}
}
// storage is the interface implemented by the individual storage backends
// (dir, btrfs, lvm), covering container and image lifecycle operations.
type storage interface {
	// Init returns a ready-to-use storage configured from the given map.
	Init(config map[string]interface{}) (storage, error)

	GetStorageType() storageType
	GetStorageTypeName() string

	// ContainerCreate creates an empty container (no rootfs/metadata.yaml)
	ContainerCreate(container container) error

	// ContainerCreateFromImage creates a container from a image.
	ContainerCreateFromImage(container container, imageFingerprint string) error

	ContainerDelete(container container) error
	ContainerCopy(container container, sourceContainer container) error
	ContainerStart(container container) error
	ContainerStop(container container) error
	ContainerRename(container container, newName string) error
	ContainerRestore(container container, sourceContainer container) error
	ContainerSnapshotCreate(
		snapshotContainer container, sourceContainer container) error
	ContainerSnapshotDelete(snapshotContainer container) error
	ContainerSnapshotRename(snapshotContainer container, newName string) error

	ImageCreate(fingerprint string) error
	ImageDelete(fingerprint string) error
}
// newStorage instantiates the storage backend of the given type with no
// extra configuration.
func newStorage(d *Daemon, sType storageType) (storage, error) {
	return newStorageWithConfig(d, sType, nil)
}
// newStorageWithConfig instantiates the storage backend of the given type,
// initialized from the given configuration map.
func newStorageWithConfig(d *Daemon, sType storageType, config map[string]interface{}) (storage, error) {
	var inner storage
	switch sType {
	case storageTypeBtrfs:
		inner = &storageBtrfs{d: d, sType: sType}
	case storageTypeLvm:
		inner = &storageLvm{d: d, sType: sType}
	default:
		inner = &storageDir{d: d, sType: sType}
	}
	// Every backend is wrapped so that its calls get logged.
	wrapped := &storageLogWrapper{w: inner}
	return wrapped.Init(config)
}
// storageForFilename picks the storage backend responsible for the given
// path: lvm when an .lv symlink exists next to it, btrfs when the path sits
// on a btrfs filesystem, plain dir otherwise.
func storageForFilename(d *Daemon, filename string) (storage, error) {
	config := map[string]interface{}{}
	sType := storageTypeDir
	lvLinkPath := filename + ".lv"

	filesystem, err := filesystemDetect(filename)
	if err != nil {
		return nil, fmt.Errorf("couldn't detect filesystem for '%s': %v", filename, err)
	}

	switch {
	case shared.PathExists(lvLinkPath):
		sType = storageTypeLvm
		lvPath, err := os.Readlink(lvLinkPath)
		if err != nil {
			return nil, fmt.Errorf("couldn't read link dest for '%s': %v", lvLinkPath, err)
		}
		// The volume group name is the parent directory of the LV device path.
		config["vgName"] = filepath.Base(filepath.Dir(lvPath))
	case filesystem == "btrfs":
		sType = storageTypeBtrfs
	}

	return newStorageWithConfig(d, sType, config)
}
// storageForImage picks the appropriate storage backend for the on-disk
// image file with the given fingerprint.
func storageForImage(d *Daemon, imgInfo *shared.ImageBaseInfo) (storage, error) {
	imageFilename := shared.VarPath("images", imgInfo.Fingerprint)
	return storageForFilename(d, imageFilename)
}
// storageShared holds state common to all storage backends.
type storageShared struct {
	sTypeName string // backend name, e.g. "btrfs"

	log log.Logger // per-driver logger, set up by initShared
}
// initShared sets up the per-driver logger, tagged with the backend name.
func (ss *storageShared) initShared() error {
	ss.log = shared.Log.New(
		log.Ctx{"driver": fmt.Sprintf("storage/%s", ss.sTypeName)},
	)
	return nil
}
// GetStorageTypeName returns the backend's name (e.g. "btrfs").
func (ss *storageShared) GetStorageTypeName() string {
	return ss.sTypeName
}
// shiftRootfs shifts the uid/gid ownership of the container's rootfs into
// the container's idmap range, then makes the container directory
// accessible to the mapped root user.
func (ss *storageShared) shiftRootfs(c container) error {
	dpath := c.PathGet("")
	rpath := c.RootfsPathGet()

	shared.Log.Debug("shiftRootfs",
		log.Ctx{"container": c.NameGet(), "rootfs": rpath})

	idmapset, err := c.IdmapSetGet()
	if err != nil {
		return err
	}

	// Without an idmap there is nothing to shift into; treat as an error.
	if idmapset == nil {
		return fmt.Errorf("IdmapSet of container '%s' is nil", c.NameGet())
	}

	err = idmapset.ShiftRootfs(rpath)
	if err != nil {
		shared.Debugf("Shift of rootfs %s failed: %s\n", rpath, err)
		return err
	}

	/* Set an acl so the container root can descend the container dir */
	// TODO: i changed this so it calls ss.setUnprivUserAcl, which does
	// the acl change only if the container is not privileged, think thats right.
	return ss.setUnprivUserAcl(c, dpath)
}

// setUnprivUserAcl grants the container's unprivileged root user access to
// destPath via an ACL; if setting the ACL fails it falls back to a plain
// "chmod +x". Privileged containers are skipped entirely.
func (ss *storageShared) setUnprivUserAcl(c container, destPath string) error {
	if !c.IsPrivileged() {
		err := storageUnprivUserAclSet(c, destPath)
		if err != nil {
			ss.log.Error(
				"adding acl for container root: falling back to chmod",
				log.Ctx{"destPath": destPath})

			output, err := exec.Command(
				"chmod", "+x", destPath).CombinedOutput()
			if err != nil {
				ss.log.Error(
					"chmoding the container root",
					log.Ctx{
						"destPath": destPath,
						"output":   output})
				return err
			}
		}
	}
	return nil
}
// storageLogWrapper decorates another storage implementation, logging each
// call at debug level before delegating to the wrapped backend.
type storageLogWrapper struct {
	w   storage
	log log.Logger
}

// Init initializes the wrapped backend, then sets up this wrapper's own
// per-driver logger. The wrapper (not the inner backend) is returned so all
// subsequent calls go through the logging shim.
func (lw *storageLogWrapper) Init(config map[string]interface{}) (storage, error) {
	_, err := lw.w.Init(config)
	lw.log = shared.Log.New(
		log.Ctx{"driver": fmt.Sprintf("storage/%s", lw.w.GetStorageTypeName())},
	)

	lw.log.Info("Init")
	return lw, err
}
// GetStorageType reports the wrapped backend's type.
func (lw *storageLogWrapper) GetStorageType() storageType {
	return lw.w.GetStorageType()
}

// GetStorageTypeName reports the wrapped backend's type name.
func (lw *storageLogWrapper) GetStorageTypeName() string {
	return lw.w.GetStorageTypeName()
}

// ContainerCreate logs the call and delegates container creation.
func (lw *storageLogWrapper) ContainerCreate(container container) error {
	lw.log.Debug(
		"ContainerCreate",
		log.Ctx{
			"name":         container.NameGet(),
			"isPrivileged": container.IsPrivileged()})
	return lw.w.ContainerCreate(container)
}
// ContainerCreateFromImage logs the call, then delegates to the wrapped
// backend to create a container from the given image.
func (lw *storageLogWrapper) ContainerCreateFromImage(
	container container, imageFingerprint string) error {
	lw.log.Debug(
		// Bug fix: the debug entry was mislabeled "ContainerCreate",
		// making it indistinguishable from the plain-create call in logs.
		"ContainerCreateFromImage",
		log.Ctx{
			"imageFingerprint": imageFingerprint,
			"name":             container.NameGet(),
			"isPrivileged":     container.IsPrivileged()})
	return lw.w.ContainerCreateFromImage(container, imageFingerprint)
}
// ContainerDelete logs the call and delegates deletion.
func (lw *storageLogWrapper) ContainerDelete(container container) error {
	lw.log.Debug("ContainerDelete", log.Ctx{"container": container.NameGet()})
	return lw.w.ContainerDelete(container)
}

// ContainerCopy logs the call and delegates copying from sourceContainer.
func (lw *storageLogWrapper) ContainerCopy(
	container container, sourceContainer container) error {
	lw.log.Debug(
		"ContainerCopy",
		log.Ctx{
			"container": container.NameGet(),
			"source":    sourceContainer.NameGet()})
	return lw.w.ContainerCopy(container, sourceContainer)
}

// ContainerStart logs the call and delegates start-time handling.
func (lw *storageLogWrapper) ContainerStart(container container) error {
	lw.log.Debug("ContainerStart", log.Ctx{"container": container.NameGet()})
	return lw.w.ContainerStart(container)
}

// ContainerStop logs the call and delegates stop-time handling.
func (lw *storageLogWrapper) ContainerStop(container container) error {
	lw.log.Debug("ContainerStop", log.Ctx{"container": container.NameGet()})
	return lw.w.ContainerStop(container)
}

// ContainerRename logs the call and delegates the rename.
func (lw *storageLogWrapper) ContainerRename(
	container container, newName string) error {
	lw.log.Debug(
		"ContainerRename",
		log.Ctx{
			"container": container.NameGet(),
			"newName":   newName})
	return lw.w.ContainerRename(container, newName)
}

// ContainerRestore logs the call and delegates restoring from sourceContainer.
func (lw *storageLogWrapper) ContainerRestore(
	container container, sourceContainer container) error {
	lw.log.Debug(
		"ContainerRestore",
		log.Ctx{
			"container": container.NameGet(),
			"source":    sourceContainer.NameGet()})
	return lw.w.ContainerRestore(container, sourceContainer)
}

// ContainerSnapshotCreate logs the call and delegates snapshot creation.
func (lw *storageLogWrapper) ContainerSnapshotCreate(
	snapshotContainer container, sourceContainer container) error {
	lw.log.Debug("ContainerSnapshotCreate",
		log.Ctx{
			"snapshotContainer": snapshotContainer.NameGet(),
			"sourceContainer":   sourceContainer.NameGet()})

	return lw.w.ContainerSnapshotCreate(snapshotContainer, sourceContainer)
}

// ContainerSnapshotDelete logs the call and delegates snapshot deletion.
func (lw *storageLogWrapper) ContainerSnapshotDelete(
	snapshotContainer container) error {
	lw.log.Debug("ContainerSnapshotDelete",
		log.Ctx{"snapshotContainer": snapshotContainer.NameGet()})
	return lw.w.ContainerSnapshotDelete(snapshotContainer)
}

// ContainerSnapshotRename logs the call and delegates the snapshot rename.
func (lw *storageLogWrapper) ContainerSnapshotRename(
	snapshotContainer container, newName string) error {
	lw.log.Debug("ContainerSnapshotRename",
		log.Ctx{
			"snapshotContainer": snapshotContainer.NameGet(),
			"newName":           newName})
	return lw.w.ContainerSnapshotRename(snapshotContainer, newName)
}

// ImageCreate logs the call and delegates image creation.
func (lw *storageLogWrapper) ImageCreate(fingerprint string) error {
	lw.log.Debug(
		"ImageCreate",
		log.Ctx{"fingerprint": fingerprint})
	return lw.w.ImageCreate(fingerprint)
}

// ImageDelete logs the call and delegates image deletion.
func (lw *storageLogWrapper) ImageDelete(fingerprint string) error {
	lw.log.Debug("ImageDelete", log.Ctx{"fingerprint": fingerprint})
	return lw.w.ImageDelete(fingerprint)
}
Never create a new storage if the default is of the same type.
Signed-off-by: René Jochum <fad9a0a6f25df623a055091fe7e403534c7e9536@jochums.at>
package main
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"syscall"
"github.com/lxc/lxd/shared"
log "gopkg.in/inconshreveable/log15.v2"
)
/* Some interesting filesystems */
// Superblock magic numbers as reported by statfs(2)'s f_type field.
const (
	filesystemSuperMagicTmpfs = 0x01021994
	filesystemSuperMagicExt4  = 0xEF53
	filesystemSuperMagicXfs   = 0x58465342
	filesystemSuperMagicNfs   = 0x6969
	// NOTE(review): filesystemSuperMagicBtrfs is referenced by
	// filesystemDetect but not declared here — presumably defined in a
	// sibling file of this package; verify.
)
/*
 * filesystemDetect returns the name of the filesystem on which the
 * passed-in path sits. Unknown filesystems are reported as the decimal
 * value of their superblock magic number.
 */
func filesystemDetect(path string) (string, error) {
	fs := syscall.Statfs_t{}

	err := syscall.Statfs(path, &fs)
	if err != nil {
		return "", err
	}

	switch fs.Type {
	case filesystemSuperMagicBtrfs:
		return "btrfs", nil
	case filesystemSuperMagicTmpfs:
		return "tmpfs", nil
	case filesystemSuperMagicExt4:
		return "ext4", nil
	case filesystemSuperMagicXfs:
		return "xfs", nil
	case filesystemSuperMagicNfs:
		return "nfs", nil
	default:
		// Bug fix: string(fs.Type) converts the integer to the Unicode
		// code point with that value (a garbage rune), not its decimal
		// representation. Format the magic number as a number instead.
		return fmt.Sprintf("%d", fs.Type), nil
	}
}
// storageRsyncCopy copies a directory using rsync (with the --devices option).
// The destination directory is created if missing; rsync's combined output is
// returned together with any execution error. Verbosity follows the global
// *debug flag.
func storageRsyncCopy(source string, dest string) (string, error) {
	if err := os.MkdirAll(dest, 0755); err != nil {
		return "", err
	}
	rsyncVerbosity := "-q"
	if *debug {
		rsyncVerbosity = "-vi"
	}

	output, err := exec.Command(
		"rsync",
		"-a",
		"--checksum", // TODO: Not sure we need this option
		"-HAX",
		"--devices",
		"--delete",
		rsyncVerbosity,
		// Trailing slash on the source: rsync then copies the directory's
		// contents rather than the directory itself.
		shared.AddSlash(source),
		dest).CombinedOutput()

	return string(output), err
}
// storageUnprivUserAclSet grants the host uid that the container's root user
// maps to read+execute access on dpath via a POSIX ACL (setfacl). It is a
// no-op when the container has no idmap or when container root maps to host
// root (uid 0), which already has access.
func storageUnprivUserAclSet(c container, dpath string) error {
	idmapset, err := c.IdmapSetGet()
	if err != nil {
		return err
	}
	if idmapset == nil {
		return nil
	}
	uid, _ := idmapset.ShiftIntoNs(0, 0)
	switch uid {
	case -1:
		// No mapping for root; nothing we can grant access to.
		shared.Debugf("storageUnprivUserAclSet: no root id mapping")
		return nil
	case 0:
		// Container root is host root; no ACL needed.
		return nil
	}
	acl := fmt.Sprintf("%d:rx", uid)
	output, err := exec.Command("setfacl", "-m", acl, dpath).CombinedOutput()
	if err != nil {
		shared.Debugf("storageUnprivUserAclSet: setfacl failed:\n%s", output)
	}
	return err
}
// storageType defines the type of a storage
type storageType int

const (
	storageTypeBtrfs storageType = iota
	storageTypeLvm
	storageTypeDir
)

// storageTypeToString maps a storageType to its canonical name. Anything not
// explicitly recognized is reported as "dir".
func storageTypeToString(sType storageType) string {
	if sType == storageTypeBtrfs {
		return "btrfs"
	}
	if sType == storageTypeLvm {
		return "lvm"
	}
	return "dir"
}
// storage is the interface every storage backend (dir, btrfs, lvm)
// implements; it covers the lifecycle of containers, snapshots and images.
type storage interface {
	// Init configures the backend and returns the storage object callers
	// should use from then on.
	Init(config map[string]interface{}) (storage, error)

	GetStorageType() storageType
	GetStorageTypeName() string

	// ContainerCreate creates an empty container (no rootfs/metadata.yaml)
	ContainerCreate(container container) error

	// ContainerCreateFromImage creates a container from a image.
	ContainerCreateFromImage(container container, imageFingerprint string) error

	ContainerDelete(container container) error
	ContainerCopy(container container, sourceContainer container) error
	ContainerStart(container container) error
	ContainerStop(container container) error
	ContainerRename(container container, newName string) error
	ContainerRestore(container container, sourceContainer container) error
	ContainerSnapshotCreate(
		snapshotContainer container, sourceContainer container) error
	ContainerSnapshotDelete(snapshotContainer container) error
	ContainerSnapshotRename(snapshotContainer container, newName string) error

	ImageCreate(fingerprint string) error
	ImageDelete(fingerprint string) error
}
// newStorage instantiates a storage backend of the given type with no
// explicit configuration.
func newStorage(d *Daemon, sType storageType) (storage, error) {
	var nilmap map[string]interface{}
	return newStorageWithConfig(d, sType, nilmap)
}

// newStorageWithConfig returns a storage backend of the given type,
// initialized with config. If the daemon's default storage already has the
// (effective) requested type it is reused instead of creating a new one.
// Unknown types fall back to the plain directory backend.
func newStorageWithConfig(d *Daemon, sType storageType, config map[string]interface{}) (storage, error) {
	// Normalize: anything that is not btrfs or lvm is backed by the dir
	// driver, matching the switch default below. This lets us hoist the
	// three previously duplicated "reuse d.Storage" checks into one.
	effective := storageTypeDir
	switch sType {
	case storageTypeBtrfs, storageTypeLvm:
		effective = sType
	}

	// Never create a new storage object if the default one is of the same type.
	if d.Storage != nil && d.Storage.GetStorageType() == effective {
		return d.Storage, nil
	}

	var s storage
	switch effective {
	case storageTypeBtrfs:
		s = &storageLogWrapper{w: &storageBtrfs{d: d, sType: sType}}
	case storageTypeLvm:
		s = &storageLogWrapper{w: &storageLvm{d: d, sType: sType}}
	default:
		s = &storageLogWrapper{w: &storageDir{d: d, sType: sType}}
	}
	return s.Init(config)
}
// storageForFilename picks a storage backend for the given path: LVM if a
// matching ".lv" symlink exists, btrfs if the path sits on a btrfs
// filesystem, and the plain directory backend otherwise.
func storageForFilename(d *Daemon, filename string) (storage, error) {
	config := make(map[string]interface{})
	storageType := storageTypeDir

	// An LVM-backed entity leaves a "<name>.lv" symlink pointing at the
	// logical volume.
	lvLinkPath := filename + ".lv"

	filesystem, err := filesystemDetect(filename)
	if err != nil {
		return nil, fmt.Errorf("couldn't detect filesystem for '%s': %v", filename, err)
	}

	if shared.PathExists(lvLinkPath) {
		storageType = storageTypeLvm
		lvPath, err := os.Readlink(lvLinkPath)
		if err != nil {
			return nil, fmt.Errorf("couldn't read link dest for '%s': %v", lvLinkPath, err)
		}
		// The link target looks like .../<vgname>/<lvname>; the VG name is
		// the parent directory component.
		vgname := filepath.Base(filepath.Dir(lvPath))
		config["vgName"] = vgname
	} else if filesystem == "btrfs" {
		storageType = storageTypeBtrfs
	}
	return newStorageWithConfig(d, storageType, config)
}

// storageForImage returns the storage backend appropriate for the on-disk
// image identified by its fingerprint.
func storageForImage(d *Daemon, imgInfo *shared.ImageBaseInfo) (storage, error) {
	imageFilename := shared.VarPath("images", imgInfo.Fingerprint)
	return storageForFilename(d, imageFilename)
}
// storageShared holds state common to all storage backend implementations.
type storageShared struct {
	sTypeName string // backend type name, e.g. used for the "storage/<name>" log context

	log log.Logger
}

// initShared sets up the per-driver logger; intended to be called from each
// backend's Init.
func (ss *storageShared) initShared() error {
	ss.log = shared.Log.New(
		log.Ctx{"driver": fmt.Sprintf("storage/%s", ss.sTypeName)},
	)
	return nil
}

// GetStorageTypeName returns the backend's type name.
func (ss *storageShared) GetStorageTypeName() string {
	return ss.sTypeName
}
// shiftRootfs shifts the uid/gid ownership of the container's rootfs into
// the container's idmap range, then makes the container directory
// accessible to the mapped root user.
func (ss *storageShared) shiftRootfs(c container) error {
	dpath := c.PathGet("")
	rpath := c.RootfsPathGet()

	shared.Log.Debug("shiftRootfs",
		log.Ctx{"container": c.NameGet(), "rootfs": rpath})

	idmapset, err := c.IdmapSetGet()
	if err != nil {
		return err
	}

	// Without an idmap there is nothing to shift into; treat as an error.
	if idmapset == nil {
		return fmt.Errorf("IdmapSet of container '%s' is nil", c.NameGet())
	}

	err = idmapset.ShiftRootfs(rpath)
	if err != nil {
		shared.Debugf("Shift of rootfs %s failed: %s\n", rpath, err)
		return err
	}

	/* Set an acl so the container root can descend the container dir */
	// TODO: i changed this so it calls ss.setUnprivUserAcl, which does
	// the acl change only if the container is not privileged, think thats right.
	return ss.setUnprivUserAcl(c, dpath)
}

// setUnprivUserAcl grants the container's unprivileged root user access to
// destPath via an ACL; if setting the ACL fails it falls back to a plain
// "chmod +x". Privileged containers are skipped entirely.
func (ss *storageShared) setUnprivUserAcl(c container, destPath string) error {
	if !c.IsPrivileged() {
		err := storageUnprivUserAclSet(c, destPath)
		if err != nil {
			ss.log.Error(
				"adding acl for container root: falling back to chmod",
				log.Ctx{"destPath": destPath})

			output, err := exec.Command(
				"chmod", "+x", destPath).CombinedOutput()
			if err != nil {
				ss.log.Error(
					"chmoding the container root",
					log.Ctx{
						"destPath": destPath,
						"output":   output})
				return err
			}
		}
	}
	return nil
}
// storageLogWrapper decorates another storage implementation, logging each
// call at debug level before delegating to the wrapped backend.
type storageLogWrapper struct {
	w   storage
	log log.Logger
}

// Init initializes the wrapped backend, then sets up this wrapper's own
// per-driver logger. The wrapper (not the inner backend) is returned so all
// subsequent calls go through the logging shim.
func (lw *storageLogWrapper) Init(config map[string]interface{}) (storage, error) {
	_, err := lw.w.Init(config)
	lw.log = shared.Log.New(
		log.Ctx{"driver": fmt.Sprintf("storage/%s", lw.w.GetStorageTypeName())},
	)

	lw.log.Info("Init")
	return lw, err
}
// GetStorageType reports the wrapped backend's type.
func (lw *storageLogWrapper) GetStorageType() storageType {
	return lw.w.GetStorageType()
}

// GetStorageTypeName reports the wrapped backend's type name.
func (lw *storageLogWrapper) GetStorageTypeName() string {
	return lw.w.GetStorageTypeName()
}

// ContainerCreate logs the call and delegates container creation.
func (lw *storageLogWrapper) ContainerCreate(container container) error {
	lw.log.Debug(
		"ContainerCreate",
		log.Ctx{
			"name":         container.NameGet(),
			"isPrivileged": container.IsPrivileged()})
	return lw.w.ContainerCreate(container)
}
// ContainerCreateFromImage logs the call, then delegates to the wrapped
// backend to create a container from the given image.
func (lw *storageLogWrapper) ContainerCreateFromImage(
	container container, imageFingerprint string) error {
	lw.log.Debug(
		// Bug fix: the debug entry was mislabeled "ContainerCreate",
		// making it indistinguishable from the plain-create call in logs.
		"ContainerCreateFromImage",
		log.Ctx{
			"imageFingerprint": imageFingerprint,
			"name":             container.NameGet(),
			"isPrivileged":     container.IsPrivileged()})
	return lw.w.ContainerCreateFromImage(container, imageFingerprint)
}
// ContainerDelete logs the call and delegates deletion.
func (lw *storageLogWrapper) ContainerDelete(container container) error {
	lw.log.Debug("ContainerDelete", log.Ctx{"container": container.NameGet()})
	return lw.w.ContainerDelete(container)
}

// ContainerCopy logs the call and delegates copying from sourceContainer.
func (lw *storageLogWrapper) ContainerCopy(
	container container, sourceContainer container) error {
	lw.log.Debug(
		"ContainerCopy",
		log.Ctx{
			"container": container.NameGet(),
			"source":    sourceContainer.NameGet()})
	return lw.w.ContainerCopy(container, sourceContainer)
}

// ContainerStart logs the call and delegates start-time handling.
func (lw *storageLogWrapper) ContainerStart(container container) error {
	lw.log.Debug("ContainerStart", log.Ctx{"container": container.NameGet()})
	return lw.w.ContainerStart(container)
}

// ContainerStop logs the call and delegates stop-time handling.
func (lw *storageLogWrapper) ContainerStop(container container) error {
	lw.log.Debug("ContainerStop", log.Ctx{"container": container.NameGet()})
	return lw.w.ContainerStop(container)
}

// ContainerRename logs the call and delegates the rename.
func (lw *storageLogWrapper) ContainerRename(
	container container, newName string) error {
	lw.log.Debug(
		"ContainerRename",
		log.Ctx{
			"container": container.NameGet(),
			"newName":   newName})
	return lw.w.ContainerRename(container, newName)
}

// ContainerRestore logs the call and delegates restoring from sourceContainer.
func (lw *storageLogWrapper) ContainerRestore(
	container container, sourceContainer container) error {
	lw.log.Debug(
		"ContainerRestore",
		log.Ctx{
			"container": container.NameGet(),
			"source":    sourceContainer.NameGet()})
	return lw.w.ContainerRestore(container, sourceContainer)
}

// ContainerSnapshotCreate logs the call and delegates snapshot creation.
func (lw *storageLogWrapper) ContainerSnapshotCreate(
	snapshotContainer container, sourceContainer container) error {
	lw.log.Debug("ContainerSnapshotCreate",
		log.Ctx{
			"snapshotContainer": snapshotContainer.NameGet(),
			"sourceContainer":   sourceContainer.NameGet()})

	return lw.w.ContainerSnapshotCreate(snapshotContainer, sourceContainer)
}

// ContainerSnapshotDelete logs the call and delegates snapshot deletion.
func (lw *storageLogWrapper) ContainerSnapshotDelete(
	snapshotContainer container) error {
	lw.log.Debug("ContainerSnapshotDelete",
		log.Ctx{"snapshotContainer": snapshotContainer.NameGet()})
	return lw.w.ContainerSnapshotDelete(snapshotContainer)
}

// ContainerSnapshotRename logs the call and delegates the snapshot rename.
func (lw *storageLogWrapper) ContainerSnapshotRename(
	snapshotContainer container, newName string) error {
	lw.log.Debug("ContainerSnapshotRename",
		log.Ctx{
			"snapshotContainer": snapshotContainer.NameGet(),
			"newName":           newName})
	return lw.w.ContainerSnapshotRename(snapshotContainer, newName)
}

// ImageCreate logs the call and delegates image creation.
func (lw *storageLogWrapper) ImageCreate(fingerprint string) error {
	lw.log.Debug(
		"ImageCreate",
		log.Ctx{"fingerprint": fingerprint})
	return lw.w.ImageCreate(fingerprint)
}

// ImageDelete logs the call and delegates image deletion.
func (lw *storageLogWrapper) ImageDelete(fingerprint string) error {
	lw.log.Debug("ImageDelete", log.Ctx{"fingerprint": fingerprint})
	return lw.w.ImageDelete(fingerprint)
}
|
package lxlog
import (
"fmt"
"github.com/Sirupsen/logrus"
"runtime"
"strings"
"io"
"net/http"
"os/exec"
"bufio"
)
// GlobalLogLevel is the level applied to every newly created LxLogger.
var GlobalLogLevel Level = InfoLevel

const (
	// default_logger keys the logrus logger every LxLogger starts with.
	default_logger = "default_logger"

	PanicLevel = Level("PanicLevel")
	FatalLevel = Level("FatalLevel")
	ErrorLevel = Level("ErrorLevel")
	WarnLevel  = Level("WarnLevel")
	InfoLevel  = Level("InfoLevel")
	DebugLevel = Level("DebugLevel")
)

// logLevels maps our string-typed levels onto logrus levels.
var logLevels = map[Level]logrus.Level{
	PanicLevel: logrus.PanicLevel,
	FatalLevel: logrus.FatalLevel,
	ErrorLevel: logrus.ErrorLevel,
	WarnLevel:  logrus.WarnLevel,
	InfoLevel:  logrus.InfoLevel,
	DebugLevel: logrus.DebugLevel,
}
// Level is the string name of a logging severity level.
type Level string

// String returns the level's name.
func (l Level) String() string {
	return string(l)
}
// Fields carries structured key/value pairs attached to log entries;
// it mirrors logrus.Fields.
type Fields logrus.Fields

// LxLogger fans log entries out to a set of named logrus loggers.
type LxLogger struct {
	loggers map[string]*logrus.Logger // writers keyed by name; starts with default_logger
	fields  Fields                    // fields attached to every entry
	err     error                     // error attached to every entry, if set
	name    string                    // logger name, included in the trace prefix
}
// New creates an LxLogger with the given name, containing only the default
// logrus logger, set to the current GlobalLogLevel.
func New(name string) *LxLogger {
	lxlogger := &LxLogger{
		loggers: map[string]*logrus.Logger{default_logger: logrus.New()},
		name:    name,
	}
	lxlogger.SetLogLevel(GlobalLogLevel)
	return lxlogger
}
// WithFields returns a copy of the logger that attaches the given fields to
// every entry. The underlying loggers map is shared, not copied.
func (lxlog *LxLogger) WithFields(fields Fields) *LxLogger {
	return &LxLogger{
		loggers: lxlog.loggers,
		fields:  fields,
		err:     lxlog.err,
		name:    lxlog.name,
	}
}

// WithErr returns a copy of the logger that attaches err to every entry.
func (lxlog *LxLogger) WithErr(err error) *LxLogger {
	return &LxLogger{
		loggers: lxlog.loggers,
		fields:  lxlog.fields,
		err:     err,
		name:    lxlog.name,
	}
}
// SetLogLevel applies the given level to every registered logger.
func (lxlog *LxLogger) SetLogLevel(level Level) {
	for _, logrusLogger := range lxlog.loggers {
		logrusLogger.Level = logLevels[level]
	}
}

// AddWriter registers an additional named logger that writes to w at the
// given level; an existing logger with the same name is replaced.
func (lxlog *LxLogger) AddWriter(name string, level Level, w io.Writer) {
	newLogger := logrus.New()
	newLogger.Out = w
	newLogger.Level = logLevels[level]
	lxlog.loggers[name] = newLogger
}

// DeleteWriter removes the named logger, if present.
func (lxlog *LxLogger) DeleteWriter(name string) {
	delete(lxlog.loggers, name)
}
// LogCommand wires the command's stdout and stderr pipes into this logger:
// stdout lines go to Debug (if asDebug is true) or Info, stderr lines go to
// Error. Pipes must be attached before cmd is started; errors obtaining the
// pipes are silently ignored, preserving the original best-effort behavior.
func (lxlog *LxLogger) LogCommand(cmd *exec.Cmd, asDebug bool) {
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return
	}
	go func() {
		// read command's stdout line by line
		in := bufio.NewScanner(stdout)
		for in.Scan() {
			// Bug fix: the line used to be passed as the format string
			// itself, so any '%' in command output was misinterpreted
			// as a formatting verb. Pass it as an argument instead.
			if asDebug {
				lxlog.Debugf("%s", in.Text())
			} else {
				lxlog.Infof("%s", in.Text())
			}
		}
	}()
	go func() {
		// read command's stderr line by line
		in := bufio.NewScanner(stderr)
		for in.Scan() {
			lxlog.Errorf("%s", in.Text())
		}
	}()
}
// Infof logs at info level.
func (lxlog *LxLogger) Infof(format string, a ...interface{}) {
	lxlog.log(InfoLevel, format, a...)
}

// Debugf logs at debug level.
func (lxlog *LxLogger) Debugf(format string, a ...interface{}) {
	lxlog.log(DebugLevel, format, a...)
}

// Warnf logs at warning level.
func (lxlog *LxLogger) Warnf(format string, a ...interface{}) {
	lxlog.log(WarnLevel, format, a...)
}

// Errorf logs at error level.
func (lxlog *LxLogger) Errorf(format string, a ...interface{}) {
	lxlog.log(ErrorLevel, format, a...)
}

// Fatalf logs at fatal level (logrus then exits the process).
func (lxlog *LxLogger) Fatalf(format string, a ...interface{}) {
	lxlog.log(FatalLevel, format, a...)
}

// Panicf logs at panic level (logrus then panics).
func (lxlog *LxLogger) Panicf(format string, a ...interface{}) {
	lxlog.log(PanicLevel, format, a...)
}
// log prefixes the message with caller information and fans it out to every
// registered logrus logger at the requested level, flushing any writer that
// implements http.Flusher so streamed output is delivered promptly.
func (lxlog *LxLogger) log(level Level, format string, a ...interface{}) {
	format = lxlog.addTrace(format)
	for _, optionalLog := range lxlog.loggers {
		entry := optionalLog.WithFields(logrus.Fields(lxlog.fields))
		if lxlog.err != nil {
			entry = entry.WithError(lxlog.err)
		}
		// Idiom fix: Go switch cases do not fall through, so the explicit
		// "break" statements in the original were redundant noise.
		switch level {
		case PanicLevel:
			entry.Panicf(format, a...)
		case FatalLevel:
			entry.Fatalf(format, a...)
		case ErrorLevel:
			entry.Errorf(format, a...)
		case WarnLevel:
			entry.Warnf(format, a...)
		case InfoLevel:
			entry.Infof(format, a...)
		case DebugLevel:
			entry.Debugf(format, a...)
		}
		if flusher, ok := optionalLog.Out.(http.Flusher); ok {
			flusher.Flush()
		}
	}
}
// addTrace prefixes format with caller context: "(name): func[file:line] ".
// The fixed depth of 3 skips addTrace, log and the level wrapper so the
// reported location is the user's call site.
func (lxlog *LxLogger) addTrace(format string) string {
	pc, fn, line, _ := runtime.Caller(3)
	pathComponents := strings.Split(fn, "/")
	var truncatedPath string
	// Keep at most the last two path components so log lines stay short.
	if len(pathComponents) > 3 {
		truncatedPath = strings.Join(pathComponents[len(pathComponents) - 2:], "/")
	} else {
		truncatedPath = strings.Join(pathComponents, "/")
	}
	fnName := runtime.FuncForPC(pc).Name()
	fnNameComponents := strings.Split(fnName, "/")
	truncatedFnName := fnNameComponents[len(fnNameComponents) - 1]
	file := fmt.Sprintf("(%s): %s[%s:%d] ", lxlog.name, truncatedFnName, truncatedPath, line)
	return file + format
}
Enable adding extra trace levels to the logger's caller lookup
Signed-off-by: Scott Weiss <c131bbdcbdcedaee666cc960f646505569f0b8f5@emc.com>
package lxlog
import (
"fmt"
"github.com/Sirupsen/logrus"
"runtime"
"strings"
"io"
"net/http"
"os/exec"
"bufio"
)
// GlobalLogLevel is the level applied to every newly created LxLogger.
var GlobalLogLevel Level = InfoLevel

const (
	// default_logger keys the logrus logger every LxLogger starts with.
	default_logger = "default_logger"
	// default_trace is the fixed runtime.Caller depth from addTrace up
	// through log and a level wrapper to the user's call site.
	default_trace = 3

	PanicLevel = Level("PanicLevel")
	FatalLevel = Level("FatalLevel")
	ErrorLevel = Level("ErrorLevel")
	WarnLevel  = Level("WarnLevel")
	InfoLevel  = Level("InfoLevel")
	DebugLevel = Level("DebugLevel")
)

// logLevels maps our string-typed levels onto logrus levels.
var logLevels = map[Level]logrus.Level{
	PanicLevel: logrus.PanicLevel,
	FatalLevel: logrus.FatalLevel,
	ErrorLevel: logrus.ErrorLevel,
	WarnLevel:  logrus.WarnLevel,
	InfoLevel:  logrus.InfoLevel,
	DebugLevel: logrus.DebugLevel,
}
// Level is the string name of a logging severity level.
type Level string

// String returns the level's name.
func (l Level) String() string {
	return string(l)
}
// Fields carries structured key/value pairs attached to log entries;
// it mirrors logrus.Fields.
type Fields logrus.Fields

// LxLogger fans log entries out to a set of named logrus loggers.
type LxLogger struct {
	loggers map[string]*logrus.Logger // writers keyed by name; starts with default_logger
	fields  Fields                    // fields attached to every entry
	err     error                     // error attached to every entry, if set
	name    string                    // logger name, included in the trace prefix
	trace   int                       // extra stack frames to skip in addTrace
}
// New creates an LxLogger with the given name, containing only the default
// logrus logger, set to the current GlobalLogLevel and with no extra trace
// levels.
func New(name string) *LxLogger {
	lxlogger := &LxLogger{
		loggers: map[string]*logrus.Logger{default_logger: logrus.New()},
		name:    name,
		trace:   0,
	}
	lxlogger.SetLogLevel(GlobalLogLevel)
	return lxlogger
}
// WithFields returns a copy of the logger that attaches the given fields to
// every entry. The underlying loggers map is shared, not copied.
func (lxlog *LxLogger) WithFields(fields Fields) *LxLogger {
	return &LxLogger{
		loggers: lxlog.loggers,
		fields:  fields,
		err:     lxlog.err,
		name:    lxlog.name,
		trace:   lxlog.trace,
	}
}

// WithErr returns a copy of the logger that attaches err to every entry.
func (lxlog *LxLogger) WithErr(err error) *LxLogger {
	return &LxLogger{
		loggers: lxlog.loggers,
		fields:  lxlog.fields,
		err:     err,
		name:    lxlog.name,
		trace:   lxlog.trace,
	}
}

// WithTrace returns a copy of the logger whose caller lookup walks trace
// extra stack frames on top of default_trace; useful when logging through
// additional helper layers.
func (lxlog *LxLogger) WithTrace(trace int) *LxLogger {
	return &LxLogger{
		loggers: lxlog.loggers,
		fields:  lxlog.fields,
		err:     lxlog.err,
		name:    lxlog.name,
		trace:   trace,
	}
}
// SetLogLevel applies the given level to every registered logger.
func (lxlog *LxLogger) SetLogLevel(level Level) {
	for _, logrusLogger := range lxlog.loggers {
		logrusLogger.Level = logLevels[level]
	}
}

// AddWriter registers an additional named logger that writes to w at the
// given level; an existing logger with the same name is replaced.
func (lxlog *LxLogger) AddWriter(name string, level Level, w io.Writer) {
	newLogger := logrus.New()
	newLogger.Out = w
	newLogger.Level = logLevels[level]
	lxlog.loggers[name] = newLogger
}

// DeleteWriter removes the named logger, if present.
func (lxlog *LxLogger) DeleteWriter(name string) {
	delete(lxlog.loggers, name)
}
// LogCommand wires the command's stdout and stderr pipes into this logger:
// stdout lines go to Debug (if asDebug is true) or Info, stderr lines go to
// Error. Pipes must be attached before cmd is started; errors obtaining the
// pipes are silently ignored, preserving the original best-effort behavior.
func (lxlog *LxLogger) LogCommand(cmd *exec.Cmd, asDebug bool) {
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return
	}
	go func() {
		// read command's stdout line by line
		in := bufio.NewScanner(stdout)
		for in.Scan() {
			// Bug fix: the line used to be passed as the format string
			// itself, so any '%' in command output was misinterpreted
			// as a formatting verb. Pass it as an argument instead.
			if asDebug {
				lxlog.Debugf("%s", in.Text())
			} else {
				lxlog.Infof("%s", in.Text())
			}
		}
	}()
	go func() {
		// read command's stderr line by line
		in := bufio.NewScanner(stderr)
		for in.Scan() {
			lxlog.Errorf("%s", in.Text())
		}
	}()
}
// Infof logs at info level.
func (lxlog *LxLogger) Infof(format string, a ...interface{}) {
	lxlog.log(InfoLevel, format, a...)
}

// Debugf logs at debug level.
func (lxlog *LxLogger) Debugf(format string, a ...interface{}) {
	lxlog.log(DebugLevel, format, a...)
}

// Warnf logs at warning level.
func (lxlog *LxLogger) Warnf(format string, a ...interface{}) {
	lxlog.log(WarnLevel, format, a...)
}

// Errorf logs at error level.
func (lxlog *LxLogger) Errorf(format string, a ...interface{}) {
	lxlog.log(ErrorLevel, format, a...)
}

// Fatalf logs at fatal level (logrus then exits the process).
func (lxlog *LxLogger) Fatalf(format string, a ...interface{}) {
	lxlog.log(FatalLevel, format, a...)
}

// Panicf logs at panic level (logrus then panics).
func (lxlog *LxLogger) Panicf(format string, a ...interface{}) {
	lxlog.log(PanicLevel, format, a...)
}
// log prefixes the message with caller information and fans it out to every
// registered logrus logger at the requested level, flushing any writer that
// implements http.Flusher so streamed output is delivered promptly.
func (lxlog *LxLogger) log(level Level, format string, a ...interface{}) {
	format = lxlog.addTrace(format)
	for _, optionalLog := range lxlog.loggers {
		entry := optionalLog.WithFields(logrus.Fields(lxlog.fields))
		if lxlog.err != nil {
			entry = entry.WithError(lxlog.err)
		}
		// Idiom fix: Go switch cases do not fall through, so the explicit
		// "break" statements in the original were redundant noise.
		switch level {
		case PanicLevel:
			entry.Panicf(format, a...)
		case FatalLevel:
			entry.Fatalf(format, a...)
		case ErrorLevel:
			entry.Errorf(format, a...)
		case WarnLevel:
			entry.Warnf(format, a...)
		case InfoLevel:
			entry.Infof(format, a...)
		case DebugLevel:
			entry.Debugf(format, a...)
		}
		if flusher, ok := optionalLog.Out.(http.Flusher); ok {
			flusher.Flush()
		}
	}
}
// addTrace prefixes format with caller context: "(name): func[file:line] ".
// The stack depth is default_trace (the fixed hops through the logging
// helpers) plus any extra levels configured via WithTrace.
func (lxlog *LxLogger) addTrace(format string) string {
	pc, fn, line, _ := runtime.Caller(default_trace+lxlog.trace)
	pathComponents := strings.Split(fn, "/")
	var truncatedPath string
	// Keep at most the last two path components so log lines stay short.
	if len(pathComponents) > 3 {
		truncatedPath = strings.Join(pathComponents[len(pathComponents) - 2:], "/")
	} else {
		truncatedPath = strings.Join(pathComponents, "/")
	}
	fnName := runtime.FuncForPC(pc).Name()
	fnNameComponents := strings.Split(fnName, "/")
	truncatedFnName := fnNameComponents[len(fnNameComponents) - 1]
	file := fmt.Sprintf("(%s): %s[%s:%d] ", lxlog.name, truncatedFnName, truncatedPath, line)
	return file + format
}
|
// Copyright 2014 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Spencer Kimball (spencer.kimball@gmail.com)
package storage
import (
"bytes"
"fmt"
"math"
"math/rand"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"golang.org/x/net/context"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
"github.com/cockroachdb/cockroach/base"
"github.com/cockroachdb/cockroach/config"
"github.com/cockroachdb/cockroach/gossip"
"github.com/cockroachdb/cockroach/internal/client"
"github.com/cockroachdb/cockroach/keys"
"github.com/cockroachdb/cockroach/roachpb"
"github.com/cockroachdb/cockroach/rpc"
"github.com/cockroachdb/cockroach/storage/engine"
"github.com/cockroachdb/cockroach/storage/engine/enginepb"
"github.com/cockroachdb/cockroach/storage/storagebase"
"github.com/cockroachdb/cockroach/testutils"
"github.com/cockroachdb/cockroach/util"
"github.com/cockroachdb/cockroach/util/caller"
"github.com/cockroachdb/cockroach/util/hlc"
"github.com/cockroachdb/cockroach/util/leaktest"
"github.com/cockroachdb/cockroach/util/log"
"github.com/cockroachdb/cockroach/util/metric"
"github.com/cockroachdb/cockroach/util/protoutil"
"github.com/cockroachdb/cockroach/util/stop"
"github.com/cockroachdb/cockroach/util/uuid"
)
// testRangeDescriptor returns a descriptor for a range covering the entire
// keyspace, with a single replica on node 1 / store 1.
func testRangeDescriptor() *roachpb.RangeDescriptor {
	return &roachpb.RangeDescriptor{
		RangeID:  1,
		StartKey: roachpb.RKeyMin,
		EndKey:   roachpb.RKeyMax,
		Replicas: []roachpb.ReplicaDescriptor{
			{
				ReplicaID: 1,
				NodeID:    1,
				StoreID:   1,
			},
		},
		NextReplicaID: 2,
	}
}
// bootstrapMode controls how the first range is created in testContext.
type bootstrapMode int

const (
	// Use Store.BootstrapRange, which writes the range descriptor and
	// other metadata. Most tests should use this mode because it more
	// closely resembles the real world.
	bootstrapRangeWithMetadata bootstrapMode = iota
	// Create a range with NewRange and Store.AddRangeTest. The store's data
	// will be persisted but metadata will not.
	//
	// Tests which run in this mode play fast and loose; they want
	// a Replica which doesn't have too many moving parts, but then
	// may still exercise a sizable amount of code, be it by accident
	// or design. We bootstrap them here with what's absolutely
	// necessary to not immediately crash on a Raft command, but
	// nothing more.
	// If you read this and you're writing a new test, try not to
	// use this mode - it's deprecated and tends to get in the way
	// of new development.
	bootstrapRangeOnly
)
// leaseExpiry returns a duration in nanos after which any range lease the
// Replica may hold is expired. It is more precise than LeaseExpiration
// in that it returns the minimal duration necessary.
func leaseExpiry(rng *Replica) int64 {
	l, _ := rng.getLease()
	if l == nil {
		return 0
	}
	return l.Expiration.WallTime + 1
}
// testContext contains all the objects necessary to test a Range.
// In most cases, simply call Start(t) (and later Stop()) on a zero-initialized
// testContext{}. Any fields which are initialized to non-nil values
// will be used as-is.
type testContext struct {
	testing.TB
	transport     *RaftTransport
	store         *Store
	rng           *Replica
	rangeID       roachpb.RangeID
	gossip        *gossip.Gossip
	engine        engine.Engine
	manualClock   *hlc.ManualClock
	clock         *hlc.Clock
	stopper       *stop.Stopper
	bootstrapMode bootstrapMode // how the first range is created; see bootstrapMode
}
// Start initializes the test context with a single range covering the
// entire keyspace, using the default test store context.
func (tc *testContext) Start(t testing.TB) {
	tc.StartWithStoreContext(t, TestStoreContext())
}
// StartWithStoreContext initializes the test context with a single
// range covering the entire keyspace. Zero-valued fields of tc get fresh
// test fixtures; fields the caller pre-populated are used as-is.
func (tc *testContext) StartWithStoreContext(t testing.TB, ctx StoreContext) {
	tc.TB = t
	if tc.stopper == nil {
		tc.stopper = stop.NewStopper()
	}
	// Setup fake zone config handler.
	config.TestingSetupZoneConfigHook(tc.stopper)
	if tc.gossip == nil {
		rpcContext := rpc.NewContext(&base.Context{Insecure: true}, nil, tc.stopper)
		server := rpc.NewServer(rpcContext) // never started
		tc.gossip = gossip.New(rpcContext, server, nil, tc.stopper, metric.NewRegistry())
		tc.gossip.SetNodeID(1)
	}
	if tc.manualClock == nil {
		tc.manualClock = hlc.NewManualClock(0)
	}
	if tc.clock == nil {
		tc.clock = hlc.NewClock(tc.manualClock.UnixNano)
	}
	if tc.engine == nil {
		tc.engine = engine.NewInMem(roachpb.Attributes{Attrs: []string{"dc1", "mem"}}, 1<<20, tc.stopper)
	}
	if tc.transport == nil {
		tc.transport = NewDummyRaftTransport()
	}

	if tc.store == nil {
		ctx.Clock = tc.clock
		ctx.Gossip = tc.gossip
		ctx.Transport = tc.transport
		// Create a test sender without setting a store. This is to deal with the
		// circular dependency between the test sender and the store. The actual
		// store will be passed to the sender after it is created and bootstrapped.
		sender := &testSender{}
		ctx.DB = client.NewDB(sender)
		tc.store = NewStore(ctx, tc.engine, &roachpb.NodeDescriptor{NodeID: 1})
		if err := tc.store.Bootstrap(roachpb.StoreIdent{
			ClusterID: uuid.MakeV4(),
			NodeID:    1,
			StoreID:   1,
		}, tc.stopper); err != nil {
			t.Fatal(err)
		}
		// Now that we have our actual store, monkey patch the sender used in ctx.DB.
		sender.store = tc.store
		// We created the store without a real KV client, so it can't perform splits.
		tc.store.splitQueue.SetDisabled(true)

		if tc.rng == nil && tc.bootstrapMode == bootstrapRangeWithMetadata {
			if err := tc.store.BootstrapRange(nil); err != nil {
				t.Fatal(err)
			}
		}
		if err := tc.store.Start(tc.stopper); err != nil {
			t.Fatal(err)
		}
		tc.store.WaitForInit()
	}

	realRange := tc.rng == nil

	if realRange {
		if tc.bootstrapMode == bootstrapRangeOnly {
			// Write the minimal on-disk state needed for a Replica, then
			// register it with the store directly, bypassing bootstrap.
			testDesc := testRangeDescriptor()
			if _, err := writeInitialState(
				context.Background(),
				tc.store.Engine(),
				enginepb.MVCCStats{},
				*testDesc,
			); err != nil {
				t.Fatal(err)
			}
			rng, err := NewReplica(testDesc, tc.store, 0)
			if err != nil {
				t.Fatal(err)
			}
			if err := tc.store.AddReplicaTest(rng); err != nil {
				t.Fatal(err)
			}
		}

		var err error
		tc.rng, err = tc.store.GetReplica(1)
		if err != nil {
			t.Fatal(err)
		}
		tc.rangeID = tc.rng.RangeID
	}

	if err := tc.initConfigs(realRange, t); err != nil {
		t.Fatal(err)
	}
}
// Sender returns a sender wrapped around the test Replica which rewrites any
// nonzero RangeID to 1 and stamps the batch with the current clock time when
// it carries no timestamp.
func (tc *testContext) Sender() client.Sender {
	return client.Wrap(tc.rng, func(ba roachpb.BatchRequest) roachpb.BatchRequest {
		if ba.RangeID != 0 {
			ba.RangeID = 1
		}
		if ba.Timestamp == hlc.ZeroTimestamp {
			if err := ba.SetActiveTimestamp(tc.clock.Now); err != nil {
				tc.Fatal(err)
			}
		}
		return ba
	})
}

// SendWrappedWith is a convenience function which wraps the request in a batch
// and sends it
func (tc *testContext) SendWrappedWith(h roachpb.Header, args roachpb.Request) (roachpb.Response, *roachpb.Error) {
	return client.SendWrappedWith(tc.Sender(), context.Background(), h, args)
}

// SendWrapped is identical to SendWrappedWith with a zero header.
func (tc *testContext) SendWrapped(args roachpb.Request) (roachpb.Response, *roachpb.Error) {
	return tc.SendWrappedWith(roachpb.Header{}, args)
}
// Stop shuts down everything started by the test context.
func (tc *testContext) Stop() {
	tc.stopper.Stop()
}

// initConfigs creates default configuration entries.
// NOTE(review): the realRange parameter is currently unused here.
func (tc *testContext) initConfigs(realRange bool, t testing.TB) error {
	// Put an empty system config into gossip so that gossip callbacks get
	// run. We're using a fake config, but it's hooked into SystemConfig.
	if err := tc.gossip.AddInfoProto(gossip.KeySystemConfig,
		&config.SystemConfig{}, 0); err != nil {
		return err
	}

	// Wait for the config to actually propagate through gossip.
	util.SucceedsSoon(t, func() error {
		if _, ok := tc.gossip.GetSystemConfig(); !ok {
			return errors.Errorf("expected system config to be set")
		}
		return nil
	})

	return nil
}
// newTransaction returns a new transaction rooted at baseKey, stamped with
// the clock's current time and max offset; with a nil clock both are zero.
func newTransaction(name string, baseKey roachpb.Key, userPriority roachpb.UserPriority,
	isolation enginepb.IsolationType, clock *hlc.Clock) *roachpb.Transaction {
	var offset int64
	var now hlc.Timestamp
	if clock != nil {
		offset = clock.MaxOffset().Nanoseconds()
		now = clock.Now()
	}
	return roachpb.NewTransaction(name, baseKey, userPriority, isolation, now, offset)
}
// createReplicaSets creates new roachpb.ReplicaDescriptor protos based on an array of
// StoreIDs to aid in testing. Note that this does not actually produce any
// replicas, it just creates the descriptors.
func createReplicaSets(replicaNumbers []roachpb.StoreID) []roachpb.ReplicaDescriptor {
	// Preallocate to the final length to avoid repeated growth copies.
	result := make([]roachpb.ReplicaDescriptor, 0, len(replicaNumbers))
	for _, replicaNumber := range replicaNumbers {
		result = append(result, roachpb.ReplicaDescriptor{
			StoreID: replicaNumber,
		})
	}
	return result
}
// TestIsOnePhaseCommit verifies the circumstances where a
// transactional batch can be committed as an atomic write.
func TestIsOnePhaseCommit(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Canonical BeginTxn/Put/EndTxn batch; slices of it below exercise
	// batches missing the begin or the end request.
	txnReqs := []roachpb.RequestUnion{
		{BeginTransaction: &roachpb.BeginTransactionRequest{}},
		{Put: &roachpb.PutRequest{}},
		{EndTransaction: &roachpb.EndTransactionRequest{}},
	}
	testCases := []struct {
		bu      []roachpb.RequestUnion // batch contents under test
		isTxn   bool                   // attach a transaction to the batch
		isWTO   bool                   // mark the txn WriteTooOld
		isTSOff bool                   // run at SERIALIZABLE so the bumped timestamp matters
		exp1PC  bool                   // expected isOnePhaseCommit verdict
	}{
		{[]roachpb.RequestUnion{}, false, false, false, false},
		{[]roachpb.RequestUnion{}, true, false, false, false},
		{[]roachpb.RequestUnion{{Get: &roachpb.GetRequest{}}}, true, false, false, false},
		{[]roachpb.RequestUnion{{Put: &roachpb.PutRequest{}}}, true, false, false, false},
		{txnReqs[0 : len(txnReqs)-1], true, false, false, false},
		{txnReqs[1:], true, false, false, false},
		{txnReqs, true, false, false, true},
		{txnReqs, true, true, false, false},
		{txnReqs, true, false, true, false},
		{txnReqs, true, true, true, false},
	}
	clock := hlc.NewClock(hlc.UnixNano)
	for i, c := range testCases {
		ba := roachpb.BatchRequest{Requests: c.bu}
		if c.isTxn {
			ba.Txn = newTransaction("txn", roachpb.Key("a"), 1, enginepb.SNAPSHOT, clock)
			if c.isWTO {
				ba.Txn.WriteTooOld = true
			}
			// The timestamp is always pushed past OrigTimestamp; per the
			// expectations above, the offset timestamp only defeats 1PC once
			// the isolation is switched to SERIALIZABLE (isTSOff).
			ba.Txn.Timestamp = ba.Txn.OrigTimestamp.Add(1, 0)
			if c.isTSOff {
				ba.Txn.Isolation = enginepb.SERIALIZABLE
			}
		}
		if is1PC := isOnePhaseCommit(ba); is1PC != c.exp1PC {
			t.Errorf("%d: expected 1pc=%t; got %t", i, c.exp1PC, is1PC)
		}
	}
}
// TestReplicaContains verifies that the range uses Key.Address() in
// order to properly resolve addresses for local keys.
func TestReplicaContains(t *testing.T) {
	defer leaktest.AfterTest(t)()
	desc := &roachpb.RangeDescriptor{
		RangeID:  1,
		StartKey: roachpb.RKey("a"),
		EndKey:   roachpb.RKey("b"),
	}
	// A hollow shell of a Replica suffices; only the descriptor is consulted.
	rep := &Replica{}
	rep.mu.state.Desc = desc
	rep.rangeDesc.Store(desc)
	statsKey := keys.RangeStatsKey(desc.RangeID)
	if !rep.ContainsKey(statsKey) {
		t.Errorf("expected range to contain range stats key %q", statsKey)
	}
	if !rep.ContainsKey(roachpb.Key("aa")) {
		t.Errorf("expected range to contain key \"aa\"")
	}
	if !rep.ContainsKey(keys.RangeDescriptorKey([]byte("aa"))) {
		t.Errorf("expected range to contain range descriptor key for \"aa\"")
	}
	if !rep.ContainsKeyRange(roachpb.Key("aa"), roachpb.Key("b")) {
		t.Errorf("expected range to contain key range \"aa\"-\"b\"")
	}
	descKeyLo := keys.RangeDescriptorKey([]byte("aa"))
	descKeyHi := keys.RangeDescriptorKey([]byte("b"))
	if !rep.ContainsKeyRange(descKeyLo, descKeyHi) {
		t.Errorf("expected range to contain key transaction range \"aa\"-\"b\"")
	}
}
// sendLeaseRequest proposes a RequestLease command for l directly through
// Raft on replica r and waits for it to apply, returning any proposal or
// application error.
func sendLeaseRequest(r *Replica, l *roachpb.Lease) error {
	var ba roachpb.BatchRequest
	ba.Timestamp = r.store.Clock().Now()
	ba.Add(&roachpb.RequestLeaseRequest{Lease: *l})
	ch, _, err := r.proposeRaftCommand(context.Background(), ba)
	if err != nil {
		return err
	}
	// The command was committed; wait for the range to apply it.
	// TODO(bdarnell): refactor this to a more conventional error-handling pattern.
	return (<-ch).Err.GoError()
}
// TestReplicaReadConsistency verifies behavior of the range under
// different read consistencies. Note that this unittest plays
// fast and loose with granting range leases.
func TestReplicaReadConsistency(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Modify range descriptor to include a second replica; range lease can
	// only be obtained by Replicas which are part of the range descriptor. This
	// workaround is sufficient for the purpose of this test.
	secondReplica := roachpb.ReplicaDescriptor{
		NodeID:    2,
		StoreID:   2,
		ReplicaID: 2,
	}
	rngDesc := tc.rng.Desc()
	rngDesc.Replicas = append(rngDesc.Replicas, secondReplica)
	tc.rng.setDescWithoutProcessUpdate(rngDesc)
	gArgs := getArgs(roachpb.Key("a"))
	// Try consistent read and verify success.
	if _, err := tc.SendWrapped(&gArgs); err != nil {
		t.Errorf("expected success on consistent read: %s", err)
	}
	// Try a consensus read and verify error.
	if _, err := tc.SendWrappedWith(roachpb.Header{
		ReadConsistency: roachpb.CONSENSUS,
	}, &gArgs); err == nil {
		t.Errorf("expected error on consensus read")
	}
	// Try an inconsistent read within a transaction; the combination is
	// expected to be rejected.
	txn := newTransaction("test", roachpb.Key("a"), 1, enginepb.SERIALIZABLE, tc.clock)
	if _, err := tc.SendWrappedWith(roachpb.Header{
		Txn:             txn,
		ReadConsistency: roachpb.INCONSISTENT,
	}, &gArgs); err == nil {
		t.Errorf("expected error on inconsistent read within a txn")
	}
	// Lose the lease and verify CONSISTENT reads receive NotLeaseHolderError
	// and INCONSISTENT reads work as expected.
	start := hlc.ZeroTimestamp.Add(leaseExpiry(tc.rng), 0)
	tc.manualClock.Set(start.WallTime)
	// Hand the lease to the (fictitious) second replica added above.
	if err := sendLeaseRequest(tc.rng, &roachpb.Lease{
		Start:       start,
		StartStasis: start.Add(10, 0),
		Expiration:  start.Add(10, 0),
		Replica: roachpb.ReplicaDescriptor{ // a different node
			ReplicaID: 2,
			NodeID:    2,
			StoreID:   2,
		},
	}); err != nil {
		t.Fatal(err)
	}
	// Send without Txn.
	_, pErr := tc.SendWrappedWith(roachpb.Header{
		ReadConsistency: roachpb.CONSISTENT,
	}, &gArgs)
	if _, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError); !ok {
		t.Errorf("expected not lease holder error; got %s", pErr)
	}
	// An inconsistent read succeeds even though this replica no longer
	// holds the lease.
	if _, pErr := tc.SendWrappedWith(roachpb.Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}, &gArgs); pErr != nil {
		t.Errorf("expected success reading with inconsistent: %s", pErr)
	}
}
// TestApplyCmdLeaseError verifies that when during application of a Raft
// command the proposing node no longer holds the range lease, an error is
// returned. This prevents regression of #1483.
func TestApplyCmdLeaseError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Modify range descriptor to include a second replica; range lease can
	// only be obtained by Replicas which are part of the range descriptor. This
	// workaround is sufficient for the purpose of this test.
	secondReplica := roachpb.ReplicaDescriptor{
		NodeID:    2,
		StoreID:   2,
		ReplicaID: 2,
	}
	rngDesc := tc.rng.Desc()
	rngDesc.Replicas = append(rngDesc.Replicas, secondReplica)
	tc.rng.setDescWithoutProcessUpdate(rngDesc)
	pArgs := putArgs(roachpb.Key("a"), []byte("asd"))
	// Lose the lease by expiring the current one and granting a fresh
	// lease to the second replica.
	start := hlc.ZeroTimestamp.Add(leaseExpiry(tc.rng), 0)
	tc.manualClock.Set(start.WallTime)
	if err := sendLeaseRequest(tc.rng, &roachpb.Lease{
		Start:       start,
		StartStasis: start.Add(10, 0),
		Expiration:  start.Add(10, 0),
		Replica: roachpb.ReplicaDescriptor{ // a different node
			ReplicaID: 2,
			NodeID:    2,
			StoreID:   2,
		},
	}); err != nil {
		t.Fatal(err)
	}
	// A write proposed by this (non-lease-holding) replica must be
	// rejected with NotLeaseHolderError. The timestamp is deliberately in
	// the past.
	_, pErr := tc.SendWrappedWith(roachpb.Header{
		Timestamp: tc.clock.Now().Add(-100, 0),
	}, &pArgs)
	if _, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError); !ok {
		t.Fatalf("expected not lease holder error in return, got %v", pErr)
	}
}
// TestReplicaRangeBoundsChecking verifies that a request for a key
// outside the range's bounds returns a RangeKeyMismatchError naming both
// the mismatched range and a suggested alternative.
func TestReplicaRangeBoundsChecking(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Split at "a" so that "b" no longer belongs to the first range.
	key := roachpb.RKey("a")
	firstRng := tc.store.LookupReplica(key, nil)
	newRng := splitTestRange(tc.store, key, key, t)
	if pErr := newRng.redirectOnOrAcquireLease(context.Background()); pErr != nil {
		t.Fatal(pErr)
	}
	// tc.SendWrapped targets the first range, which no longer contains "b".
	gArgs := getArgs(roachpb.Key("b"))
	_, pErr := tc.SendWrapped(&gArgs)
	if mismatchErr, ok := pErr.GetDetail().(*roachpb.RangeKeyMismatchError); !ok {
		t.Errorf("expected range key mismatch error: %s", pErr)
	} else {
		if mismatchedDesc := mismatchErr.MismatchedRange; mismatchedDesc == nil || mismatchedDesc.RangeID != firstRng.RangeID {
			t.Errorf("expected mismatched range to be %d, found %v", firstRng.RangeID, mismatchedDesc)
		}
		if suggestedDesc := mismatchErr.SuggestedRange; suggestedDesc == nil || suggestedDesc.RangeID != newRng.RangeID {
			t.Errorf("expected suggested range to be %d, found %v", newRng.RangeID, suggestedDesc)
		}
	}
}
// hasLease returns whether the most recent range lease was held by the
// given range replica and whether it's expired for the given timestamp.
func hasLease(rng *Replica, timestamp hlc.Timestamp) (owned bool, expired bool) {
	lease, _ := rng.getLease()
	owned = lease.OwnedBy(rng.store.StoreID())
	expired = !lease.Covers(timestamp)
	return owned, expired
}
// TestReplicaLease exercises lease validation, handoff to another
// replica, expiration, and the error returned when a lease request is
// rejected.
func TestReplicaLease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Modify range descriptor to include a second replica; leader lease can
	// only be obtained by Replicas which are part of the range descriptor. This
	// workaround is sufficient for the purpose of this test.
	secondReplica := roachpb.ReplicaDescriptor{
		NodeID:    2,
		StoreID:   2,
		ReplicaID: 2,
	}
	rngDesc := tc.rng.Desc()
	rngDesc.Replicas = append(rngDesc.Replicas, secondReplica)
	tc.rng.setDescWithoutProcessUpdate(rngDesc)
	// Test that leases with invalid times are rejected.
	// Start leases at a point that avoids overlapping with the existing lease.
	one := hlc.ZeroTimestamp.Add(time.Second.Nanoseconds(), 0)
	for _, lease := range []roachpb.Lease{
		// Empty interval: stasis not after start.
		{Start: one, StartStasis: one},
		// Expiration before stasis.
		{Start: one, StartStasis: one.Next(), Expiration: one},
	} {
		if _, _, err := tc.rng.RequestLease(context.Background(), tc.store.Engine(), nil,
			roachpb.Header{}, roachpb.RequestLeaseRequest{
				Lease: lease,
			}); !testutils.IsError(err, "illegal lease interval") {
			t.Fatalf("unexpected error: %v", err)
		}
	}
	if held, _ := hasLease(tc.rng, tc.clock.Now()); !held {
		t.Errorf("expected lease on range start")
	}
	// Expire our lease and hand a fresh one to the second replica.
	tc.manualClock.Set(leaseExpiry(tc.rng))
	now := tc.clock.Now()
	if err := sendLeaseRequest(tc.rng, &roachpb.Lease{
		Start:       now.Add(10, 0),
		StartStasis: now.Add(20, 0),
		Expiration:  now.Add(20, 0),
		Replica:     secondReplica,
	}); err != nil {
		t.Fatal(err)
	}
	if held, expired := hasLease(tc.rng, tc.clock.Now().Add(15, 0)); held || expired {
		t.Errorf("expected second replica to have range lease")
	}
	{
		// While another replica holds the lease, acquiring it here must
		// redirect with NotLeaseHolderError.
		pErr := tc.rng.redirectOnOrAcquireLease(context.Background())
		if lErr, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError); !ok || lErr == nil {
			t.Fatalf("wanted NotLeaseHolderError, got %s", pErr)
		}
	}
	// Advance clock past expiration and verify that another has
	// range lease will not be true.
	tc.manualClock.Increment(21) // 21ns have passed
	if held, expired := hasLease(tc.rng, tc.clock.Now()); held || !expired {
		t.Errorf("expected another replica to have expired lease")
	}
	// Verify that command returns NotLeaseHolderError when lease is rejected.
	rng, err := NewReplica(testRangeDescriptor(), tc.store, 0)
	if err != nil {
		t.Fatal(err)
	}
	// Stub the Raft proposal path to reject every lease request.
	rng.mu.Lock()
	rng.mu.proposeRaftCommandFn = func(*pendingCmd) error {
		return &roachpb.LeaseRejectedError{
			Message: "replica not found",
		}
	}
	rng.mu.Unlock()
	{
		// NOTE(review): the Fatalf below formats `err` from NewReplica,
		// which is nil at this point — presumably the actual pErr was
		// intended; confirm before relying on this message.
		if _, ok := rng.redirectOnOrAcquireLease(context.Background()).GetDetail().(*roachpb.NotLeaseHolderError); !ok {
			t.Fatalf("expected %T, got %s", &roachpb.NotLeaseHolderError{}, err)
		}
	}
}
// TestReplicaNotLeaseHolderError verifies NotLeaderError when lease is rejected.
func TestReplicaNotLeaseHolderError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Modify range descriptor to include a second replica; range lease can
	// only be obtained by Replicas which are part of the range descriptor. This
	// workaround is sufficient for the purpose of this test.
	secondReplica := roachpb.ReplicaDescriptor{
		NodeID:    2,
		StoreID:   2,
		ReplicaID: 2,
	}
	rngDesc := tc.rng.Desc()
	rngDesc.Replicas = append(rngDesc.Replicas, secondReplica)
	tc.rng.setDescWithoutProcessUpdate(rngDesc)
	// Expire our lease and grant a new one to the second replica, so that
	// this store is no longer the lease holder.
	tc.manualClock.Set(leaseExpiry(tc.rng))
	now := tc.clock.Now()
	if err := sendLeaseRequest(tc.rng, &roachpb.Lease{
		Start:       now,
		StartStasis: now.Add(10, 0),
		Expiration:  now.Add(10, 0),
		Replica: roachpb.ReplicaDescriptor{
			ReplicaID: 2,
			NodeID:    2,
			StoreID:   2,
		},
	}); err != nil {
		t.Fatal(err)
	}
	header := roachpb.Span{
		Key: roachpb.Key("a"),
	}
	// One representative request per command class; all must be redirected.
	testCases := []roachpb.Request{
		// Admin split covers admin commands.
		&roachpb.AdminSplitRequest{
			Span:     header,
			SplitKey: roachpb.Key("a"),
		},
		// Get covers read-only commands.
		&roachpb.GetRequest{
			Span: header,
		},
		// Put covers read-write commands.
		&roachpb.PutRequest{
			Span:  header,
			Value: roachpb.MakeValueFromString("value"),
		},
	}
	for i, test := range testCases {
		_, pErr := tc.SendWrappedWith(roachpb.Header{Timestamp: now}, test)
		if _, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError); !ok {
			t.Errorf("%d: expected not lease holder error: %s", i, pErr)
		}
	}
}
// TestReplicaLeaseCounters verifies leaseRequest metrics counters are updated
// correctly after a lease request.
func TestReplicaLeaseCounters(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// assert checks a counter against an inclusive [min, max] window; a
	// window is used because background activity may add extra successes.
	assert := func(actual, min, max int64) {
		if actual < min || actual > max {
			t.Fatal(errors.Errorf(
				"metrics counters actual=%d, expected=[%d,%d]", actual, min, max))
		}
	}
	// Startup acquires at least one lease.
	metrics := tc.rng.store.metrics
	assert(metrics.LeaseRequestSuccessCount.Count(), 1, 1000)
	assert(metrics.LeaseRequestErrorCount.Count(), 0, 0)
	now := tc.clock.Now()
	if err := sendLeaseRequest(tc.rng, &roachpb.Lease{
		Start:       now,
		StartStasis: now.Add(10, 0),
		Expiration:  now.Add(10, 0),
		Replica: roachpb.ReplicaDescriptor{
			ReplicaID: 1,
			NodeID:    1,
			StoreID:   1,
		},
	}); err != nil {
		t.Fatal(err)
	}
	assert(metrics.LeaseRequestSuccessCount.Count(), 2, 1000)
	assert(metrics.LeaseRequestErrorCount.Count(), 0, 0)
	// Make lease request fail by providing an invalid ReplicaDescriptor.
	if err := sendLeaseRequest(tc.rng, &roachpb.Lease{
		Start:       now,
		StartStasis: now.Add(10, 0),
		Expiration:  now.Add(10, 0),
		Replica: roachpb.ReplicaDescriptor{
			ReplicaID: 2,
			NodeID:    99,
			StoreID:   99,
		},
	}); err == nil {
		t.Fatal("lease request did not fail on invalid ReplicaDescriptor")
	}
	// The failure bumps the error counter, not the success counter.
	assert(metrics.LeaseRequestSuccessCount.Count(), 2, 1000)
	assert(metrics.LeaseRequestErrorCount.Count(), 1, 1000)
}
// TestReplicaGossipConfigsOnLease verifies that config info is gossiped
// upon acquisition of the range lease.
func TestReplicaGossipConfigsOnLease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Modify range descriptor to include a second replica; range lease can
	// only be obtained by Replicas which are part of the range descriptor. This
	// workaround is sufficient for the purpose of this test.
	secondReplica := roachpb.ReplicaDescriptor{
		NodeID:    2,
		StoreID:   2,
		ReplicaID: 2,
	}
	rngDesc := tc.rng.Desc()
	rngDesc.Replicas = append(rngDesc.Replicas, secondReplica)
	tc.rng.setDescWithoutProcessUpdate(rngDesc)
	// Write some arbitrary data in the system config span.
	key := keys.MakeTablePrefix(keys.MaxSystemConfigDescID)
	var val roachpb.Value
	val.SetInt(42)
	// Write directly through the engine, bypassing the replica, so no
	// gossip is triggered by the write itself.
	if err := engine.MVCCPut(context.Background(), tc.engine, nil, key, hlc.MinTimestamp, val, nil); err != nil {
		t.Fatal(err)
	}
	// If this actually failed, we would have gossiped from MVCCPutProto.
	// Unlikely, but why not check.
	if cfg, ok := tc.gossip.GetSystemConfig(); ok {
		if nv := len(cfg.Values); nv == 1 && cfg.Values[nv-1].Key.Equal(key) {
			t.Errorf("unexpected gossip of system config: %s", cfg)
		}
	}
	// Expire our own lease which we automagically acquired due to being
	// first range and config holder.
	tc.manualClock.Set(leaseExpiry(tc.rng))
	now := tc.clock.Now()
	// Give lease to someone else.
	if err := sendLeaseRequest(tc.rng, &roachpb.Lease{
		Start:       now,
		StartStasis: now.Add(10, 0),
		Expiration:  now.Add(10, 0),
		Replica: roachpb.ReplicaDescriptor{
			ReplicaID: 2,
			NodeID:    2,
			StoreID:   2,
		},
	}); err != nil {
		t.Fatal(err)
	}
	// Expire that lease.
	tc.manualClock.Increment(11 + int64(tc.clock.MaxOffset())) // advance time
	now = tc.clock.Now()
	// Give lease to this range. Reacquiring the lease is what should
	// trigger the config gossip checked below.
	if err := sendLeaseRequest(tc.rng, &roachpb.Lease{
		Start:       now.Add(11, 0),
		StartStasis: now.Add(20, 0),
		Expiration:  now.Add(20, 0),
		Replica: roachpb.ReplicaDescriptor{
			ReplicaID: 1,
			NodeID:    1,
			StoreID:   1,
		},
	}); err != nil {
		t.Fatal(err)
	}
	// Gossip happens asynchronously after lease acquisition; poll for it.
	util.SucceedsSoon(t, func() error {
		cfg, ok := tc.gossip.GetSystemConfig()
		if !ok {
			return errors.Errorf("expected system config to be set")
		}
		numValues := len(cfg.Values)
		if numValues != 1 {
			return errors.Errorf("num config values != 1; got %d", numValues)
		}
		if k := cfg.Values[numValues-1].Key; !k.Equal(key) {
			return errors.Errorf("invalid key for config value (%q != %q)", k, key)
		}
		return nil
	})
}
// TestReplicaTSCacheLowWaterOnLease verifies that the low water mark
// is set on the timestamp cache when the node is granted the lease holder
// lease after not holding it and it is not set when the node is
// granted the range lease when it was the last holder.
func TestReplicaTSCacheLowWaterOnLease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	tc.clock.SetMaxOffset(maxClockOffset)
	// Disable raft log truncation which confuses this test.
	tc.store.SetRaftLogQueueActive(false)
	// Modify range descriptor to include a second replica; range lease can
	// only be obtained by Replicas which are part of the range descriptor. This
	// workaround is sufficient for the purpose of this test.
	secondReplica := roachpb.ReplicaDescriptor{
		NodeID:    2,
		StoreID:   2,
		ReplicaID: 2,
	}
	rngDesc := tc.rng.Desc()
	rngDesc.Replicas = append(rngDesc.Replicas, secondReplica)
	tc.rng.setDescWithoutProcessUpdate(rngDesc)
	tc.manualClock.Set(leaseExpiry(tc.rng))
	now := hlc.Timestamp{WallTime: tc.manualClock.UnixNano()}
	// Record the cache's baseline low water mark before any lease churn.
	tc.rng.mu.Lock()
	baseRTS, _ := tc.rng.mu.tsCache.GetMaxRead(roachpb.Key("a"), nil /* end */, nil /* txn */)
	tc.rng.mu.Unlock()
	baseLowWater := baseRTS.WallTime
	newLowWater := now.Add(50, 0).WallTime + baseLowWater
	testCases := []struct {
		storeID     roachpb.StoreID // store requesting the lease
		start       hlc.Timestamp   // lease start
		expiration  hlc.Timestamp   // lease expiration
		expLowWater int64           // expected tsCache low water mark afterwards
		expErr      string          // expected error substring, if any
	}{
		// Grant the lease fresh.
		{storeID: tc.store.StoreID(),
			start: now, expiration: now.Add(10, 0),
			expLowWater: baseLowWater},
		// Renew the lease.
		{storeID: tc.store.StoreID(),
			start: now.Add(15, 0), expiration: now.Add(30, 0),
			expLowWater: baseLowWater},
		// Renew the lease but shorten expiration. This errors out.
		{storeID: tc.store.StoreID(),
			start: now.Add(16, 0), expiration: now.Add(25, 0),
			expErr: "lease shortening currently unsupported",
		},
		// Another Store attempts to get the lease, but overlaps. If the
		// previous lease expiration had worked, this would have too.
		{storeID: tc.store.StoreID() + 1,
			start: now.Add(29, 0), expiration: now.Add(50, 0),
			expLowWater: baseLowWater,
			expErr:      "overlaps previous",
		},
		// The other store tries again, this time without the overlap.
		{storeID: tc.store.StoreID() + 1,
			start: now.Add(31, 0), expiration: now.Add(50, 0),
			expLowWater: baseLowWater},
		// Lease is regranted to this replica. Store clock moves forward avoid
		// influencing the result.
		{storeID: tc.store.StoreID(),
			start: now.Add(60, 0), expiration: now.Add(70, 0),
			expLowWater: newLowWater},
		// Lease is held by another once more.
		{storeID: tc.store.StoreID() + 1,
			start: now.Add(70, 0), expiration: now.Add(90, 0),
			expLowWater: newLowWater},
	}
	for i, test := range testCases {
		if err := sendLeaseRequest(tc.rng, &roachpb.Lease{
			Start:       test.start,
			StartStasis: test.expiration.Add(-1, 0), // smaller than durations used
			Expiration:  test.expiration,
			Replica: roachpb.ReplicaDescriptor{
				ReplicaID: roachpb.ReplicaID(test.storeID),
				NodeID:    roachpb.NodeID(test.storeID),
				StoreID:   test.storeID,
			},
		}); err != nil {
			if test.expErr == "" || !testutils.IsError(err, test.expErr) {
				t.Fatalf("%d: unexpected error %s", i, err)
			}
		}
		// Verify expected low water mark.
		tc.rng.mu.Lock()
		rTS, rOK := tc.rng.mu.tsCache.GetMaxRead(roachpb.Key("a"), nil, nil)
		wTS, wOK := tc.rng.mu.tsCache.GetMaxWrite(roachpb.Key("a"), nil, nil)
		tc.rng.mu.Unlock()
		if rTS.WallTime != test.expLowWater || wTS.WallTime != test.expLowWater || rOK || wOK {
			t.Errorf("%d: expected low water %d; got %d, %d; rOK=%t, wOK=%t", i, test.expLowWater, rTS.WallTime, wTS.WallTime, rOK, wOK)
		}
	}
}
// TestReplicaLeaseRejectUnknownRaftNodeID ensures that a replica cannot
// obtain the range lease if it is not part of the current range descriptor.
// TODO(mrtracy): This should probably be tested in client_raft_test package,
// using a real second store.
func TestReplicaLeaseRejectUnknownRaftNodeID(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	tc.manualClock.Set(leaseExpiry(tc.rng))
	now := tc.clock.Now()
	// Replica 2 is not in the (single-replica) range descriptor.
	lease := &roachpb.Lease{
		Start:       now,
		StartStasis: now.Add(10, 0),
		Expiration:  now.Add(10, 0),
		Replica: roachpb.ReplicaDescriptor{
			ReplicaID: 2,
			NodeID:    2,
			StoreID:   2,
		},
	}
	// Propose the lease directly through Raft, as sendLeaseRequest does.
	ba := roachpb.BatchRequest{}
	ba.Timestamp = tc.rng.store.Clock().Now()
	ba.Add(&roachpb.RequestLeaseRequest{Lease: *lease})
	ch, _, err := tc.rng.proposeRaftCommand(context.Background(), ba)
	if err == nil {
		// Next if the command was committed, wait for the range to apply it.
		// TODO(bdarnell): refactor to a more conventional error-handling pattern.
		// Remove ambiguity about where the "replica not found" error comes from.
		err = (<-ch).Err.GoError()
	}
	if !testutils.IsError(err, "replica not found") {
		t.Errorf("unexpected error obtaining lease for invalid store: %v", err)
	}
}
// TestReplicaDrainLease makes sure that no new leases are granted when
// the Store is in DrainLeases mode.
func TestReplicaDrainLease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Acquire initial lease.
	if pErr := tc.rng.redirectOnOrAcquireLease(context.Background()); pErr != nil {
		t.Fatal(pErr)
	}
	var slept atomic.Value
	slept.Store(false)
	if err := tc.stopper.RunAsyncTask(func() {
		// Wait just a bit so that the main thread can check that
		// DrainLeases blocks (false negatives are possible, but 10ms is
		// plenty to make this fail 99.999% of the time in practice).
		time.Sleep(10 * time.Millisecond)
		slept.Store(true)
		// Expire the lease (and any others that may race in before we drain).
		for {
			tc.manualClock.Increment(leaseExpiry(tc.rng))
			select {
			case <-time.After(10 * time.Millisecond): // real code would use Ticker
			case <-tc.stopper.ShouldQuiesce():
				return
			}
		}
	}); err != nil {
		t.Fatal(err)
	}
	// DrainLeases(true) must block until the active lease has expired.
	if err := tc.store.DrainLeases(true); err != nil {
		t.Fatal(err)
	}
	if !slept.Load().(bool) {
		t.Fatal("DrainLeases returned with active lease")
	}
	// While draining, a lease request is answered with NotLeaseHolderError.
	tc.rng.mu.Lock()
	pErr := <-tc.rng.requestLeaseLocked(tc.clock.Now())
	tc.rng.mu.Unlock()
	_, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError)
	if !ok {
		t.Fatalf("expected NotLeaseHolderError, not %v", pErr)
	}
	if err := tc.store.DrainLeases(false); err != nil {
		t.Fatal(err)
	}
	// Newly unfrozen, leases work again.
	if pErr := tc.rng.redirectOnOrAcquireLease(context.Background()); pErr != nil {
		t.Fatal(pErr)
	}
}
// TestReplicaGossipFirstRange verifies that the first range gossips its
// location and the cluster ID.
func TestReplicaGossipFirstRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	gossipKeys := []string{gossip.KeyClusterID, gossip.KeyFirstRangeDescriptor, gossip.KeySentinel}
	for _, key := range gossipKeys {
		// `info` instead of `bytes`: avoid shadowing the stdlib package name.
		info, err := tc.gossip.GetInfo(key)
		if err != nil {
			t.Errorf("missing first range gossip of key %s", key)
		}
		switch key {
		case gossip.KeyFirstRangeDescriptor:
			// The descriptor must at least deserialize cleanly.
			var rangeDesc roachpb.RangeDescriptor
			if err := proto.Unmarshal(info, &rangeDesc); err != nil {
				t.Fatal(err)
			}
		case gossip.KeyClusterID:
			if len(info) == 0 {
				t.Errorf("expected non-empty gossiped cluster ID, got %q", info)
			}
		case gossip.KeySentinel:
			if len(info) == 0 {
				t.Errorf("expected non-empty gossiped sentinel, got %q", info)
			}
		}
	}
}
// TestReplicaGossipAllConfigs verifies that all config types are gossiped.
func TestReplicaGossipAllConfigs(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Startup of the first range should have gossiped the system config.
	_, ok := tc.gossip.GetSystemConfig()
	if !ok {
		t.Fatal("config not set")
	}
}
// maybeWrapWithBeginTransaction sends req through sender. When the header
// carries a transaction that has not started writing yet, the request is
// wrapped in a batch together with a prepended BeginTransaction anchored
// at req's key. The reply to req (not to the begin) is returned, with its
// header's Txn taken from the batch response.
func maybeWrapWithBeginTransaction(sender client.Sender, ctx context.Context, header roachpb.Header, req roachpb.Request) (roachpb.Response, *roachpb.Error) {
	if header.Txn == nil || header.Txn.Writing {
		// Nothing to wrap: non-transactional, or the txn already wrote.
		return client.SendWrappedWith(sender, ctx, header, req)
	}
	if ctx == nil {
		ctx = context.Background()
	}
	var ba roachpb.BatchRequest
	bt, _ := beginTxnArgs(req.Header().Key, header.Txn)
	ba.Header = header
	ba.Add(&bt)
	ba.Add(req)
	br, pErr := sender.Send(ctx, ba)
	if pErr != nil {
		return nil, pErr
	}
	// Responses[0] is the BeginTransaction reply; unwrap the second entry,
	// which corresponds to req.
	unwrappedReply := br.Responses[1].GetInner()
	unwrappedHeader := unwrappedReply.Header()
	unwrappedHeader.Txn = br.Txn
	unwrappedReply.SetHeader(unwrappedHeader)
	return unwrappedReply, nil
}
// TestReplicaNoGossipConfig verifies that certain commands (e.g.,
// reads, writes in uncommitted transactions) do not trigger gossip.
func TestReplicaNoGossipConfig(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Write some arbitrary data in the system span (up to, but not including MaxReservedID+1)
	key := keys.MakeTablePrefix(keys.MaxReservedDescID)
	txn := newTransaction("test", key, 1 /* userPriority */, enginepb.SERIALIZABLE, tc.clock)
	h := roachpb.Header{Txn: txn}
	req1 := putArgs(key, []byte("foo"))
	req2, _ := endTxnArgs(txn, true /* commit */)
	req2.IntentSpans = []roachpb.Span{{Key: key}}
	req3 := getArgs(key)
	// A txn write, the commit, and a plain read — none should gossip.
	testCases := []struct {
		req roachpb.Request
		h   roachpb.Header
	}{
		{&req1, h},
		{&req2, h},
		{&req3, roachpb.Header{}},
	}
	for i, test := range testCases {
		txn.Sequence++
		if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), test.h, test.req); pErr != nil {
			t.Fatal(pErr)
		}
		// After the first (wrapped) request the txn has written.
		txn.Writing = true
		// System config is not gossiped.
		cfg, ok := tc.gossip.GetSystemConfig()
		if !ok {
			t.Fatal("config not set")
		}
		if len(cfg.Values) != 0 {
			t.Errorf("System config was gossiped at #%d", i)
		}
	}
}
// TestReplicaNoGossipFromNonLeader verifies that a non-lease holder replica
// does not gossip configurations.
func TestReplicaNoGossipFromNonLeader(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Write some arbitrary data in the system span (up to, but not including MaxReservedID+1)
	key := keys.MakeTablePrefix(keys.MaxReservedDescID)
	txn := newTransaction("test", key, 1 /* userPriority */, enginepb.SERIALIZABLE, tc.clock)
	req1 := putArgs(key, nil)
	if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), nil, roachpb.Header{
		Txn: txn,
	}, &req1); pErr != nil {
		t.Fatal(pErr)
	}
	txn.Writing = true
	txn.Sequence++
	// Commit the transaction so the write is visible.
	req2, h := endTxnArgs(txn, true /* commit */)
	req2.IntentSpans = []roachpb.Span{{Key: key}}
	txn.Sequence++
	if _, pErr := tc.SendWrappedWith(h, &req2); pErr != nil {
		t.Fatal(pErr)
	}
	// Execute a get to resolve the intent.
	req3 := getArgs(key)
	if _, pErr := tc.SendWrappedWith(roachpb.Header{Timestamp: txn.Timestamp}, &req3); pErr != nil {
		t.Fatal(pErr)
	}
	// Increment the clock's timestamp to expire the range lease.
	tc.manualClock.Set(leaseExpiry(tc.rng))
	if lease, _ := tc.rng.getLease(); lease.Covers(tc.clock.Now()) {
		t.Fatal("range lease should have been expired")
	}
	// Make sure the information for db1 is not gossiped. Since obtaining
	// a lease updates the gossiped information, we do that.
	if pErr := tc.rng.redirectOnOrAcquireLease(context.Background()); pErr != nil {
		t.Fatal(pErr)
	}
	// Fetch the raw gossip info. GetSystemConfig is based on callbacks at
	// modification time. But we're checking for _not_ gossiped, so there should
	// be no callbacks. Easier to check the raw info.
	var cfg config.SystemConfig
	err := tc.gossip.GetInfoProto(gossip.KeySystemConfig, &cfg)
	if err != nil {
		t.Fatal(err)
	}
	if len(cfg.Values) != 0 {
		t.Fatalf("non-lease holder gossiped the system config")
	}
}
// getArgs returns a GetRequest addressed to key.
func getArgs(key []byte) roachpb.GetRequest {
	var req roachpb.GetRequest
	req.Span = roachpb.Span{Key: key}
	return req
}
// putArgs returns a PutRequest writing value at key.
func putArgs(key roachpb.Key, value []byte) roachpb.PutRequest {
	var req roachpb.PutRequest
	req.Span = roachpb.Span{Key: key}
	req.Value = roachpb.MakeValueFromBytes(value)
	return req
}
// cPutArgs returns a ConditionalPutRequest writing value at key,
// conditioned on the existing value being expValue.
func cPutArgs(key roachpb.Key, value, expValue []byte) roachpb.ConditionalPutRequest {
	expV := roachpb.MakeValueFromBytes(expValue)
	var req roachpb.ConditionalPutRequest
	req.Span = roachpb.Span{Key: key}
	req.Value = roachpb.MakeValueFromBytes(value)
	req.ExpValue = &expV
	return req
}
// deleteArgs returns a DeleteRequest for key.
func deleteArgs(key roachpb.Key) roachpb.DeleteRequest {
	var req roachpb.DeleteRequest
	req.Span = roachpb.Span{Key: key}
	return req
}
// readOrWriteArgs returns either get or put arguments depending on
// value of "read". Get for true; Put for false.
func readOrWriteArgs(key roachpb.Key, read bool) roachpb.Request {
	if !read {
		pArgs := putArgs(key, []byte("value"))
		return &pArgs
	}
	gArgs := getArgs(key)
	return &gArgs
}
// incrementArgs returns an IncrementRequest adding inc at key.
func incrementArgs(key []byte, inc int64) roachpb.IncrementRequest {
	var req roachpb.IncrementRequest
	req.Span = roachpb.Span{Key: key}
	req.Increment = inc
	return req
}
// scanArgs returns a ScanRequest over [start, end).
func scanArgs(start, end []byte) roachpb.ScanRequest {
	var req roachpb.ScanRequest
	req.Span = roachpb.Span{Key: start, EndKey: end}
	return req
}
// beginTxnArgs returns a BeginTransactionRequest anchored at the txn's
// own key, plus a header carrying the txn. The key argument is accepted
// for signature compatibility but not consulted.
func beginTxnArgs(key []byte, txn *roachpb.Transaction) (_ roachpb.BeginTransactionRequest, h roachpb.Header) {
	h.Txn = txn
	var req roachpb.BeginTransactionRequest
	req.Span = roachpb.Span{Key: txn.Key}
	return req, h
}
// endTxnArgs returns an EndTransactionRequest (commit or abort per the
// commit flag) keyed on the txn's anchor key — not allowed when going
// through TxnCoordSender, but we're not — plus a header carrying the txn.
func endTxnArgs(txn *roachpb.Transaction, commit bool) (_ roachpb.EndTransactionRequest, h roachpb.Header) {
	h.Txn = txn
	var req roachpb.EndTransactionRequest
	req.Span = roachpb.Span{Key: txn.Key}
	req.Commit = commit
	return req, h
}
// pushTxnArgs returns a PushTxnRequest in which pusher attempts to push
// pushee (keyed on the pushee's anchor key) to the pusher's timestamp.
func pushTxnArgs(pusher, pushee *roachpb.Transaction, pushType roachpb.PushTxnType) roachpb.PushTxnRequest {
	var req roachpb.PushTxnRequest
	req.Span = roachpb.Span{Key: pushee.Key}
	req.Now = pusher.Timestamp
	req.PushTo = pusher.Timestamp
	req.PusherTxn = *pusher
	req.PusheeTxn = pushee.TxnMeta
	req.PushType = pushType
	return req
}
// heartbeatArgs returns a HeartbeatTxnRequest keyed on the txn's anchor
// key, plus a header carrying the txn.
func heartbeatArgs(txn *roachpb.Transaction) (_ roachpb.HeartbeatTxnRequest, h roachpb.Header) {
	h.Txn = txn
	var req roachpb.HeartbeatTxnRequest
	req.Span = roachpb.Span{Key: txn.Key}
	return req, h
}
// internalMergeArgs returns a MergeRequest merging value at key.
func internalMergeArgs(key []byte, value roachpb.Value) roachpb.MergeRequest {
	var req roachpb.MergeRequest
	req.Span = roachpb.Span{Key: key}
	req.Value = value
	return req
}
// truncateLogArgs returns a TruncateLogRequest up to index for rangeID.
func truncateLogArgs(index uint64, rangeID roachpb.RangeID) roachpb.TruncateLogRequest {
	var req roachpb.TruncateLogRequest
	req.Index = index
	req.RangeID = rangeID
	return req
}
// gcKey returns a GC key entry for key at the given timestamp.
func gcKey(key roachpb.Key, timestamp hlc.Timestamp) roachpb.GCRequest_GCKey {
	var entry roachpb.GCRequest_GCKey
	entry.Key = key
	entry.Timestamp = timestamp
	return entry
}
// gcArgs returns a GCRequest over [startKey, endKey) for the given keys.
func gcArgs(startKey []byte, endKey []byte, keys ...roachpb.GCRequest_GCKey) roachpb.GCRequest {
	var req roachpb.GCRequest
	req.Span = roachpb.Span{Key: startKey, EndKey: endKey}
	req.Keys = keys
	return req
}
// TestOptimizePuts verifies that contiguous runs of puts and
// conditional puts are marked as "blind" if they're written
// to a virgin keyspace.
func TestOptimizePuts(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Build optimizePutThreshold puts and cputs over keys "00".."09" so
	// individual cases can form runs at/below/above the threshold.
	pArgs := make([]roachpb.PutRequest, optimizePutThreshold)
	cpArgs := make([]roachpb.ConditionalPutRequest, optimizePutThreshold)
	for i := 0; i < optimizePutThreshold; i++ {
		pArgs[i] = putArgs([]byte(fmt.Sprintf("%02d", i)), []byte("1"))
		cpArgs[i] = cPutArgs([]byte(fmt.Sprintf("%02d", i)), []byte("1"), []byte("0"))
	}
	incArgs := incrementArgs([]byte("inc"), 1)
	// exKey, if non-nil, is written to the engine before running the case
	// (simulating a non-virgin keyspace); expBlind is the per-request
	// expected Blind flag after optimizePuts runs.
	testCases := []struct {
		exKey    roachpb.Key
		reqs     []roachpb.Request
		expBlind []bool
	}{
		// No existing keys, single put.
		{
			nil,
			[]roachpb.Request{
				&pArgs[0],
			},
			[]bool{
				false,
			},
		},
		// No existing keys, nine puts.
		{
			nil,
			[]roachpb.Request{
				&pArgs[0], &pArgs[1], &pArgs[2], &pArgs[3], &pArgs[4], &pArgs[5], &pArgs[6], &pArgs[7], &pArgs[8],
			},
			[]bool{
				false, false, false, false, false, false, false, false, false,
			},
		},
		// No existing keys, ten puts.
		{
			nil,
			[]roachpb.Request{
				&pArgs[0], &pArgs[1], &pArgs[2], &pArgs[3], &pArgs[4], &pArgs[5], &pArgs[6], &pArgs[7], &pArgs[8], &pArgs[9],
			},
			[]bool{
				true, true, true, true, true, true, true, true, true, true,
			},
		},
		// Existing key at "0", ten conditional puts.
		{
			roachpb.Key("0"),
			[]roachpb.Request{
				&cpArgs[0], &cpArgs[1], &cpArgs[2], &cpArgs[3], &cpArgs[4], &cpArgs[5], &cpArgs[6], &cpArgs[7], &cpArgs[8], &cpArgs[9],
			},
			[]bool{
				true, true, true, true, true, true, true, true, true, true,
			},
		},
		// Existing key at 11, mixed puts and conditional puts.
		{
			roachpb.Key("11"),
			[]roachpb.Request{
				&pArgs[0], &cpArgs[1], &pArgs[2], &cpArgs[3], &pArgs[4], &cpArgs[5], &pArgs[6], &cpArgs[7], &pArgs[8], &cpArgs[9],
			},
			[]bool{
				true, true, true, true, true, true, true, true, true, true,
			},
		},
		// Existing key at 00, ten puts, expect nothing blind.
		{
			roachpb.Key("00"),
			[]roachpb.Request{
				&pArgs[0], &pArgs[1], &pArgs[2], &pArgs[3], &pArgs[4], &pArgs[5], &pArgs[6], &pArgs[7], &pArgs[8], &pArgs[9],
			},
			[]bool{
				false, false, false, false, false, false, false, false, false, false,
			},
		},
		// Existing key at 00, ten puts in reverse order, expect nothing blind.
		{
			roachpb.Key("00"),
			[]roachpb.Request{
				&pArgs[9], &pArgs[8], &pArgs[7], &pArgs[6], &pArgs[5], &pArgs[4], &pArgs[3], &pArgs[2], &pArgs[1], &pArgs[0],
			},
			[]bool{
				false, false, false, false, false, false, false, false, false, false,
			},
		},
		// Existing key at 05, ten puts, expect first five puts are blind.
		{
			roachpb.Key("05"),
			[]roachpb.Request{
				&pArgs[0], &pArgs[1], &pArgs[2], &pArgs[3], &pArgs[4], &pArgs[5], &pArgs[6], &pArgs[7], &pArgs[8], &pArgs[9],
			},
			[]bool{
				true, true, true, true, true, false, false, false, false, false,
			},
		},
		// No existing key, ten puts + inc + ten cputs.
		{
			nil,
			[]roachpb.Request{
				&pArgs[0], &pArgs[1], &pArgs[2], &pArgs[3], &pArgs[4], &pArgs[5], &pArgs[6], &pArgs[7], &pArgs[8], &pArgs[9],
				&incArgs, &cpArgs[0], &cpArgs[1], &cpArgs[2], &cpArgs[3], &cpArgs[4], &cpArgs[5], &cpArgs[6], &cpArgs[7], &cpArgs[8], &cpArgs[9],
			},
			[]bool{
				true, true, true, true, true, true, true, true, true, true,
				false, false, false, false, false, false, false, false, false, false, false,
			},
		},
		// Duplicate put at 11th key; should see ten puts.
		{
			nil,
			[]roachpb.Request{
				&pArgs[0], &pArgs[1], &pArgs[2], &pArgs[3], &pArgs[4], &pArgs[5], &pArgs[6], &pArgs[7], &pArgs[8], &pArgs[9], &pArgs[9],
			},
			[]bool{
				true, true, true, true, true, true, true, true, true, true, false,
			},
		},
		// Duplicate cput at 11th key; should see ten puts.
		{
			nil,
			[]roachpb.Request{
				&pArgs[0], &pArgs[1], &pArgs[2], &pArgs[3], &pArgs[4], &pArgs[5], &pArgs[6], &pArgs[7], &pArgs[8], &pArgs[9], &cpArgs[9],
			},
			[]bool{
				true, true, true, true, true, true, true, true, true, true, false,
			},
		},
		// Duplicate cput at 6th key; should see ten cputs.
		{
			nil,
			[]roachpb.Request{
				&cpArgs[0], &cpArgs[1], &cpArgs[2], &cpArgs[3], &cpArgs[4], &cpArgs[5], &cpArgs[6], &cpArgs[7], &cpArgs[8], &cpArgs[9], &cpArgs[9],
			},
			[]bool{
				true, true, true, true, true, true, true, true, true, true, false,
			},
		},
	}
	for i, c := range testCases {
		// Seed the engine with the case's pre-existing key, if any, so the
		// keyspace is no longer virgin.
		if c.exKey != nil {
			if err := engine.MVCCPut(context.Background(), tc.engine, nil, c.exKey,
				hlc.ZeroTimestamp, roachpb.MakeValueFromString("foo"), nil); err != nil {
				t.Fatal(err)
			}
		}
		batch := roachpb.BatchRequest{}
		for _, r := range c.reqs {
			batch.Add(r)
		}
		optimizePuts(tc.engine, batch.Requests, false)
		// Collect the Blind flag of each request, resetting it afterwards so
		// the shared pArgs/cpArgs entries are clean for the next case.
		blind := []bool{}
		for _, r := range batch.Requests {
			switch t := r.GetInner().(type) {
			case *roachpb.PutRequest:
				blind = append(blind, t.Blind)
				t.Blind = false
			case *roachpb.ConditionalPutRequest:
				blind = append(blind, t.Blind)
				t.Blind = false
			default:
				blind = append(blind, false)
			}
		}
		if !reflect.DeepEqual(blind, c.expBlind) {
			t.Errorf("%d: expected %+v; got %+v", i, c.expBlind, blind)
		}
		// Remove the seeded key so later cases start from a virgin keyspace.
		if c.exKey != nil {
			if err := tc.engine.Clear(engine.MakeMVCCMetadataKey(c.exKey)); err != nil {
				t.Fatal(err)
			}
		}
	}
}
// TestAcquireLease verifies that the range lease is acquired
// for read and write methods, and eagerly renewed.
func TestAcquireLease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	gArgs := getArgs([]byte("a"))
	pArgs := putArgs([]byte("b"), []byte("1"))
	// Exercise the lease path once via a read and once via a write.
	testCases := []roachpb.Request{&gArgs, &pArgs}
	for i, test := range testCases {
		tc := testContext{}
		tc.Start(t)
		// This is a single-replica test; since we're automatically pushing back
		// the start of a lease as far as possible, and since there is an auto-
		// matic lease for us at the beginning, we'll basically create a lease from
		// then on.
		lease, _ := tc.rng.getLease()
		expStart := lease.Start
		// Expire the current lease so the request below must re-acquire it.
		tc.manualClock.Set(leaseExpiry(tc.rng))
		ts := tc.clock.Now().Next()
		if _, pErr := tc.SendWrappedWith(roachpb.Header{Timestamp: ts}, test); pErr != nil {
			t.Error(pErr)
		}
		if held, expired := hasLease(tc.rng, ts); !held || expired {
			t.Errorf("%d: expected lease acquisition", i)
		}
		lease, _ = tc.rng.getLease()
		if !lease.Start.Equal(expStart) {
			t.Errorf("%d: unexpected lease start: %s; expected %s", i, lease.Start, expStart)
		}
		if !ts.Less(lease.StartStasis) {
			t.Errorf("%d: %s already in stasis (or beyond): %+v", i, ts, lease)
		}
		// Advance the clock to just before the stasis period; a request here
		// should trigger an eager (asynchronous) lease extension.
		shouldRenewTS := lease.StartStasis.Add(-1, 0)
		tc.manualClock.Set(shouldRenewTS.WallTime + 1)
		if _, pErr := tc.SendWrapped(test); pErr != nil {
			t.Error(pErr)
		}
		// Since the command we sent above does not get blocked on the lease
		// extension, we need to wait for it to go through.
		util.SucceedsSoon(t, func() error {
			newLease, _ := tc.rng.getLease()
			if !lease.StartStasis.Less(newLease.StartStasis) {
				return errors.Errorf("%d: lease did not get extended: %+v to %+v", i, lease, newLease)
			}
			return nil
		})
		tc.Stop()
		if t.Failed() {
			return
		}
	}
}
// TestLeaseConcurrent issues several concurrent lease requests against a
// single replica and verifies that each caller receives its own cloned
// *Error (guarding against regression of #6111).
func TestLeaseConcurrent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const num = 5
	// The test was written to test this individual block below. The worry
	// was that this would NPE (it does not; proto.Clone is unusual in that it
	// returns the input value instead).
	{
		if protoutil.Clone((*roachpb.Error)(nil)).(*roachpb.Error) != nil {
			t.Fatal("could not clone nil *Error")
		}
	}
	// Testing concurrent range lease requests is still a good idea. We check
	// that they work and clone *Error, which prevents regression of #6111.
	const origMsg = "boom"
	for _, withError := range []bool{false, true} {
		func(withError bool) {
			tc := testContext{}
			tc.Start(t)
			defer tc.Stop()
			var wg sync.WaitGroup
			wg.Add(num)
			var active atomic.Value
			active.Store(false)
			var seen int32
			// Intercept the Raft proposal of the lease request: once all num
			// requesters have queued up (wg), complete it either with an error
			// or via the default path, asynchronously.
			tc.rng.mu.Lock()
			tc.rng.mu.proposeRaftCommandFn = func(cmd *pendingCmd) error {
				ll, ok := cmd.raftCmd.Cmd.Requests[0].
					GetInner().(*roachpb.RequestLeaseRequest)
				if !ok || !active.Load().(bool) {
					return defaultProposeRaftCommandLocked(tc.rng, cmd)
				}
				if c := atomic.AddInt32(&seen, 1); c > 1 {
					// Morally speaking, this is an error, but reproposals can
					// happen and so we warn (in case this trips the test up
					// in more unexpected ways).
					log.Infof(context.Background(), "reproposal of %+v", ll)
				}
				go func() {
					// Wait for all lease requesters to be in flight before
					// resolving the proposal.
					wg.Wait()
					if withError {
						cmd.done <- roachpb.ResponseWithError{
							Err: roachpb.NewErrorf(origMsg),
						}
						return
					}
					tc.rng.mu.Lock()
					defer tc.rng.mu.Unlock()
					if err := defaultProposeRaftCommandLocked(tc.rng, cmd); err != nil {
						panic(err) // unlikely, so punt on proper handling
					}
				}()
				return nil
			}
			tc.rng.mu.Unlock()
			active.Store(true)
			tc.manualClock.Increment(leaseExpiry(tc.rng))
			ts := tc.clock.Now()
			pErrCh := make(chan *roachpb.Error, num)
			// Fire off num concurrent lease requests; they should all coalesce
			// onto the single intercepted proposal above.
			for i := 0; i < num; i++ {
				if err := tc.stopper.RunAsyncTask(func() {
					tc.rng.mu.Lock()
					leaseCh := tc.rng.requestLeaseLocked(ts)
					tc.rng.mu.Unlock()
					wg.Done()
					pErr := <-leaseCh
					// Mutate the errors as we receive them to expose races.
					if pErr != nil {
						pErr.OriginNode = 0
					}
					pErrCh <- pErr
				}); err != nil {
					t.Fatal(err)
				}
			}
			pErrs := make([]*roachpb.Error, num)
			for i := range pErrs {
				// Make sure all of the responses are in (just so that we can
				// mess with the "original" error knowing that all of the
				// cloning must have happened by now).
				pErrs[i] = <-pErrCh
			}
			newMsg := "moob"
			for i, pErr := range pErrs {
				if withError != (pErr != nil) {
					t.Errorf("%d: wanted error: %t, got error %v", i, withError, pErr)
				}
				// If any pErr already carries newMsg, two callers shared the
				// same *Error instance, i.e. cloning was skipped.
				if testutils.IsPError(pErr, newMsg) {
					t.Errorf("%d: errors shared memory: %v", i, pErr)
				} else if testutils.IsPError(pErr, origMsg) {
					// Mess with anyone holding the same reference.
					pErr.Message = newMsg
				} else if pErr != nil {
					t.Errorf("%d: unexpected error: %s", i, pErr)
				}
			}
		}(withError)
	}
}
// TestReplicaUpdateTSCache verifies that reads and writes update the
// timestamp cache.
func TestReplicaUpdateTSCache(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Set clock to time 1s and do the read.
	t0 := 1 * time.Second
	tc.manualClock.Set(t0.Nanoseconds())
	gArgs := getArgs([]byte("a"))
	ts := tc.clock.Now()
	_, pErr := tc.SendWrappedWith(roachpb.Header{Timestamp: ts}, &gArgs)
	if pErr != nil {
		t.Error(pErr)
	}
	// Set clock to time 2s for write.
	t1 := 2 * time.Second
	key := roachpb.Key([]byte("b"))
	tc.manualClock.Set(t1.Nanoseconds())
	drArgs := roachpb.NewDeleteRange(key, key.Next(), false)
	ts = tc.clock.Now()
	_, pErr = tc.SendWrappedWith(roachpb.Header{Timestamp: ts}, drArgs)
	if pErr != nil {
		t.Error(pErr)
	}
	// Verify the timestamp cache has rTS=1s and wTS=0s for "a".
	tc.rng.mu.Lock()
	defer tc.rng.mu.Unlock()
	// Before ExpandRequests, the cached requests have not been flattened
	// into the interval cache yet, so lookups should miss.
	_, rOK := tc.rng.mu.tsCache.GetMaxRead(roachpb.Key("a"), nil, nil)
	_, wOK := tc.rng.mu.tsCache.GetMaxWrite(roachpb.Key("a"), nil, nil)
	if rOK || wOK {
		t.Errorf("expected rOK=false and wOK=false; rOK=%t, wOK=%t", rOK, wOK)
	}
	tc.rng.mu.tsCache.ExpandRequests(hlc.ZeroTimestamp)
	rTS, rOK := tc.rng.mu.tsCache.GetMaxRead(roachpb.Key("a"), nil, nil)
	wTS, wOK := tc.rng.mu.tsCache.GetMaxWrite(roachpb.Key("a"), nil, nil)
	if rTS.WallTime != t0.Nanoseconds() || wTS.WallTime != 0 || !rOK || wOK {
		t.Errorf("expected rTS=1s and wTS=0s, but got %s, %s; rOK=%t, wOK=%t", rTS, wTS, rOK, wOK)
	}
	// Verify the timestamp cache has rTS=0s and wTS=2s for "b".
	rTS, rOK = tc.rng.mu.tsCache.GetMaxRead(roachpb.Key("b"), nil, nil)
	wTS, wOK = tc.rng.mu.tsCache.GetMaxWrite(roachpb.Key("b"), nil, nil)
	if rTS.WallTime != 0 || wTS.WallTime != t1.Nanoseconds() || rOK || !wOK {
		t.Errorf("expected rTS=0s and wTS=2s, but got %s, %s; rOK=%t, wOK=%t", rTS, wTS, rOK, wOK)
	}
	// Verify another key ("c") has 0sec in timestamp cache.
	rTS, rOK = tc.rng.mu.tsCache.GetMaxRead(roachpb.Key("c"), nil, nil)
	wTS, wOK = tc.rng.mu.tsCache.GetMaxWrite(roachpb.Key("c"), nil, nil)
	if rTS.WallTime != 0 || wTS.WallTime != 0 || rOK || wOK {
		t.Errorf("expected rTS=0s and wTS=0s, but got %s %s; rOK=%t, wOK=%t", rTS, wTS, rOK, wOK)
	}
}
// TestReplicaCommandQueue verifies that reads/writes must wait for
// pending commands to complete through Raft before being executed on
// range.
func TestReplicaCommandQueue(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Intercept commands with matching command IDs and block them.
	blockingStart := make(chan struct{})
	blockingDone := make(chan struct{})
	tc := testContext{}
	tsc := TestStoreContext()
	// Commands sent with UserPriority 42 signal on blockingStart and then
	// park until blockingDone fires; all other commands pass through.
	tsc.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			if filterArgs.Hdr.UserPriority == 42 {
				blockingStart <- struct{}{}
				<-blockingDone
			}
			return nil
		}
	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()
	defer close(blockingDone) // make sure teardown can happen
	// Test all four combinations of reads & writes waiting.
	testCases := []struct {
		cmd1Read, cmd2Read bool
		expWait            bool
	}{
		// Read/read doesn't wait.
		{true, true, false},
		// All other combinations must wait.
		{true, false, true},
		{false, true, true},
		{false, false, true},
	}
	tooLong := 5 * time.Second
	for i, test := range testCases {
		key1 := roachpb.Key(fmt.Sprintf("key1-%d", i))
		key2 := roachpb.Key(fmt.Sprintf("key2-%d", i))
		// Asynchronously put a value to the rng with blocking enabled.
		// NOTE(review): these async tasks call t.Fatalf from non-test
		// goroutines, which the testing package does not support — presumably
		// tolerated in this era of the codebase; confirm before copying.
		cmd1Done := make(chan struct{})
		if err := tc.stopper.RunAsyncTask(func() {
			args := readOrWriteArgs(key1, test.cmd1Read)
			_, pErr := tc.SendWrappedWith(roachpb.Header{
				UserPriority: 42,
			}, args)
			if pErr != nil {
				t.Fatalf("test %d: %s", i, pErr)
			}
			close(cmd1Done)
		}); err != nil {
			t.Fatal(err)
		}
		// Wait for cmd1 to get into the command queue.
		<-blockingStart
		// First, try a command for same key as cmd1 to verify it blocks.
		cmd2Done := make(chan struct{})
		if err := tc.stopper.RunAsyncTask(func() {
			args := readOrWriteArgs(key1, test.cmd2Read)
			_, pErr := tc.SendWrapped(args)
			if pErr != nil {
				t.Fatalf("test %d: %s", i, pErr)
			}
			close(cmd2Done)
		}); err != nil {
			t.Fatal(err)
		}
		// Next, try read for a non-impacted key--should go through immediately.
		cmd3Done := make(chan struct{})
		if err := tc.stopper.RunAsyncTask(func() {
			args := readOrWriteArgs(key2, true)
			_, pErr := tc.SendWrapped(args)
			if pErr != nil {
				t.Fatalf("test %d: %s", i, pErr)
			}
			close(cmd3Done)
		}); err != nil {
			t.Fatal(err)
		}
		if test.expWait {
			// Verify cmd3 finishes but not cmd2.
			select {
			case <-cmd2Done:
				t.Fatalf("test %d: should not have been able to execute cmd2", i)
			case <-cmd3Done:
				// success.
			case <-cmd1Done:
				t.Fatalf("test %d: should not have been able execute cmd1 while blocked", i)
			case <-time.After(tooLong):
				t.Fatalf("test %d: waited %s for cmd3 of key2", i, tooLong)
			}
		} else {
			// Read/read case: cmd2 may complete while cmd1 is still blocked.
			select {
			case <-cmd2Done:
				// success.
			case <-cmd1Done:
				t.Fatalf("test %d: should not have been able to execute cmd1 while blocked", i)
			case <-time.After(tooLong):
				t.Fatalf("test %d: waited %s for cmd2 of key1", i, tooLong)
			}
			<-cmd3Done
		}
		// Unblock cmd1; cmd2 (if it waited) must now complete as well.
		blockingDone <- struct{}{}
		select {
		case <-cmd2Done:
			// success.
		case <-time.After(tooLong):
			t.Fatalf("test %d: waited %s for cmd2 of key1", i, tooLong)
		}
	}
}
// TestReplicaCommandQueueInconsistent verifies that inconsistent reads need
// not wait for pending commands to complete through Raft.
func TestReplicaCommandQueueInconsistent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	key := roachpb.Key("key1")
	blockingStart := make(chan struct{}, 1)
	blockingDone := make(chan struct{})
	tc := testContext{}
	tsc := TestStoreContext()
	// Block only the specific Put of value {1} to key1 so that it sits in
	// the command queue while we issue the inconsistent read.
	tsc.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			if put, ok := filterArgs.Req.(*roachpb.PutRequest); ok {
				putBytes, err := put.Value.GetBytes()
				if err != nil {
					return roachpb.NewErrorWithTxn(err, filterArgs.Hdr.Txn)
				}
				if bytes.Equal(put.Key, key) && bytes.Equal(putBytes, []byte{1}) {
					// Absence of replay protection can mean that we end up here
					// more often than we expect, hence the select (#3669).
					select {
					case blockingStart <- struct{}{}:
					default:
					}
					<-blockingDone
				}
			}
			return nil
		}
	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()
	cmd1Done := make(chan struct{})
	go func() {
		args := putArgs(key, []byte{1})
		_, pErr := tc.SendWrapped(&args)
		if pErr != nil {
			t.Fatal(pErr)
		}
		close(cmd1Done)
	}()
	// Wait for cmd1 to get into the command queue.
	<-blockingStart
	// An inconsistent read to the key won't wait.
	cmd2Done := make(chan struct{})
	go func() {
		args := getArgs(key)
		_, pErr := tc.SendWrappedWith(roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, &args)
		if pErr != nil {
			t.Fatal(pErr)
		}
		close(cmd2Done)
	}()
	// cmd2 must finish while cmd1 is still blocked in the filter.
	select {
	case <-cmd2Done:
		// success.
	case <-cmd1Done:
		t.Fatalf("cmd1 should have been blocked")
	}
	// Release cmd1 and wait for it to drain before teardown.
	close(blockingDone)
	<-cmd1Done
	// Success.
}
// SendWrapped packs a single request into a one-element batch, sends it
// through the given sender, and unpacks the lone response along with the
// batch response header. On error, the response is nil and the header is
// zero-valued.
func SendWrapped(sender client.Sender, ctx context.Context, header roachpb.Header, args roachpb.Request) (roachpb.Response, roachpb.BatchResponse_Header, *roachpb.Error) {
	ba := roachpb.BatchRequest{Header: header}
	ba.Add(args)
	br, pErr := sender.Send(ctx, ba)
	if pErr != nil {
		return nil, roachpb.BatchResponse_Header{}, pErr
	}
	// pErr is nil here, so return it directly as the error slot.
	return br.Responses[0].GetInner(), br.BatchResponse_Header, nil
}
// TestReplicaUseTSCache verifies that write timestamps are upgraded
// based on the read timestamp cache.
func TestReplicaUseTSCache(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	// Advance the clock to 1s and read key "a" to warm the read
	// timestamp cache.
	readTime := 1 * time.Second
	tc.manualClock.Set(readTime.Nanoseconds())
	gArgs := getArgs([]byte("a"))
	if _, pErr := tc.SendWrapped(&gArgs); pErr != nil {
		t.Error(pErr)
	}

	// A subsequent write to the same key must have its timestamp pushed
	// up to the cached read timestamp.
	pArgs := putArgs([]byte("a"), []byte("value"))
	_, respH, pErr := SendWrapped(tc.Sender(), context.Background(), roachpb.Header{}, &pArgs)
	if pErr != nil {
		t.Fatal(pErr)
	}
	if got, want := respH.Timestamp.WallTime, tc.clock.Timestamp().WallTime; got != want {
		t.Errorf("expected write timestamp to upgrade to 1s; got %s", respH.Timestamp)
	}
}
// TestReplicaNoTSCacheInconsistent verifies that the timestamp cache
// is not affected by inconsistent reads.
func TestReplicaNoTSCacheInconsistent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	// Read key "a" at 1s, inconsistently: this must not populate the
	// read timestamp cache.
	readTime := 1 * time.Second
	tc.manualClock.Set(readTime.Nanoseconds())
	gArgs := getArgs([]byte("a"))
	readTS := tc.clock.Now()
	if _, pErr := tc.SendWrappedWith(roachpb.Header{
		Timestamp:       readTS,
		ReadConsistency: roachpb.INCONSISTENT,
	}, &gArgs); pErr != nil {
		t.Error(pErr)
	}

	// A write at a tiny logical timestamp must therefore NOT be pushed
	// to the 1s read time.
	pArgs := putArgs([]byte("a"), []byte("value"))
	_, respH, pErr := SendWrapped(tc.Sender(), context.Background(), roachpb.Header{Timestamp: hlc.ZeroTimestamp.Add(0, 1)}, &pArgs)
	if pErr != nil {
		t.Fatal(pErr)
	}
	if respH.Timestamp.WallTime == tc.clock.Timestamp().WallTime {
		t.Errorf("expected write timestamp not to upgrade to 1s; got %s", respH.Timestamp)
	}
}
// TestReplicaNoTSCacheUpdateOnFailure verifies that read and write
// commands do not update the timestamp cache if they result in
// failure.
func TestReplicaNoTSCacheUpdateOnFailure(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Test for both read & write attempts.
	for i, read := range []bool{true, false} {
		key := roachpb.Key(fmt.Sprintf("key-%d", i))
		// Start by laying down an intent to trip up future read or write to same key.
		pArgs := putArgs(key, []byte("value"))
		txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
		_, pErr := tc.SendWrappedWith(roachpb.Header{
			Txn: txn,
		}, &pArgs)
		if pErr != nil {
			t.Fatalf("test %d: %s", i, pErr)
		}
		// Now attempt read or write. The intent above should cause a failure,
		// which must leave the timestamp cache untouched.
		args := readOrWriteArgs(key, read)
		ts := tc.clock.Now() // later timestamp
		if _, pErr := tc.SendWrappedWith(roachpb.Header{
			Timestamp: ts,
		}, args); pErr == nil {
			t.Errorf("test %d: expected failure", i)
		}
		// Write the intent again -- should not have its timestamp upgraded!
		txn.Sequence++
		if _, respH, pErr := SendWrapped(tc.Sender(), context.Background(), roachpb.Header{Txn: txn}, &pArgs); pErr != nil {
			t.Fatalf("test %d: %s", i, pErr)
		} else if !respH.Txn.Timestamp.Equal(txn.Timestamp) {
			t.Errorf("expected timestamp not to advance %s != %s", respH.Timestamp, txn.Timestamp)
		}
	}
}
// TestReplicaNoTimestampIncrementWithinTxn verifies that successive
// read and write commands within the same transaction do not cause
// the write to receive an incremented timestamp.
func TestReplicaNoTimestampIncrementWithinTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Test for both read & write attempts.
	key := roachpb.Key("a")
	txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	// Start with a read to warm the timestamp cache.
	gArgs := getArgs(key)
	if _, pErr := tc.SendWrappedWith(roachpb.Header{
		Txn: txn,
	}, &gArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Now try a write and verify timestamp isn't incremented.
	pArgs := putArgs(key, []byte("value"))
	txn.Sequence++
	_, respH, pErr := SendWrapped(tc.Sender(), context.Background(), roachpb.Header{Txn: txn}, &pArgs)
	if pErr != nil {
		t.Fatal(pErr)
	}
	if !respH.Txn.Timestamp.Equal(txn.Timestamp) {
		t.Errorf("expected timestamp to remain %s; got %s", txn.Timestamp, respH.Timestamp)
	}
	// Resolve the intent so that the follow-up non-transactional write
	// below is not blocked by it.
	rArgs := &roachpb.ResolveIntentRequest{
		Span:      pArgs.Header(),
		IntentTxn: txn.TxnMeta,
		Status:    roachpb.COMMITTED,
	}
	txn.Sequence++
	if _, pErr = tc.SendWrappedWith(roachpb.Header{Timestamp: txn.Timestamp}, rArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Finally, try a non-transactional write and verify timestamp is incremented.
	ts := txn.Timestamp
	expTS := ts
	expTS.Logical++
	_, respH, pErr = SendWrapped(tc.Sender(), context.Background(), roachpb.Header{Timestamp: ts}, &pArgs)
	if pErr != nil {
		t.Errorf("unexpected pError: %s", pErr)
	}
	if !respH.Timestamp.Equal(expTS) {
		t.Errorf("expected timestamp to increment to %s; got %s", expTS, respH.Timestamp)
	}
}
// TestReplicaAbortCacheReadError verifies that an error is returned
// to the client in the event that a abort cache entry is found but is
// not decodable.
func TestReplicaAbortCacheReadError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	k := []byte("a")
	txn := newTransaction("test", k, 10, enginepb.SERIALIZABLE, tc.clock)
	txn.Sequence = 1
	args := incrementArgs(k, 1)
	if _, pErr := tc.SendWrappedWith(roachpb.Header{
		Txn: txn,
	}, &args); pErr != nil {
		t.Fatal(pErr)
	}

	// Scribble garbage over the txn's abort cache entry so it can no
	// longer be decoded.
	key := keys.AbortCacheKey(tc.rng.RangeID, txn.ID)
	if err := engine.MVCCPut(context.Background(), tc.engine, nil, key, hlc.ZeroTimestamp, roachpb.MakeValueFromString("never read in this test"), nil); err != nil {
		t.Fatal(err)
	}

	// A retry of the increment must now surface a corruption error.
	if _, pErr := tc.SendWrappedWith(roachpb.Header{
		Txn: txn,
	}, &args); !testutils.IsPError(pErr, "replica corruption") {
		t.Fatal(pErr)
	}
}
// TestReplicaAbortCacheStoredTxnRetryError verifies that if a cached
// entry is present, a transaction restart error is returned.
func TestReplicaAbortCacheStoredTxnRetryError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	key := []byte("a")
	{
		// Manually plant an abort cache entry and verify that a write by
		// the same txn is rejected as aborted.
		txn := newTransaction("test", key, 10, enginepb.SERIALIZABLE, tc.clock)
		txn.Sequence = int32(1)
		entry := roachpb.AbortCacheEntry{
			Key:       txn.Key,
			Timestamp: txn.Timestamp,
			Priority:  0,
		}
		if err := tc.rng.abortCache.Put(context.Background(), tc.engine, nil, txn.ID, &entry); err != nil {
			t.Fatal(err)
		}
		args := incrementArgs(key, 1)
		_, pErr := tc.SendWrappedWith(roachpb.Header{
			Txn: txn,
		}, &args)
		if _, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); !ok {
			t.Fatalf("unexpected error %v", pErr)
		}
	}
	// Try the same again, this time verifying that the Put will actually
	// populate the cache appropriately.
	txn := newTransaction("test", key, 10, enginepb.SERIALIZABLE, tc.clock)
	txn.Sequence = 321
	args := incrementArgs(key, 1)
	try := func() *roachpb.Error {
		_, pErr := tc.SendWrappedWith(roachpb.Header{
			Txn: txn,
		}, &args)
		return pErr
	}
	if pErr := try(); pErr != nil {
		t.Fatal(pErr)
	}
	// Replaying at a pushed timestamp (same epoch/sequence) must be
	// detected and rejected with a retry error.
	txn.Timestamp.Forward(txn.Timestamp.Add(10, 10)) // can't hurt
	{
		pErr := try()
		if _, ok := pErr.GetDetail().(*roachpb.TransactionRetryError); !ok {
			t.Fatal(pErr)
		}
	}
	// Pretend we restarted by increasing the epoch. That's all that's needed.
	txn.Epoch++
	txn.Sequence++
	if pErr := try(); pErr != nil {
		t.Fatal(pErr)
	}
	// Now increase the sequence as well. Still good to go.
	txn.Sequence++
	if pErr := try(); pErr != nil {
		t.Fatal(pErr)
	}
}
// TestTransactionRetryLeavesIntents sets up a transaction retry event
// and verifies that the intents which were written as part of a
// single batch are left in place despite the failed end transaction.
func TestTransactionRetryLeavesIntents(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	key := roachpb.Key("a")
	pushee := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	pusher := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	pushee.Priority = 1
	pusher.Priority = 2 // pusher will win
	// Read from the key to increment the timestamp cache.
	gArgs := getArgs(key)
	if _, pErr := tc.SendWrapped(&gArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Begin txn, write to key (with now-higher timestamp), and attempt to
	// commit the txn, which should result in a retryable error.
	btArgs, _ := beginTxnArgs(key, pushee)
	pArgs := putArgs(key, []byte("foo"))
	etArgs, _ := endTxnArgs(pushee, true /* commit */)
	var ba roachpb.BatchRequest
	ba.Header.Txn = pushee
	ba.Add(&btArgs)
	ba.Add(&pArgs)
	ba.Add(&etArgs)
	_, pErr := tc.Sender().Send(context.Background(), ba)
	if _, ok := pErr.GetDetail().(*roachpb.TransactionRetryError); !ok {
		t.Fatalf("expected retry error; got %s", pErr)
	}
	// Now verify that the intent was still written for key: a plain read
	// must trip over it.
	_, pErr = tc.SendWrapped(&gArgs)
	if _, ok := pErr.GetDetail().(*roachpb.WriteIntentError); !ok {
		t.Fatalf("expected write intent error; got %s", pErr)
	}
}
// TestReplicaAbortCacheOnlyWithIntent verifies that a transactional command
// which goes through Raft but is not a transactional write (i.e. does not
// leave intents) passes the abort cache unhindered.
func TestReplicaAbortCacheOnlyWithIntent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	txn := newTransaction("test", []byte("test"), 10, enginepb.SERIALIZABLE, tc.clock)
	txn.Sequence = 100
	// Plant an abort cache entry for the txn; a transactional write would
	// trip over it, but a heartbeat must not.
	entry := roachpb.AbortCacheEntry{Key: txn.Key, Timestamp: txn.Timestamp, Priority: 0}
	if err := tc.rng.abortCache.Put(context.Background(), tc.engine, nil, txn.ID, &entry); err != nil {
		t.Fatal(err)
	}

	args, h := heartbeatArgs(txn)
	// If the abort cache were active for this request, we'd catch a txn retry.
	// Instead, we expect the error from heartbeating a nonexistent txn.
	if _, pErr := tc.SendWrappedWith(h, &args); !testutils.IsPError(pErr, "record not present") {
		t.Fatal(pErr)
	}
}
// TestEndTransactionDeadline verifies that EndTransaction respects the
// transaction deadline.
func TestEndTransactionDeadline(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// 4 cases: no deadline, past deadline, equal deadline, future deadline.
	for i := 0; i < 4; i++ {
		key := roachpb.Key("key: " + strconv.Itoa(i))
		txn := newTransaction("txn: "+strconv.Itoa(i), key, 1, enginepb.SERIALIZABLE, tc.clock)
		put := putArgs(key, key)
		_, header := beginTxnArgs(key, txn)
		if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), header, &put); pErr != nil {
			t.Fatal(pErr)
		}
		txn.Writing = true
		etArgs, etHeader := endTxnArgs(txn, true /* commit */)
		// Install the deadline relative to the txn timestamp per case.
		switch i {
		case 0:
			// No deadline.
		case 1:
			// Past deadline.
			ts := txn.Timestamp.Prev()
			etArgs.Deadline = &ts
		case 2:
			// Equal deadline.
			etArgs.Deadline = &txn.Timestamp
		case 3:
			// Future deadline.
			ts := txn.Timestamp.Next()
			etArgs.Deadline = &ts
		}
		{
			txn.Sequence++
			_, pErr := tc.SendWrappedWith(etHeader, &etArgs)
			// Only the past-deadline case may fail; equal counts as met.
			switch i {
			case 0:
				// No deadline.
				if pErr != nil {
					t.Error(pErr)
				}
			case 1:
				// Past deadline.
				if _, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); !ok {
					t.Errorf("expected TransactionAbortedError but got %T: %s", pErr, pErr)
				}
			case 2:
				// Equal deadline.
				if pErr != nil {
					t.Error(pErr)
				}
			case 3:
				// Future deadline.
				if pErr != nil {
					t.Error(pErr)
				}
			}
		}
	}
}
// TestEndTransactionDeadline_1PC verifies that a transaction that
// exceeded its deadline will be aborted even when one phase commit is
// applicable.
func TestEndTransactionDeadline_1PC(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	key := roachpb.Key("a")
	txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	bt, _ := beginTxnArgs(key, txn)
	put := putArgs(key, []byte("value"))
	et, etH := endTxnArgs(txn, true)
	// Install a deadline that already lies in the past of the txn timestamp.
	deadline := txn.Timestamp.Prev()
	et.Deadline = &deadline

	// Submit begin/put/end as a single batch so the one-phase-commit path
	// is eligible; the expired deadline must still abort it.
	var ba roachpb.BatchRequest
	ba.Header = etH
	ba.Add(&bt, &put, &et)
	_, pErr := tc.Sender().Send(context.Background(), ba)
	if _, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); !ok {
		t.Errorf("expected TransactionAbortedError but got %T: %s", pErr, pErr)
	}
}
// TestEndTransactionWithMalformedSplitTrigger verifies an
// EndTransaction call with a malformed commit trigger fails.
func TestEndTransactionWithMalformedSplitTrigger(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	key := roachpb.Key("foo")
	txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	pArgs := putArgs(key, []byte("only here to make this a rw transaction"))
	txn.Sequence++
	if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), roachpb.Header{
		Txn: txn,
	}, &pArgs); pErr != nil {
		t.Fatal(pErr)
	}
	txn.Writing = true
	args, h := endTxnArgs(txn, true /* commit */)
	// Make an EndTransaction request which would fail if not
	// stripped. In this case, we set the start key to "bar" for a
	// split of the default range; start key must be "" in this case.
	args.InternalCommitTrigger = &roachpb.InternalCommitTrigger{
		SplitTrigger: &roachpb.SplitTrigger{
			LeftDesc: roachpb.RangeDescriptor{StartKey: roachpb.RKey("bar")},
		},
	}
	txn.Sequence++
	if _, pErr := tc.SendWrappedWith(h, &args); !testutils.IsPError(pErr, "range does not match splits") {
		t.Errorf("expected range does not match splits error; got %s", pErr)
	}
}
// TestEndTransactionBeforeHeartbeat verifies that a transaction
// can be committed/aborted before being heartbeat.
func TestEndTransactionBeforeHeartbeat(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Don't automatically GC the Txn record: We want to heartbeat the
	// committed Transaction and compare it against our expectations.
	// When it's removed, the heartbeat would recreate it.
	defer setTxnAutoGC(false)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	key := []byte("a")
	// Run once committing, once aborting.
	for _, commit := range []bool{true, false} {
		txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
		_, btH := beginTxnArgs(key, txn)
		put := putArgs(key, key)
		if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
			t.Fatal(pErr)
		}
		txn.Sequence++
		txn.Writing = true
		args, h := endTxnArgs(txn, commit)
		resp, pErr := tc.SendWrappedWith(h, &args)
		if pErr != nil {
			t.Error(pErr)
		}
		reply := resp.(*roachpb.EndTransactionResponse)
		expStatus := roachpb.COMMITTED
		if !commit {
			expStatus = roachpb.ABORTED
		}
		if reply.Txn.Status != expStatus {
			t.Errorf("expected transaction status to be %s; got %s", expStatus, reply.Txn.Status)
		}
		// Try a heartbeat to the already-committed transaction; should get
		// committed txn back, but without last heartbeat timestamp set.
		txn.Epoch++ // need to fake a higher epoch to sneak past sequence cache
		txn.Sequence++
		hBA, h := heartbeatArgs(txn)
		resp, pErr = tc.SendWrappedWith(h, &hBA)
		if pErr != nil {
			t.Error(pErr)
		}
		hBR := resp.(*roachpb.HeartbeatTxnResponse)
		if hBR.Txn.Status != expStatus || hBR.Txn.LastHeartbeat != nil {
			t.Errorf("unexpected heartbeat reply contents: %+v", hBR)
		}
		// Use a fresh key for the next iteration.
		key = roachpb.Key(key).Next()
	}
}
// TestEndTransactionAfterHeartbeat verifies that a transaction
// can be committed/aborted after being heartbeat.
func TestEndTransactionAfterHeartbeat(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	key := roachpb.Key("a")
	// Run once committing, once aborting.
	for _, commit := range []bool{true, false} {
		txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
		_, btH := beginTxnArgs(key, txn)
		put := putArgs(key, key)
		if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
			t.Fatal(pErr)
		}
		// Start out with a heartbeat to the transaction.
		hBA, h := heartbeatArgs(txn)
		txn.Sequence++
		resp, pErr := tc.SendWrappedWith(h, &hBA)
		if pErr != nil {
			t.Fatal(pErr)
		}
		hBR := resp.(*roachpb.HeartbeatTxnResponse)
		if hBR.Txn.Status != roachpb.PENDING || hBR.Txn.LastHeartbeat == nil {
			t.Errorf("unexpected heartbeat reply contents: %+v", hBR)
		}
		args, h := endTxnArgs(txn, commit)
		txn.Sequence++
		resp, pErr = tc.SendWrappedWith(h, &args)
		if pErr != nil {
			t.Error(pErr)
		}
		reply := resp.(*roachpb.EndTransactionResponse)
		expStatus := roachpb.COMMITTED
		if !commit {
			expStatus = roachpb.ABORTED
		}
		if reply.Txn.Status != expStatus {
			t.Errorf("expected transaction status to be %s; got %s", expStatus, reply.Txn.Status)
		}
		// The EndTransaction response must carry the heartbeat timestamp
		// unchanged from the earlier heartbeat.
		if reply.Txn.LastHeartbeat == nil || !reply.Txn.LastHeartbeat.Equal(*hBR.Txn.LastHeartbeat) {
			t.Errorf("expected heartbeats to remain equal: %+v != %+v",
				reply.Txn.LastHeartbeat, hBR.Txn.LastHeartbeat)
		}
		key = key.Next()
	}
}
// TestEndTransactionWithPushedTimestamp verifies that txn can be
// ended (both commit or abort) correctly when the commit timestamp is
// greater than the transaction timestamp, depending on the isolation
// level.
func TestEndTransactionWithPushedTimestamp(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Only a SERIALIZABLE commit with a pushed timestamp must fail;
	// SNAPSHOT commits and all aborts succeed.
	testCases := []struct {
		commit    bool
		isolation enginepb.IsolationType
		expErr    bool
	}{
		{true, enginepb.SERIALIZABLE, true},
		{true, enginepb.SNAPSHOT, false},
		{false, enginepb.SERIALIZABLE, false},
		{false, enginepb.SNAPSHOT, false},
	}
	key := roachpb.Key("a")
	for i, test := range testCases {
		pushee := newTransaction("pushee", key, 1, test.isolation, tc.clock)
		pusher := newTransaction("pusher", key, 1, enginepb.SERIALIZABLE, tc.clock)
		pushee.Priority = 1
		pusher.Priority = 2 // pusher will win
		_, btH := beginTxnArgs(key, pushee)
		put := putArgs(key, []byte("value"))
		if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
			t.Fatal(pErr)
		}
		// Push pushee txn.
		pushTxn := pushTxnArgs(pusher, pushee, roachpb.PUSH_TIMESTAMP)
		pushTxn.Key = pusher.Key
		if _, pErr := tc.SendWrapped(&pushTxn); pErr != nil {
			t.Error(pErr)
		}
		// End the transaction with args timestamp moved forward in time.
		endTxn, h := endTxnArgs(pushee, test.commit)
		pushee.Sequence++
		resp, pErr := tc.SendWrappedWith(h, &endTxn)
		if test.expErr {
			if _, ok := pErr.GetDetail().(*roachpb.TransactionRetryError); !ok {
				t.Errorf("%d: expected retry error; got %s", i, pErr)
			}
		} else {
			if pErr != nil {
				t.Errorf("%d: unexpected error: %s", i, pErr)
			}
			expStatus := roachpb.COMMITTED
			if !test.commit {
				expStatus = roachpb.ABORTED
			}
			reply := resp.(*roachpb.EndTransactionResponse)
			if reply.Txn.Status != expStatus {
				t.Errorf("%d: expected transaction status to be %s; got %s", i, expStatus, reply.Txn.Status)
			}
		}
		// Use a fresh key for the next case.
		key = key.Next()
	}
}
// TestEndTransactionWithIncrementedEpoch verifies that txn ended with
// a higher epoch (and priority) correctly assumes the higher epoch.
func TestEndTransactionWithIncrementedEpoch(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	key := []byte("a")
	txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	// Begin the transaction (wrapped around a write so it lays an intent).
	_, btH := beginTxnArgs(key, txn)
	put := putArgs(key, key)
	if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
		t.Fatal(pErr)
	}
	txn.Writing = true
	// Start out with a heartbeat to the transaction.
	hBA, h := heartbeatArgs(txn)
	txn.Sequence++
	_, pErr := tc.SendWrappedWith(h, &hBA)
	if pErr != nil {
		t.Error(pErr)
	}
	// Now end the txn with increased epoch and priority.
	// NOTE(review): h.Txn appears to alias txn (via endTxnArgs), so these
	// bumps are also visible through txn in the comparisons below — confirm
	// against endTxnArgs before relying on it.
	args, h := endTxnArgs(txn, true)
	h.Txn.Epoch = txn.Epoch + 1
	h.Txn.Priority = txn.Priority + 1
	txn.Sequence++
	resp, pErr := tc.SendWrappedWith(h, &args)
	if pErr != nil {
		t.Error(pErr)
	}
	reply := resp.(*roachpb.EndTransactionResponse)
	if reply.Txn.Status != roachpb.COMMITTED {
		t.Errorf("expected transaction status to be COMMITTED; got %s", reply.Txn.Status)
	}
	// The returned record must carry the bumped epoch and priority.
	if reply.Txn.Epoch != txn.Epoch {
		t.Errorf("expected epoch to equal %d; got %d", txn.Epoch, reply.Txn.Epoch)
	}
	if reply.Txn.Priority != txn.Priority {
		t.Errorf("expected priority to equal %d; got %d", txn.Priority, reply.Txn.Priority)
	}
}
// TestEndTransactionWithErrors verifies various error conditions
// are checked such as transaction already being committed or
// aborted, or timestamp or epoch regression.
func TestEndTransactionWithErrors(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Capture a timestamp before rewinding the clock so that the final test
	// case exercises a timestamp regression.
	regressTS := tc.clock.Now()
	tc.manualClock.Set(1)
	txn := newTransaction("test", roachpb.Key(""), 1, enginepb.SERIALIZABLE, tc.clock)
	// Sentinel meaning "don't write a txn record at all" for this case.
	doesNotExist := roachpb.TransactionStatus(-1)
	testCases := []struct {
		key          roachpb.Key
		existStatus  roachpb.TransactionStatus
		existEpoch   uint32
		existTS      hlc.Timestamp
		expErrRegexp string
	}{
		{roachpb.Key("a"), doesNotExist, txn.Epoch, txn.Timestamp, "does not exist"},
		{roachpb.Key("a"), roachpb.COMMITTED, txn.Epoch, txn.Timestamp, "txn \"test\" id=.*: already committed"},
		{roachpb.Key("b"), roachpb.ABORTED, txn.Epoch, txn.Timestamp, "txn aborted \"test\" id=.*"},
		{roachpb.Key("c"), roachpb.PENDING, txn.Epoch + 1, txn.Timestamp, "txn \"test\" id=.*: epoch regression: 0"},
		{roachpb.Key("d"), roachpb.PENDING, txn.Epoch, regressTS, `txn "test" id=.*: timestamp regression: 0.000000001,\d+`},
	}
	for i, test := range testCases {
		// Establish existing txn state by writing directly to range engine.
		existTxn := txn.Clone()
		existTxn.Key = test.key
		existTxn.Status = test.existStatus
		existTxn.Epoch = test.existEpoch
		existTxn.Timestamp = test.existTS
		txnKey := keys.TransactionKey(test.key, txn.ID)
		if test.existStatus != doesNotExist {
			if err := engine.MVCCPutProto(context.Background(), tc.rng.store.Engine(), nil, txnKey, hlc.ZeroTimestamp,
				nil, &existTxn); err != nil {
				t.Fatal(err)
			}
		}
		// End the transaction, verify expected error.
		txn.Key = test.key
		args, h := endTxnArgs(txn, true)
		txn.Sequence++
		if _, pErr := tc.SendWrappedWith(h, &args); !testutils.IsPError(pErr, test.expErrRegexp) {
			// Fixed format string: was "\not match:" where the "\n" escape
			// swallowed the "t" of "to", garbling the failure message.
			t.Errorf("%d: expected error:\n%s\nto match:\n%s", i, pErr, test.expErrRegexp)
		} else if txn := pErr.GetTxn(); txn != nil && txn.ID == nil {
			// Prevent regression of #5591.
			t.Fatalf("%d: received empty Transaction proto in error", i)
		}
	}
}
// TestEndTransactionRollbackAbortedTransaction verifies that no error
// is returned when a transaction that has already been aborted is
// rolled back by an EndTransactionRequest.
func TestEndTransactionRollbackAbortedTransaction(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Disable auto-GC so the aborted txn record survives for the rollback.
	defer setTxnAutoGC(false)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	key := []byte("a")
	txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	_, btH := beginTxnArgs(key, txn)
	put := putArgs(key, key)
	if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
		t.Fatal(pErr)
	}
	// Abort the transaction by pushing it with a higher priority.
	pusher := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	pusher.Priority = txn.Priority + 1 // will push successfully
	pushArgs := pushTxnArgs(pusher, btH.Txn, roachpb.PUSH_ABORT)
	if _, pErr := tc.SendWrapped(&pushArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Check if the intent has not yet been resolved.
	var ba roachpb.BatchRequest
	gArgs := getArgs(key)
	ba.Add(&gArgs)
	if err := ba.SetActiveTimestamp(tc.clock.Now); err != nil {
		t.Fatal(err)
	}
	_, pErr := tc.Sender().Send(context.Background(), ba)
	if _, ok := pErr.GetDetail().(*roachpb.WriteIntentError); !ok {
		t.Errorf("expected write intent error, but got %s", pErr)
	}
	// Abort the transaction again. No error is returned.
	args, h := endTxnArgs(txn, false)
	args.IntentSpans = []roachpb.Span{{Key: key}}
	resp, pErr := tc.SendWrappedWith(h, &args)
	if pErr != nil {
		t.Error(pErr)
	}
	reply := resp.(*roachpb.EndTransactionResponse)
	if reply.Txn.Status != roachpb.ABORTED {
		t.Errorf("expected transaction status to be ABORTED; got %s", reply.Txn.Status)
	}
	// Verify that the intent has been resolved: the same read that hit a
	// WriteIntentError above must now succeed.
	if _, pErr := tc.Sender().Send(context.Background(), ba); pErr != nil {
		t.Errorf("expected resolved intent, but got %s", pErr)
	}
}
// TestRaftReplayProtection verifies that non-transactional batches
// enjoy some protection from raft replays, but highlights an example
// where they won't.
func TestRaftReplayProtection(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer setTxnAutoGC(true)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	key := roachpb.Key("a")
	incs := []int64{1, 3, 7}
	// The first increment lands twice (the re-sent earlier-timestamp copy
	// below is applied rather than rejected), hence 2*incs[0].
	sum := 2 * incs[0]
	for _, n := range incs[1:] {
		sum += n
	}
	{
		// Start with an increment for key.
		incArgs := incrementArgs(key, incs[0])
		_, respH, pErr := SendWrapped(tc.Sender(), context.Background(), roachpb.Header{}, &incArgs)
		if pErr != nil {
			t.Fatal(pErr)
		}
		// Do an increment with timestamp to an earlier timestamp, but same key.
		// This will bump up to a higher timestamp than the original increment
		// and not surface a WriteTooOldError.
		h := roachpb.Header{Timestamp: respH.Timestamp.Prev()}
		_, respH, pErr = SendWrapped(tc.Sender(), context.Background(), h, &incArgs)
		if pErr != nil {
			// Fixed: previously logged respH (the response header) instead
			// of the actual error pErr, hiding the failure cause.
			t.Fatalf("unexpected error: %s", pErr)
		}
		if expTS := h.Timestamp.Next().Next(); !respH.Timestamp.Equal(expTS) {
			t.Fatalf("expected too-old increment to advance two logical ticks to %s; got %s", expTS, respH.Timestamp)
		}
		// Do an increment with exact timestamp; should propagate write too
		// old error. This is assumed to be a replay because the timestamp
		// encountered is an exact duplicate and nothing came before the
		// increment in the batch.
		h.Timestamp = respH.Timestamp
		_, _, pErr = SendWrapped(tc.Sender(), context.Background(), h, &incArgs)
		if _, ok := pErr.GetDetail().(*roachpb.WriteTooOldError); !ok {
			t.Fatalf("expected WriteTooOldError; got %s", pErr)
		}
	}
	// Send a double increment in a batch. This should increment twice,
	// as the same key is being incremented in the same batch.
	var ba roachpb.BatchRequest
	for _, inc := range incs[1:] {
		incArgs := incrementArgs(key, inc)
		ba.Add(&incArgs)
	}
	br, pErr := tc.Sender().Send(context.Background(), ba)
	if pErr != nil {
		t.Fatalf("unexpected error: %s", pErr)
	}
	if latest := br.Responses[len(br.Responses)-1].GetInner().(*roachpb.IncrementResponse).NewValue; latest != sum {
		t.Fatalf("expected %d, got %d", sum, latest)
	}
	// Now resend the batch with the same timestamp; this should look
	// like the replay it is and surface a WriteTooOldError.
	ba.Timestamp = br.Timestamp
	_, pErr = tc.Sender().Send(context.Background(), ba)
	if _, ok := pErr.GetDetail().(*roachpb.WriteTooOldError); !ok {
		t.Fatalf("expected WriteTooOldError; got %s", pErr)
	}
	// Send a DeleteRange & increment.
	incArgs := incrementArgs(key, 1)
	ba = roachpb.BatchRequest{}
	ba.Add(roachpb.NewDeleteRange(key, key.Next(), false))
	ba.Add(&incArgs)
	br, pErr = tc.Sender().Send(context.Background(), ba)
	if pErr != nil {
		t.Fatalf("unexpected error: %s", pErr)
	}
	// Send exact same batch; the DeleteRange should trip up and
	// we'll get a replay error.
	ba.Timestamp = br.Timestamp
	_, pErr = tc.Sender().Send(context.Background(), ba)
	if _, ok := pErr.GetDetail().(*roachpb.WriteTooOldError); !ok {
		t.Fatalf("expected WriteTooOldError; got %s", pErr)
	}
	// Send just a DeleteRange batch.
	ba = roachpb.BatchRequest{}
	ba.Add(roachpb.NewDeleteRange(key, key.Next(), false))
	br, pErr = tc.Sender().Send(context.Background(), ba)
	if pErr != nil {
		t.Fatalf("unexpected error: %s", pErr)
	}
	// Now send it again; will not look like a replay because the
	// previous DeleteRange didn't leave any tombstones at this
	// timestamp for the replay to "trip" over.
	ba.Timestamp = br.Timestamp
	_, pErr = tc.Sender().Send(context.Background(), ba)
	if pErr != nil {
		t.Fatalf("unexpected error: %s", pErr)
	}
}
// TestRaftReplayProtectionInTxn verifies that transactional batches
// enjoy protection from raft replays.
func TestRaftReplayProtectionInTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer setTxnAutoGC(true)()
	ctx := TestStoreContext()
	tc := testContext{}
	tc.StartWithStoreContext(t, ctx)
	defer tc.Stop()
	key := roachpb.Key("a")
	txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	// Send a batch with begin txn, put & end txn.
	var ba roachpb.BatchRequest
	bt, btH := beginTxnArgs(key, txn)
	put := putArgs(key, []byte("value"))
	et, _ := endTxnArgs(txn, true)
	et.IntentSpans = []roachpb.Span{{Key: key, EndKey: nil}}
	ba.Header = btH
	ba.Add(&bt)
	ba.Add(&put)
	ba.Add(&et)
	_, pErr := tc.Sender().Send(context.Background(), ba)
	if pErr != nil {
		t.Fatalf("unexpected error: %s", pErr)
	}
	// Replay the same batch twice; each replay must be rejected.
	for i := 0; i < 2; i++ {
		// Reach in and manually send to raft (to simulate Raft replay) and
		// also avoid updating the timestamp cache; verify WriteTooOldError.
		ba.Timestamp = txn.OrigTimestamp
		ch, _, err := tc.rng.proposeRaftCommand(context.Background(), ba)
		if err != nil {
			t.Fatalf("%d: unexpected error: %s", i, err)
		}
		// Wait for the proposal to apply and inspect the resulting error.
		respWithErr := <-ch
		if _, ok := respWithErr.Err.GetDetail().(*roachpb.WriteTooOldError); !ok {
			t.Fatalf("%d: expected WriteTooOldError; got %s", i, respWithErr.Err)
		}
	}
}
// TestReplicaLaziness verifies that Raft Groups are brought up lazily.
func TestReplicaLaziness(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// verifyInitAfter creates an uninitialized Raft group, executes the
	// request produced by makeReq against it, and asserts that executing
	// the request initialized the group as a side effect.
	verifyInitAfter := func(makeReq func() roachpb.Request) {
		tc := testContext{bootstrapMode: bootstrapRangeOnly}
		tc.Start(t)
		defer tc.Stop()

		// The group must not be up before any request is served.
		if status := tc.rng.RaftStatus(); status != nil {
			t.Fatalf("expected raft group to not be initialized, got RaftStatus() of %v", status)
		}

		var batch roachpb.BatchRequest
		batch.Add(makeReq())
		if _, pErr := tc.Sender().Send(context.Background(), batch); pErr != nil {
			t.Fatalf("unexpected error: %s", pErr)
		}

		// Serving the request should have spun up the Raft group.
		if tc.rng.RaftStatus() == nil {
			t.Fatalf("expected raft group to be initialized")
		}
	}

	// A write, a point read, and a scan must each trigger initialization.
	verifyInitAfter(func() roachpb.Request {
		put := putArgs(roachpb.Key("a"), []byte("value"))
		return &put
	})
	verifyInitAfter(func() roachpb.Request {
		get := getArgs(roachpb.Key("a"))
		return &get
	})
	verifyInitAfter(func() roachpb.Request {
		scan := scanArgs(roachpb.KeyMin, roachpb.KeyMax)
		return &scan
	})
}
// TestReplayProtection verifies that transactional replays cannot
// commit intents. The replay consists of an initial BeginTxn/Write
// batch and ends with an EndTxn batch.
func TestReplayProtection(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer setTxnAutoGC(true)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Exercise both isolation levels; keys are suffixed with the iteration
	// index so the cases are independent.
	for i, iso := range []enginepb.IsolationType{enginepb.SERIALIZABLE, enginepb.SNAPSHOT} {
		key := roachpb.Key(fmt.Sprintf("a-%d", i))
		keyB := roachpb.Key(fmt.Sprintf("b-%d", i))
		txn := newTransaction("test", key, 1, iso, tc.clock)
		// Send a batch with put to key.
		var ba roachpb.BatchRequest
		bt, btH := beginTxnArgs(key, txn)
		put := putArgs(key, []byte("value"))
		ba.Header = btH
		ba.Add(&bt)
		ba.Add(&put)
		if err := ba.SetActiveTimestamp(tc.clock.Now); err != nil {
			t.Fatal(err)
		}
		br, pErr := tc.Sender().Send(context.Background(), ba)
		if pErr != nil {
			t.Fatalf("%d: unexpected error: %s", i, pErr)
		}
		// Send a put for keyB.
		putB := putArgs(keyB, []byte("value"))
		putTxn := br.Txn.Clone()
		putTxn.Sequence++
		_, respH, pErr := SendWrapped(tc.Sender(), context.Background(), roachpb.Header{Txn: &putTxn}, &putB)
		if pErr != nil {
			t.Fatal(pErr)
		}
		// EndTransaction.
		etTxn := respH.Txn.Clone()
		etTxn.Sequence++
		et, etH := endTxnArgs(&etTxn, true)
		et.IntentSpans = []roachpb.Span{{Key: key, EndKey: nil}, {Key: keyB, EndKey: nil}}
		if _, pErr := tc.SendWrappedWith(etH, &et); pErr != nil {
			t.Fatalf("%d: unexpected error: %s", i, pErr)
		}
		// Verify txn record is cleaned (auto-GC'd on commit since all
		// intents are local).
		var readTxn roachpb.Transaction
		txnKey := keys.TransactionKey(txn.Key, txn.ID)
		ok, err := engine.MVCCGetProto(context.Background(), tc.rng.store.Engine(), txnKey, hlc.ZeroTimestamp, true /* consistent */, nil /* txn */, &readTxn)
		if err != nil || ok {
			t.Errorf("%d: expected transaction record to be cleared (%t): %s", i, ok, err)
		}
		// Now replay begin & put. BeginTransaction should fail with a replay error.
		_, pErr = tc.Sender().Send(context.Background(), ba)
		if _, ok := pErr.GetDetail().(*roachpb.TransactionReplayError); !ok {
			t.Errorf("%d: expected transaction replay for iso=%s; got %s", i, iso, pErr)
		}
		// Intent should not have been created.
		gArgs := getArgs(key)
		if _, pErr = tc.SendWrapped(&gArgs); pErr != nil {
			t.Errorf("%d: unexpected error reading key: %s", i, pErr)
		}
		// Send a put for keyB; should fail with a WriteTooOldError as this
		// will look like an obvious replay.
		_, _, pErr = SendWrapped(tc.Sender(), context.Background(), roachpb.Header{Txn: &putTxn}, &putB)
		if _, ok := pErr.GetDetail().(*roachpb.WriteTooOldError); !ok {
			t.Errorf("%d: expected write too old error for iso=%s; got %s", i, iso, pErr)
		}
		// EndTransaction should also fail, but with a status error (does not exist).
		_, pErr = tc.SendWrappedWith(etH, &et)
		if _, ok := pErr.GetDetail().(*roachpb.TransactionStatusError); !ok {
			t.Errorf("%d: expected transaction aborted for iso=%s; got %s", i, iso, pErr)
		}
		// Expect that keyB intent did not get written!
		gArgs = getArgs(keyB)
		if _, pErr = tc.SendWrapped(&gArgs); pErr != nil {
			t.Errorf("%d: unexpected error reading keyB: %s", i, pErr)
		}
	}
}
// TestEndTransactionLocalGC verifies that a transaction record is
// immediately garbage-collected upon EndTransaction iff all of the
// supplied intents are local relative to the transaction record's
// location. (Comment previously named "TestEndTransactionGC", which
// does not match the function.)
func TestEndTransactionLocalGC(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer setTxnAutoGC(true)()
	tc := testContext{}
	tsc := TestStoreContext()
	tsc.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			// Make sure the direct GC path doesn't interfere with this test.
			if filterArgs.Req.Method() == roachpb.GC {
				return roachpb.NewErrorWithTxn(errors.Errorf("boom"), filterArgs.Hdr.Txn)
			}
			return nil
		}
	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()
	// Split at "c" so intents at or beyond the split key are non-local.
	splitKey := roachpb.RKey("c")
	splitTestRange(tc.store, splitKey, splitKey, t)
	key := roachpb.Key("a")
	putKey := key
	for i, test := range []struct {
		intents []roachpb.Span
		expGC   bool
	}{
		// Range inside.
		{[]roachpb.Span{{Key: roachpb.Key("a"), EndKey: roachpb.Key("b")}}, true},
		// Two intents inside.
		{[]roachpb.Span{{Key: roachpb.Key("a")}, {Key: roachpb.Key("b")}}, true},
		// Intent range spilling over right endpoint.
		{[]roachpb.Span{{Key: roachpb.Key("a"), EndKey: splitKey.Next().AsRawKey()}}, false},
		// Intent range completely outside.
		{[]roachpb.Span{{Key: splitKey.AsRawKey(), EndKey: roachpb.Key("q")}}, false},
		// Intent inside and outside.
		{[]roachpb.Span{{Key: roachpb.Key("a")}, {Key: splitKey.AsRawKey()}}, false},
	} {
		txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
		_, btH := beginTxnArgs(key, txn)
		put := putArgs(putKey, key)
		if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
			t.Fatal(pErr)
		}
		putKey = putKey.Next() // for the next iteration
		args, h := endTxnArgs(txn, true)
		args.IntentSpans = test.intents
		txn.Sequence++
		if _, pErr := tc.SendWrappedWith(h, &args); pErr != nil {
			t.Fatal(pErr)
		}
		// The txn record must be gone iff all intents were local.
		var readTxn roachpb.Transaction
		txnKey := keys.TransactionKey(txn.Key, txn.ID)
		ok, err := engine.MVCCGetProto(context.Background(), tc.rng.store.Engine(), txnKey, hlc.ZeroTimestamp,
			true /* consistent */, nil /* txn */, &readTxn)
		if err != nil {
			t.Fatal(err)
		}
		if !ok != test.expGC {
			t.Errorf("%d: unexpected gc'ed: %t", i, !ok)
		}
	}
}
// setupResolutionTest splits the range at splitKey, writes intents at
// both key (left-hand range) and splitKey (right-hand range) under a
// single transaction, and then ends the transaction (committing or
// aborting per commit) with intent spans covering both keys. It
// returns the right-hand replica and the transaction, leaving the
// non-local intent for the caller's resolution assertions.
func setupResolutionTest(t *testing.T, tc testContext, key roachpb.Key,
	splitKey roachpb.RKey, commit bool) (*Replica, *roachpb.Transaction) {
	// Split the range and create an intent at splitKey and key.
	newRng := splitTestRange(tc.store, splitKey, splitKey, t)
	txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	// These increments are not required, but testing feels safer when zero
	// values are unexpected.
	txn.Sequence++
	txn.Epoch++
	pArgs := putArgs(key, []byte("value"))
	h := roachpb.Header{Txn: txn}
	if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), h, &pArgs); pErr != nil {
		t.Fatal(pErr)
	}
	{
		// Write the second intent on the right-hand range directly.
		var ba roachpb.BatchRequest
		ba.Header = h
		if err := ba.SetActiveTimestamp(newRng.store.Clock().Now); err != nil {
			t.Fatal(err)
		}
		pArgs := putArgs(splitKey.AsRawKey(), []byte("value"))
		ba.Add(&pArgs)
		txn.Sequence++
		if _, pErr := newRng.Send(context.Background(), ba); pErr != nil {
			t.Fatal(pErr)
		}
	}
	// End the transaction and resolve the intents.
	args, h := endTxnArgs(txn, commit)
	args.IntentSpans = []roachpb.Span{{Key: key, EndKey: splitKey.Next().AsRawKey()}}
	txn.Sequence++
	if _, pErr := tc.SendWrappedWith(h, &args); pErr != nil {
		t.Fatal(pErr)
	}
	return newRng, txn
}
// TestEndTransactionResolveOnlyLocalIntents verifies that an end transaction
// request resolves only local intents within the same batch.
func TestEndTransactionResolveOnlyLocalIntents(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tsc := TestStoreContext()
	key := roachpb.Key("a")
	splitKey := roachpb.RKey(key).Next()
	// Fail any attempt to resolve the non-local intent so it must stay
	// unresolved past EndTransaction.
	tsc.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			if filterArgs.Req.Method() == roachpb.ResolveIntentRange &&
				filterArgs.Req.Header().Key.Equal(splitKey.AsRawKey()) {
				return roachpb.NewErrorWithTxn(errors.Errorf("boom"), filterArgs.Hdr.Txn)
			}
			return nil
		}
	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()
	newRng, txn := setupResolutionTest(t, tc, key, splitKey, true /* commit */)
	// Check if the intent in the other range has not yet been resolved.
	{
		var ba roachpb.BatchRequest
		gArgs := getArgs(splitKey)
		ba.Add(&gArgs)
		if err := ba.SetActiveTimestamp(tc.clock.Now); err != nil {
			t.Fatal(err)
		}
		_, pErr := newRng.Send(context.Background(), ba)
		if _, ok := pErr.GetDetail().(*roachpb.WriteIntentError); !ok {
			t.Errorf("expected write intent error, but got %s", pErr)
		}
	}
	// Heartbeat the txn and verify the record still tracks exactly the
	// unresolved non-local intent span.
	txn.Sequence++
	hbArgs, h := heartbeatArgs(txn)
	reply, pErr := tc.SendWrappedWith(h, &hbArgs)
	if pErr != nil {
		t.Fatal(pErr)
	}
	hbResp := reply.(*roachpb.HeartbeatTxnResponse)
	expIntents := []roachpb.Span{{Key: splitKey.AsRawKey(), EndKey: splitKey.AsRawKey().Next()}}
	if !reflect.DeepEqual(hbResp.Txn.Intents, expIntents) {
		t.Fatalf("expected persisted intents %v, got %v",
			expIntents, hbResp.Txn.Intents)
	}
}
// TestEndTransactionDirectGC verifies that after successfully resolving the
// external intents of a transaction after EndTransaction, the transaction and
// abort cache records are purged on both the local range and non-local range.
func TestEndTransactionDirectGC(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	key := roachpb.Key("a")
	splitKey := roachpb.RKey(key).Next()
	tc.Start(t)
	defer tc.Stop()
	// commit=false aborts the txn, which generates abort cache entries.
	rightRng, txn := setupResolutionTest(t, tc, key, splitKey, false /* commit; abort generates abort cache entries */)
	// Intent resolution and GC are asynchronous; poll until clean.
	util.SucceedsSoon(t, func() error {
		if gr, _, err := tc.rng.Get(context.Background(), tc.engine, roachpb.Header{}, roachpb.GetRequest{Span: roachpb.Span{Key: keys.TransactionKey(txn.Key, txn.ID)}}); err != nil {
			return err
		} else if gr.Value != nil {
			return errors.Errorf("txn entry still there: %+v", gr)
		}
		var entry roachpb.AbortCacheEntry
		if aborted, err := tc.rng.abortCache.Get(context.Background(), tc.engine, txn.ID, &entry); err != nil {
			t.Fatal(err)
		} else if aborted {
			return errors.Errorf("abort cache still populated: %v", entry)
		}
		if aborted, err := rightRng.abortCache.Get(context.Background(), tc.engine, txn.ID, &entry); err != nil {
			t.Fatal(err)
		} else if aborted {
			t.Fatalf("right-hand side abort cache still populated: %v", entry)
		}
		return nil
	})
}
// TestEndTransactionDirectGCFailure verifies that no immediate GC takes place
// if external intents can't be resolved (see also TestEndTransactionDirectGC).
func TestEndTransactionDirectGCFailure(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	key := roachpb.Key("a")
	splitKey := roachpb.RKey(key).Next()
	var count int64
	tsc := TestStoreContext()
	// Fail non-local intent resolution (counting attempts) and fatal on any
	// GCRequest, which must never be issued in this scenario.
	tsc.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			if filterArgs.Req.Method() == roachpb.ResolveIntentRange &&
				filterArgs.Req.Header().Key.Equal(splitKey.AsRawKey()) {
				atomic.AddInt64(&count, 1)
				return roachpb.NewErrorWithTxn(errors.Errorf("boom"), filterArgs.Hdr.Txn)
			} else if filterArgs.Req.Method() == roachpb.GC {
				t.Fatalf("unexpected GCRequest: %+v", filterArgs.Req)
			}
			return nil
		}
	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()
	setupResolutionTest(t, tc, key, splitKey, true /* commit */)
	// Now test that no GCRequest is issued. We can't test that directly (since
	// it's completely asynchronous), so we first make sure ResolveIntent
	// happened and subsequently issue a bogus Put which is likely to make it
	// into Raft only after a rogue GCRequest (at least sporadically), which
	// would trigger a Fatal from the command filter.
	util.SucceedsSoon(t, func() error {
		if atomic.LoadInt64(&count) == 0 {
			return errors.Errorf("intent resolution not attempted yet")
		} else if err := tc.store.DB().Put("panama", "banana"); err != nil {
			return err
		}
		return nil
	})
}
// TestEndTransactionDirectGC_1PC runs a test similar to TestEndTransactionDirectGC
// for the case of a transaction which is contained in a single batch.
func TestEndTransactionDirectGC_1PC(t *testing.T) {
	defer leaktest.AfterTest(t)()
	for _, commit := range []bool{true, false} {
		// Closure so each iteration's defers (tc.Stop) run promptly.
		func() {
			tc := testContext{}
			tc.Start(t)
			defer tc.Stop()
			key := roachpb.Key("a")
			txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
			// Begin, write, and end in one batch to trigger the one-phase
			// commit fast path.
			bt, _ := beginTxnArgs(key, txn)
			put := putArgs(key, []byte("value"))
			et, etH := endTxnArgs(txn, commit)
			et.IntentSpans = []roachpb.Span{{Key: key}}
			var ba roachpb.BatchRequest
			ba.Header = etH
			ba.Add(&bt, &put, &et)
			br, err := tc.Sender().Send(context.Background(), ba)
			if err != nil {
				t.Fatalf("commit=%t: %s", commit, err)
			}
			etArgs, ok := br.Responses[len(br.Responses)-1].GetInner().(*roachpb.EndTransactionResponse)
			if !ok || !etArgs.OnePhaseCommit {
				t.Errorf("commit=%t: expected one phase commit", commit)
			}
			// A 1PC txn must leave no abort cache entry behind.
			var entry roachpb.AbortCacheEntry
			if aborted, err := tc.rng.abortCache.Get(context.Background(), tc.engine, txn.ID, &entry); err != nil {
				t.Fatal(err)
			} else if aborted {
				t.Fatalf("commit=%t: abort cache still populated: %v", commit, entry)
			}
		}()
	}
}
// TestReplicaResolveIntentNoWait verifies that an asynchronous (no-wait)
// intent resolution via the intent resolver eventually issues the
// ResolveIntent request for the target key.
func TestReplicaResolveIntentNoWait(t *testing.T) {
	defer leaktest.AfterTest(t)()
	var seen int32
	key := roachpb.Key("zresolveme")
	tsc := TestStoreContext()
	// Record (atomically, since resolution is async) whether a ResolveIntent
	// for our key has been observed.
	tsc.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			if filterArgs.Req.Method() == roachpb.ResolveIntent &&
				filterArgs.Req.Header().Key.Equal(key) {
				atomic.StoreInt32(&seen, 1)
			}
			return nil
		}
	tc := testContext{}
	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()
	splitKey := roachpb.RKey("aa")
	setupResolutionTest(t, tc, roachpb.Key("a") /* irrelevant */, splitKey, true /* commit */)
	txn := newTransaction("name", key, 1, enginepb.SERIALIZABLE, tc.clock)
	txn.Status = roachpb.COMMITTED
	if pErr := tc.store.intentResolver.resolveIntents(context.Background(),
		[]roachpb.Intent{{
			Span:   roachpb.Span{Key: key},
			Txn:    txn.TxnMeta,
			Status: txn.Status,
		}}, false /* !wait */, false /* !poison; irrelevant */); pErr != nil {
		t.Fatal(pErr)
	}
	// Resolution runs asynchronously; poll until the filter saw it.
	util.SucceedsSoon(t, func() error {
		if atomic.LoadInt32(&seen) > 0 {
			return nil
		}
		return fmt.Errorf("no intent resolution on %q so far", key)
	})
}
// TestSequenceCachePoisonOnResolve verifies that when an intent is
// aborted, the abort cache on the respective Range is poisoned and
// the pushee is presented with a txn abort on its next contact with
// the Range in the same epoch.
// NOTE(review): the comment previously named "TestAbortCachePoisonOnResolve";
// the function name looks stale relative to the abort-cache terminology used
// in the body — consider renaming the function in a follow-up.
func TestSequenceCachePoisonOnResolve(t *testing.T) {
	defer leaktest.AfterTest(t)()
	key := roachpb.Key("a")
	// Isolation of the pushee and whether we're going to abort it.
	// Run the actual meat of the test, which pushes the pushee and
	// checks whether we get the correct behaviour as it touches the
	// Range again.
	run := func(abort bool, iso enginepb.IsolationType) {
		tc := testContext{}
		tc.Start(t)
		defer tc.Stop()
		pushee := newTransaction("test", key, 1, iso, tc.clock)
		pusher := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
		pusher.Priority = 2
		pushee.Priority = 1 // pusher will win
		// inc issues a transactional Increment on k for actor, lazily
		// beginning the transaction on first use.
		inc := func(actor *roachpb.Transaction, k roachpb.Key) (*roachpb.IncrementResponse, *roachpb.Error) {
			reply, pErr := maybeWrapWithBeginTransaction(tc.store, nil, roachpb.Header{
				Txn:     actor,
				RangeID: 1,
			}, &roachpb.IncrementRequest{Span: roachpb.Span{Key: k}, Increment: 123})
			if pErr != nil {
				return nil, pErr
			}
			actor.Writing = true
			actor.Sequence++
			return reply.(*roachpb.IncrementResponse), nil
		}
		// get issues a transactional Get on k for actor, returning only
		// the error (the value is irrelevant to this test).
		get := func(actor *roachpb.Transaction, k roachpb.Key) *roachpb.Error {
			actor.Sequence++
			_, pErr := client.SendWrappedWith(tc.store, nil, roachpb.Header{
				Txn:     actor,
				RangeID: 1,
			}, &roachpb.GetRequest{Span: roachpb.Span{Key: k}})
			return pErr
		}
		// Write an intent (this also begins the pushee's transaction).
		if _, pErr := inc(pushee, key); pErr != nil {
			t.Fatal(pErr)
		}
		// Have the pusher run into the intent. That pushes our pushee and
		// resolves the intent, which in turn should poison the abort cache.
		var assert func(*roachpb.Error)
		if abort {
			// Write/Write conflict will abort pushee.
			if _, pErr := inc(pusher, key); pErr != nil {
				t.Fatal(pErr)
			}
			assert = func(pErr *roachpb.Error) {
				if _, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); !ok {
					t.Fatalf("abort=%t, iso=%s: expected txn abort, got %s", abort, iso, pErr)
				}
			}
		} else {
			// Verify we're not poisoned.
			assert = func(pErr *roachpb.Error) {
				if pErr != nil {
					t.Fatalf("abort=%t, iso=%s: unexpected: %s", abort, iso, pErr)
				}
			}
		}
		// Our assert should be true for any reads or writes.
		pErr := get(pushee, key)
		assert(pErr)
		_, pErr = inc(pushee, key)
		assert(pErr)
		// Still poisoned (on any key on the Range).
		pErr = get(pushee, key.Next())
		assert(pErr)
		_, pErr = inc(pushee, key.Next())
		assert(pErr)
		// Pretend we're coming back. Increasing the epoch on an abort should
		// still fail obviously, while on no abort will succeed.
		pushee.Epoch++
		_, pErr = inc(pushee, roachpb.Key("b"))
		assert(pErr)
	}
	// Cover all four abort x isolation combinations.
	for _, abort := range []bool{false, true} {
		run(abort, enginepb.SERIALIZABLE)
		run(abort, enginepb.SNAPSHOT)
	}
}
// TestAbortCacheError verifies that roachpb.Errors returned by checkIfTxnAborted
// have txns that are identical to txns stored in Transaction{Retry,Aborted}Error.
func TestAbortCacheError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	txn := roachpb.Transaction{}
	txn.ID = uuid.NewV4()
	txn.Priority = 1
	txn.Sequence = 1
	txn.Timestamp = hlc.Timestamp{WallTime: 1}
	key := roachpb.Key("k")
	// The cached entry carries a later timestamp and higher priority than
	// the txn itself.
	ts := txn.Timestamp.Next()
	priority := int32(10)
	entry := roachpb.AbortCacheEntry{
		Key:       key,
		Timestamp: ts,
		Priority:  priority,
	}
	// Seed the abort cache directly, then ask the replica to check it.
	if err := tc.rng.abortCache.Put(context.Background(), tc.engine, nil, txn.ID, &entry); err != nil {
		t.Fatal(err)
	}
	pErr := tc.rng.checkIfTxnAborted(context.Background(), tc.engine, txn)
	if _, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); ok {
		// The txn attached to the error must match the original txn with
		// the cache entry's priority folded in.
		expected := txn.Clone()
		expected.Timestamp = txn.Timestamp
		expected.Priority = priority
		if pErr.GetTxn() == nil || !reflect.DeepEqual(pErr.GetTxn(), &expected) {
			t.Errorf("txn does not match: %s vs. %s", pErr.GetTxn(), expected)
		}
	} else {
		t.Errorf("unexpected error: %s", pErr)
	}
}
// TestPushTxnBadKey verifies that args.Key equals args.PusheeTxn.ID.
func TestPushTxnBadKey(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	pusher := newTransaction("test", roachpb.Key("a"), 1, enginepb.SERIALIZABLE, tc.clock)
	pushee := newTransaction("test", roachpb.Key("b"), 1, enginepb.SERIALIZABLE, tc.clock)

	// Deliberately address the push at the pusher's key rather than the
	// pushee's; the server must reject the mismatch.
	req := pushTxnArgs(pusher, pushee, roachpb.PUSH_ABORT)
	req.Key = pusher.Key

	_, pErr := tc.SendWrapped(&req)
	if !testutils.IsPError(pErr, ".*should match pushee.*") {
		t.Errorf("unexpected error %s", pErr)
	}
}
// TestPushTxnAlreadyCommittedOrAborted verifies success
// (noop) in event that pushee is already committed or aborted.
func TestPushTxnAlreadyCommittedOrAborted(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// This test simulates running into an open intent and resolving it using
	// the transaction record. If we auto-gc'ed entries here, the entry would
	// be deleted and the intents resolved instantaneously on successful commit
	// (since they're on the same Range). Could split the range and have
	// non-local intents if we ever wanted to get rid of this.
	defer setTxnAutoGC(false)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	for i, status := range []roachpb.TransactionStatus{roachpb.COMMITTED, roachpb.ABORTED} {
		key := roachpb.Key(fmt.Sprintf("key-%d", i))
		pusher := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
		pushee := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
		pusher.Priority = 1
		pushee.Priority = 2 // pusher will lose, meaning we shouldn't push unless pushee is already ended.
		// Begin the pushee's transaction.
		_, btH := beginTxnArgs(key, pushee)
		put := putArgs(key, key)
		if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
			t.Fatal(pErr)
		}
		// End the pushee's transaction (commit or abort per the test case).
		etArgs, h := endTxnArgs(pushee, status == roachpb.COMMITTED)
		pushee.Sequence++
		if _, pErr := tc.SendWrappedWith(h, &etArgs); pErr != nil {
			t.Fatal(pErr)
		}
		// Now try to push what's already committed or aborted.
		args := pushTxnArgs(pusher, pushee, roachpb.PUSH_ABORT)
		resp, pErr := tc.SendWrapped(&args)
		if pErr != nil {
			t.Fatal(pErr)
		}
		// The push must succeed as a noop, reporting the txn's final status.
		reply := resp.(*roachpb.PushTxnResponse)
		if reply.PusheeTxn.Status != status {
			t.Errorf("expected push txn to return with status == %s; got %+v", status, reply.PusheeTxn)
		}
	}
}
// TestPushTxnUpgradeExistingTxn verifies that pushing
// a transaction record with a new epoch upgrades the pushee's
// epoch and timestamp if greater. In all test cases, the
// priorities are set such that the push will succeed.
func TestPushTxnUpgradeExistingTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	ts1 := hlc.Timestamp{WallTime: 1}
	ts2 := hlc.Timestamp{WallTime: 2}
	testCases := []struct {
		startTS, ts, expTS hlc.Timestamp
	}{
		// Noop.
		{ts1, ts1, ts1},
		// Move timestamp forward.
		{ts1, ts2, ts2},
		// Move timestamp backwards (has no effect).
		{ts2, ts1, ts2},
	}
	for i, test := range testCases {
		key := roachpb.Key(fmt.Sprintf("key-%d", i))
		pusher := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
		pushee := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
		pushee.Priority = 1
		pushee.Epoch = 12345
		pusher.Priority = 2   // Pusher will win
		pusher.Writing = true // expected when a txn is heartbeat
		// First, establish "start" of existing pushee's txn via BeginTransaction.
		pushee.Timestamp = test.startTS
		pushee.LastHeartbeat = &test.startTS
		_, btH := beginTxnArgs(key, pushee)
		put := putArgs(key, key)
		if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
			t.Fatal(pErr)
		}
		// Now, attempt to push the transaction using updated timestamp.
		pushee.Timestamp = test.ts
		args := pushTxnArgs(pusher, pushee, roachpb.PUSH_ABORT)
		resp, pErr := tc.SendWrapped(&args)
		if pErr != nil {
			t.Fatal(pErr)
		}
		// Construct the expected pushee state: aborted (PUSH_ABORT won),
		// timestamp ratcheted forward only, epoch unchanged.
		reply := resp.(*roachpb.PushTxnResponse)
		expTxn := pushee.Clone()
		expTxn.Epoch = pushee.Epoch // no change
		expTxn.Timestamp = test.expTS
		expTxn.Status = roachpb.ABORTED
		expTxn.LastHeartbeat = &test.startTS
		expTxn.Writing = true
		if !reflect.DeepEqual(expTxn, reply.PusheeTxn) {
			t.Fatalf("unexpected push txn in trial %d; expected:\n%+v\ngot:\n%+v", i, expTxn, reply.PusheeTxn)
		}
	}
}
// TestPushTxnHeartbeatTimeout verifies that a txn which
// hasn't been heartbeat within 2x the heartbeat interval can be
// pushed/aborted.
func TestPushTxnHeartbeatTimeout(t *testing.T) {
defer leaktest.AfterTest(t)()
tc := testContext{}
tc.Start(t)
defer tc.Stop()
ts := hlc.Timestamp{WallTime: 1}
ns := base.DefaultHeartbeatInterval.Nanoseconds()
testCases := []struct {
heartbeat hlc.Timestamp // zero value indicates no heartbeat
currentTime int64 // nanoseconds
pushType roachpb.PushTxnType
expSuccess bool
}{
// Avoid using 0 as currentTime since our manualClock is at 0 and we
// don't want to have outcomes depend on random logical ticks.
// Note that throughout the table PUSH_QUERY succeeds regardless of
// expiration, while all other push types require the txn to be expired
// (the pusher loses on priority; see below).
{hlc.ZeroTimestamp, 1, roachpb.PUSH_TIMESTAMP, false},
{hlc.ZeroTimestamp, 1, roachpb.PUSH_ABORT, false},
{hlc.ZeroTimestamp, 1, roachpb.PUSH_TOUCH, false},
{hlc.ZeroTimestamp, 1, roachpb.PUSH_QUERY, true},
{hlc.ZeroTimestamp, ns, roachpb.PUSH_TIMESTAMP, false},
{hlc.ZeroTimestamp, ns, roachpb.PUSH_ABORT, false},
{hlc.ZeroTimestamp, ns, roachpb.PUSH_TOUCH, false},
{hlc.ZeroTimestamp, ns, roachpb.PUSH_QUERY, true},
{hlc.ZeroTimestamp, ns*2 - 1, roachpb.PUSH_TIMESTAMP, false},
{hlc.ZeroTimestamp, ns*2 - 1, roachpb.PUSH_ABORT, false},
{hlc.ZeroTimestamp, ns*2 - 1, roachpb.PUSH_TOUCH, false},
{hlc.ZeroTimestamp, ns*2 - 1, roachpb.PUSH_QUERY, true},
{hlc.ZeroTimestamp, ns * 2, roachpb.PUSH_TIMESTAMP, false},
{hlc.ZeroTimestamp, ns * 2, roachpb.PUSH_ABORT, false},
{hlc.ZeroTimestamp, ns * 2, roachpb.PUSH_TOUCH, false},
{hlc.ZeroTimestamp, ns * 2, roachpb.PUSH_QUERY, true},
// With a heartbeat at WallTime=1, the cases below show that the txn
// counts as expired only once currentTime exceeds the heartbeat by
// more than 2*ns (i.e. at ns*2+2, not ns*2+1).
{ts, ns*2 + 1, roachpb.PUSH_TIMESTAMP, false},
{ts, ns*2 + 1, roachpb.PUSH_ABORT, false},
{ts, ns*2 + 1, roachpb.PUSH_TOUCH, false},
{ts, ns*2 + 1, roachpb.PUSH_QUERY, true},
{ts, ns*2 + 2, roachpb.PUSH_TIMESTAMP, true},
{ts, ns*2 + 2, roachpb.PUSH_ABORT, true},
{ts, ns*2 + 2, roachpb.PUSH_TOUCH, true},
{ts, ns*2 + 2, roachpb.PUSH_QUERY, true},
}
for i, test := range testCases {
key := roachpb.Key(fmt.Sprintf("key-%d", i))
pushee := newTransaction(fmt.Sprintf("test-%d", i), key, 1, enginepb.SERIALIZABLE, tc.clock)
pusher := newTransaction("pusher", key, 1, enginepb.SERIALIZABLE, tc.clock)
// The pusher loses on priority, so a successful push can only be due
// to the pushee having expired.
pushee.Priority = 2
pusher.Priority = 1 // Pusher won't win based on priority.
// First, establish "start" of existing pushee's txn via BeginTransaction.
if !test.heartbeat.Equal(hlc.ZeroTimestamp) {
pushee.LastHeartbeat = &test.heartbeat
}
_, btH := beginTxnArgs(key, pushee)
// Write the record at the server's current clock reading.
btH.Timestamp = tc.rng.store.Clock().Now()
put := putArgs(key, key)
if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
t.Fatalf("%d: %s", i, pErr)
}
// Now, attempt to push the transaction with Now set to our current time.
args := pushTxnArgs(pusher, pushee, test.pushType)
args.Now = hlc.Timestamp{WallTime: test.currentTime}
args.PushTo = args.Now
reply, pErr := tc.SendWrapped(&args)
if test.expSuccess != (pErr == nil) {
t.Fatalf("%d: expSuccess=%t; got pErr %s, reply %+v", i,
test.expSuccess, pErr, reply)
}
if pErr != nil {
if _, ok := pErr.GetDetail().(*roachpb.TransactionPushError); !ok {
t.Errorf("%d: expected txn push error: %s", i, pErr)
}
} else if test.pushType != roachpb.PUSH_QUERY {
// Successful pushes (other than QUERY, which never changes the
// pushee's state) must leave the pushee ABORTED.
if txn := reply.(*roachpb.PushTxnResponse).PusheeTxn; txn.Status != roachpb.ABORTED {
t.Errorf("%d: expected aborted transaction, got %s", i, txn)
}
}
}
}
// TestPushTxnNoTxn makes sure that no Txn is returned from PushTxn and that
// it and ResolveIntent{,Range} can not be carried out in a transaction.
func TestResolveIntentPushTxnReplyTxn(t *testing.T) {
defer leaktest.AfterTest(t)()
tc := testContext{}
tc.Start(t)
defer tc.Stop()
// Evaluate the commands directly against the Replica on a scratch
// engine batch; nothing here is ever committed.
b := tc.engine.NewBatch()
defer b.Close()
txn := newTransaction("test", roachpb.Key("test"), 1, enginepb.SERIALIZABLE, tc.clock)
// The pushee is a clone of the pusher with a slightly lower priority.
txnPushee := txn.Clone()
txnPushee.Priority--
pa := pushTxnArgs(txn, &txnPushee, roachpb.PUSH_ABORT)
var ms enginepb.MVCCStats
var ra roachpb.ResolveIntentRequest
var rra roachpb.ResolveIntentRangeRequest
ctx := context.Background()
// Should not be able to push or resolve in a transaction. Each of the
// three commands must fail with errTransactionUnsupported when the
// header carries a transaction.
if _, err := tc.rng.PushTxn(ctx, b, &ms, roachpb.Header{Txn: txn}, pa); !testutils.IsError(err, errTransactionUnsupported.Error()) {
t.Fatalf("transactional PushTxn returned unexpected error: %v", err)
}
if _, err := tc.rng.ResolveIntent(ctx, b, &ms, roachpb.Header{Txn: txn}, ra); !testutils.IsError(err, errTransactionUnsupported.Error()) {
t.Fatalf("transactional ResolveIntent returned unexpected error: %v", err)
}
if _, err := tc.rng.ResolveIntentRange(ctx, b, &ms, roachpb.Header{Txn: txn}, rra); !testutils.IsError(err, errTransactionUnsupported.Error()) {
t.Fatalf("transactional ResolveIntentRange returned unexpected error: %v", err)
}
// Should not get a transaction back from PushTxn. It used to erroneously
// return args.PusherTxn.
if reply, err := tc.rng.PushTxn(ctx, b, &ms, roachpb.Header{}, pa); err != nil {
t.Fatal(err)
} else if reply.Txn != nil {
t.Fatalf("expected nil response txn, but got %s", reply.Txn)
}
}
// TestPushTxnPriorities verifies that txns with lower
// priority are pushed; if priorities are equal, then the txns
// are ordered by txn timestamp, with the more recent timestamp
// being pushable.
// TODO(tschottdorf): we should have a randomized version of this test which
// also simulates the client proto and persisted record diverging. For example,
// clients may be using a higher timestamp for their push or the persisted
// record (which they are not using) might have a higher timestamp, and even
// in the presence of such skewed information, conflicts between two (or more)
// conflicting transactions must not deadlock (see #5685 for an example of this
// happening with older code).
func TestPushTxnPriorities(t *testing.T) {
defer leaktest.AfterTest(t)()
tc := testContext{}
tc.Start(t)
defer tc.Stop()
ts1 := hlc.Timestamp{WallTime: 1}
ts2 := hlc.Timestamp{WallTime: 2}
testCases := []struct {
pusherPriority, pusheePriority int32
pusherTS, pusheeTS hlc.Timestamp
pushType roachpb.PushTxnType
expSuccess bool
}{
// Pusher with higher priority succeeds.
{2, 1, ts1, ts1, roachpb.PUSH_TIMESTAMP, true},
{2, 1, ts1, ts1, roachpb.PUSH_ABORT, true},
// Pusher with lower priority fails.
{1, 2, ts1, ts1, roachpb.PUSH_ABORT, false},
{1, 2, ts1, ts1, roachpb.PUSH_TIMESTAMP, false},
// Pusher with lower priority fails, even with older txn timestamp.
{1, 2, ts1, ts2, roachpb.PUSH_ABORT, false},
// Pusher has lower priority, but older txn timestamp allows success if
// !abort since there's nothing to do.
{1, 2, ts1, ts2, roachpb.PUSH_TIMESTAMP, true},
// With same priorities, larger Txn ID wins. Timestamp does not matter
// (unless it implies that nothing needs to be pushed in the first
// place; see above).
// Note: in this test, the pusher has the larger ID.
{1, 1, ts1, ts1, roachpb.PUSH_ABORT, true},
{1, 1, ts1, ts1, roachpb.PUSH_TIMESTAMP, true},
{1, 1, ts2, ts1, roachpb.PUSH_ABORT, true},
{1, 1, ts2, ts1, roachpb.PUSH_TIMESTAMP, true},
// When touching, priority never wins.
{2, 1, ts1, ts1, roachpb.PUSH_TOUCH, false},
{1, 2, ts1, ts1, roachpb.PUSH_TOUCH, false},
// When updating, priority always succeeds.
{2, 1, ts1, ts1, roachpb.PUSH_QUERY, true},
{1, 2, ts1, ts1, roachpb.PUSH_QUERY, true},
}
for i, test := range testCases {
// A fresh key per case keeps the per-case txn records independent.
key := roachpb.Key(fmt.Sprintf("key-%d", i))
pusher := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
pushee := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
pusher.Priority = test.pusherPriority
pushee.Priority = test.pusheePriority
pusher.Timestamp = test.pusherTS
pushee.Timestamp = test.pusheeTS
// Make sure pusher ID is greater; if priorities and timestamps are the same,
// the greater ID succeeds with push.
if bytes.Compare(pusher.ID.GetBytes(), pushee.ID.GetBytes()) < 0 {
pusher.ID, pushee.ID = pushee.ID, pusher.ID
}
// Write the pushee's txn record by wrapping a put in BeginTransaction.
_, btH := beginTxnArgs(key, pushee)
put := putArgs(key, key)
if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
t.Fatal(pErr)
}
// Now, attempt to push the transaction with intent epoch set appropriately.
args := pushTxnArgs(pusher, pushee, test.pushType)
_, pErr := tc.SendWrapped(&args)
if test.expSuccess != (pErr == nil) {
t.Errorf("expected success on trial %d? %t; got err %s", i, test.expSuccess, pErr)
}
if pErr != nil {
if _, ok := pErr.GetDetail().(*roachpb.TransactionPushError); !ok {
t.Errorf("expected txn push error: %s", pErr)
}
}
}
}
// TestPushTxnPushTimestamp verifies that with args.Abort is
// false (i.e. for read/write conflict), the pushed txn keeps status
// PENDING, but has its txn Timestamp moved forward to the pusher's
// txn Timestamp + 1.
func TestPushTxnPushTimestamp(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	// The pusher carries both the higher priority and a much newer timestamp.
	pusher := newTransaction("test", roachpb.Key("a"), 1, enginepb.SERIALIZABLE, tc.clock)
	pushee := newTransaction("test", roachpb.Key("b"), 1, enginepb.SERIALIZABLE, tc.clock)
	pusher.Priority = 2
	pushee.Priority = 1 // pusher will win
	pusher.Timestamp = hlc.Timestamp{WallTime: 50, Logical: 25}
	pushee.Timestamp = hlc.Timestamp{WallTime: 5, Logical: 1}

	// Write the pushee's txn record by wrapping a put in BeginTransaction.
	key := roachpb.Key("a")
	_, btH := beginTxnArgs(key, pushee)
	put := putArgs(key, key)
	if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
		t.Fatal(pErr)
	}
	pushee.Writing = true

	// Now, push the transaction with args.Abort=false.
	pushReq := pushTxnArgs(pusher, pushee, roachpb.PUSH_TIMESTAMP)
	res, pErr := tc.SendWrapped(&pushReq)
	if pErr != nil {
		t.Errorf("unexpected error on push: %s", pErr)
	}

	// The pushee must remain PENDING, but with its timestamp advanced to
	// one logical tick past the pusher's timestamp.
	wantTS := pusher.Timestamp
	wantTS.Logical++
	pushReply := res.(*roachpb.PushTxnResponse)
	if !pushReply.PusheeTxn.Timestamp.Equal(wantTS) {
		t.Errorf("expected timestamp to be pushed to %+v; got %+v", wantTS, pushReply.PusheeTxn.Timestamp)
	}
	if pushReply.PusheeTxn.Status != roachpb.PENDING {
		t.Errorf("expected pushed txn to have status PENDING; got %s", pushReply.PusheeTxn.Status)
	}
}
// TestPushTxnPushTimestampAlreadyPushed verifies that pushing
// a timestamp forward which is already far enough forward is a simple
// noop. We do this by ensuring that priorities would otherwise make
// pushing impossible.
func TestPushTxnPushTimestampAlreadyPushed(t *testing.T) {
defer leaktest.AfterTest(t)()
tc := testContext{}
tc.Start(t)
defer tc.Stop()
pusher := newTransaction("test", roachpb.Key("a"), 1, enginepb.SERIALIZABLE, tc.clock)
pushee := newTransaction("test", roachpb.Key("b"), 1, enginepb.SERIALIZABLE, tc.clock)
pusher.Priority = 1
pushee.Priority = 2 // pusher will lose
// The pushee's timestamp is already one logical tick ahead of the
// pusher's, so a PUSH_TIMESTAMP has nothing left to do and succeeds
// despite the pusher's lower priority.
pusher.Timestamp = hlc.Timestamp{WallTime: 50, Logical: 0}
pushee.Timestamp = hlc.Timestamp{WallTime: 50, Logical: 1}
key := roachpb.Key("a")
_, btH := beginTxnArgs(key, pushee)
put := putArgs(key, key)
if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
t.Fatal(pErr)
}
// Now, push the transaction with args.Abort=false.
args := pushTxnArgs(pusher, pushee, roachpb.PUSH_TIMESTAMP)
resp, pErr := tc.SendWrapped(&args)
if pErr != nil {
t.Errorf("unexpected pError on push: %s", pErr)
}
// The pushee must come back unchanged: original timestamp, still PENDING.
reply := resp.(*roachpb.PushTxnResponse)
if !reply.PusheeTxn.Timestamp.Equal(pushee.Timestamp) {
t.Errorf("expected timestamp to be equal to original %+v; got %+v", pushee.Timestamp, reply.PusheeTxn.Timestamp)
}
if reply.PusheeTxn.Status != roachpb.PENDING {
t.Errorf("expected pushed txn to have status PENDING; got %s", reply.PusheeTxn.Status)
}
}
// TestPushTxnSerializableRestart simulates a transaction which is
// started at t=0, fails serializable commit due to a read at a key
// being written at t=1, is then restarted at the updated timestamp,
// but before the txn can be retried, it's pushed to t=2, an even
// higher timestamp. The test verifies that the serializable commit
// fails yet again, preventing regression of a bug in which we blindly
// overwrote the transaction record on BeginTransaction..
func TestPushTxnSerializableRestart(t *testing.T) {
defer leaktest.AfterTest(t)()
tc := testContext{}
tc.Start(t)
defer tc.Stop()
key := roachpb.Key("a")
pushee := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
pusher := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
pushee.Priority = 1
pusher.Priority = 2 // pusher will win
// Read from the key to increment the timestamp cache.
gArgs := getArgs(key)
if _, pErr := tc.SendWrapped(&gArgs); pErr != nil {
t.Fatal(pErr)
}
// Begin the pushee's transaction & write to key.
btArgs, btH := beginTxnArgs(key, pushee)
put := putArgs(key, []byte("foo"))
resp, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put)
if pErr != nil {
t.Fatal(pErr)
}
// Fold any server-side timestamp changes back into the client proto.
pushee.Update(resp.Header().Txn)
// Try to end the pushee's transaction; should get a retry failure.
etArgs, h := endTxnArgs(pushee, true /* commit */)
pushee.Sequence++
_, pErr = tc.SendWrappedWith(h, &etArgs)
if _, ok := pErr.GetDetail().(*roachpb.TransactionRetryError); !ok {
t.Fatalf("expected retry error; got %s", pErr)
}
// Keep a copy of the pre-restart proto to push below; Restart mutates
// the live proto in place.
pusheeCopy := *pushee
pushee.Restart(1, 1, pusher.Timestamp)
// Next push pushee to advance timestamp of txn record.
pusher.Timestamp = tc.rng.store.Clock().Now()
args := pushTxnArgs(pusher, &pusheeCopy, roachpb.PUSH_TIMESTAMP)
if _, pErr := tc.SendWrapped(&args); pErr != nil {
t.Fatal(pErr)
}
// Try to end pushed transaction at restart timestamp, which is
// earlier than its now-pushed timestamp. Should fail.
var ba roachpb.BatchRequest
pushee.Sequence++
ba.Header.Txn = pushee
ba.Add(&btArgs)
ba.Add(&put)
ba.Add(&etArgs)
_, pErr = tc.Sender().Send(context.Background(), ba)
if _, ok := pErr.GetDetail().(*roachpb.TransactionRetryError); !ok {
t.Fatalf("expected retry error; got %s", pErr)
}
// Verify that the returned transaction has timestamp equal to the
// pushed timestamp. This verifies that the BeginTransaction found
// the pushed record and propagated it.
if txn := pErr.GetTxn(); !txn.Timestamp.Equal(pusher.Timestamp.Next()) {
t.Errorf("expected retry error txn timestamp %s; got %s", pusher.Timestamp, txn.Timestamp)
}
}
// TestReplicaResolveIntentRange verifies resolving a range of intents.
func TestReplicaResolveIntentRange(t *testing.T) {
defer leaktest.AfterTest(t)()
tc := testContext{}
tc.Start(t)
defer tc.Stop()
keys := []roachpb.Key{roachpb.Key("a"), roachpb.Key("b")}
txn := newTransaction("test", keys[0], 1, enginepb.SERIALIZABLE, tc.clock)
// Put two values transactionally, leaving an intent at each key.
for _, key := range keys {
pArgs := putArgs(key, []byte("value1"))
txn.Sequence++
if _, pErr := tc.SendWrappedWith(roachpb.Header{Txn: txn}, &pArgs); pErr != nil {
t.Fatal(pErr)
}
}
// Resolve the intents. The span ["a","c") covers both intents at once.
rArgs := &roachpb.ResolveIntentRangeRequest{
Span: roachpb.Span{
Key: roachpb.Key("a"),
EndKey: roachpb.Key("c"),
},
IntentTxn: txn.TxnMeta,
Status: roachpb.COMMITTED,
}
if _, pErr := tc.SendWrapped(rArgs); pErr != nil {
t.Fatal(pErr)
}
// Do a consistent scan to verify intents have been cleared. Lingering
// intents would cause the scan to fail rather than return both rows.
sArgs := scanArgs(roachpb.Key("a"), roachpb.Key("c"))
reply, pErr := tc.SendWrapped(&sArgs)
if pErr != nil {
t.Fatalf("unexpected error on scan: %s", pErr)
}
sReply := reply.(*roachpb.ScanResponse)
if len(sReply.Rows) != 2 {
t.Errorf("expected 2 rows; got %v", sReply.Rows)
}
}
// verifyRangeStats loads the persisted MVCC stats for rangeID from eng and
// fails the test if they do not exactly match expMS. The reported location is
// that of the caller, via caller.Lookup(1).
func verifyRangeStats(eng engine.Engine, rangeID roachpb.RangeID, expMS enginepb.MVCCStats, t *testing.T) {
	var actualMS enginepb.MVCCStats
	if err := engine.MVCCGetRangeStats(context.Background(), eng, rangeID, &actualMS); err != nil {
		t.Fatal(err)
	}
	if reflect.DeepEqual(expMS, actualMS) {
		return
	}
	file, line, _ := caller.Lookup(1)
	t.Errorf("%s:%d: expected stats \n %+v;\ngot \n %+v", file, line, expMS, actualMS)
}
// TestReplicaStatsComputation verifies that commands executed against a
// range update the range stat counters. The stat values are
// empirically derived; we're really just testing that they increment
// in the right ways, not the exact amounts. If the encodings change,
// will need to update this test.
func TestReplicaStatsComputation(t *testing.T) {
defer leaktest.AfterTest(t)()
tc := testContext{
bootstrapMode: bootstrapRangeOnly,
}
tc.Start(t)
defer tc.Stop()
baseStats := initialStats()
// Add in the contribution for the range lease request.
baseStats.Add(enginepb.MVCCStats{
SysCount: 1,
SysBytes: 62,
})
// Phase 1: put a value non-transactionally and verify the live/key/val
// counters pick it up.
pArgs := putArgs([]byte("a"), []byte("value1"))
if _, pErr := tc.SendWrapped(&pArgs); pErr != nil {
t.Fatal(pErr)
}
expMS := baseStats
expMS.Add(enginepb.MVCCStats{
LiveBytes: 25,
KeyBytes: 14,
ValBytes: 11,
LiveCount: 1,
KeyCount: 1,
ValCount: 1,
})
verifyRangeStats(tc.engine, tc.rng.RangeID, expMS, t)
// Phase 2: put a 2nd value transactionally, which additionally leaves
// an intent and bumps the intent counters.
pArgs = putArgs([]byte("b"), []byte("value2"))
// Consistent UUID needed for a deterministic SysBytes value. This is because
// a random UUID could have a 0x00 byte that would be escaped by the encoding,
// increasing the encoded size and throwing off statistics verification.
uuid, err := uuid.FromString("ea5b9590-a157-421b-8b93-a4caa2c41137")
if err != nil {
t.Fatal(err)
}
txn := newTransaction("test", pArgs.Key, 1, enginepb.SERIALIZABLE, tc.clock)
txn.Priority = 123 // So we don't have random values messing with the byte counts on encoding
txn.ID = uuid
if _, pErr := tc.SendWrappedWith(roachpb.Header{Txn: txn}, &pArgs); pErr != nil {
t.Fatal(pErr)
}
expMS = baseStats
expMS.Add(enginepb.MVCCStats{
LiveBytes: 101,
KeyBytes: 28,
ValBytes: 73,
IntentBytes: 23,
LiveCount: 2,
KeyCount: 2,
ValCount: 2,
IntentCount: 1,
})
verifyRangeStats(tc.engine, tc.rng.RangeID, expMS, t)
// Phase 3: resolve the 2nd value's intent as committed; the intent
// counters must drop back to zero.
rArgs := &roachpb.ResolveIntentRequest{
Span: roachpb.Span{
Key: pArgs.Key,
},
IntentTxn: txn.TxnMeta,
Status: roachpb.COMMITTED,
}
if _, pErr := tc.SendWrapped(rArgs); pErr != nil {
t.Fatal(pErr)
}
expMS = baseStats
expMS.Add(enginepb.MVCCStats{
LiveBytes: 50,
KeyBytes: 28,
ValBytes: 22,
LiveCount: 2,
KeyCount: 2,
ValCount: 2,
})
verifyRangeStats(tc.engine, tc.rng.RangeID, expMS, t)
// Phase 4: delete the 1st value. The key stays (KeyCount unchanged, a
// deletion tombstone is a new version), but its live contribution goes.
dArgs := deleteArgs([]byte("a"))
if _, pErr := tc.SendWrapped(&dArgs); pErr != nil {
t.Fatal(pErr)
}
expMS = baseStats
expMS.Add(enginepb.MVCCStats{
LiveBytes: 25,
KeyBytes: 40,
ValBytes: 22,
LiveCount: 1,
KeyCount: 2,
ValCount: 3,
})
verifyRangeStats(tc.engine, tc.rng.RangeID, expMS, t)
}
// TestMerge verifies that the Merge command is behaving as expected. Time
// series data is used, as it is the only data type currently fully supported by
// the merge command.
func TestMerge(t *testing.T) {
defer leaktest.AfterTest(t)()
tc := testContext{}
tc.Start(t)
defer tc.Stop()
key := []byte("mergedkey")
// Build three single-sample time series values; merging them all should
// produce one series containing all three samples ("expected").
args := make([]roachpb.InternalTimeSeriesData, 3)
expected := roachpb.InternalTimeSeriesData{
StartTimestampNanos: 0,
SampleDurationNanos: 1000,
Samples: make([]roachpb.InternalTimeSeriesSample, len(args)),
}
for i := 0; i < len(args); i++ {
sample := roachpb.InternalTimeSeriesSample{
Offset: int32(i),
Count: 1,
Sum: float64(i),
}
args[i] = roachpb.InternalTimeSeriesData{
StartTimestampNanos: expected.StartTimestampNanos,
SampleDurationNanos: expected.SampleDurationNanos,
Samples: []roachpb.InternalTimeSeriesSample{sample},
}
expected.Samples[i] = sample
}
// Merge each value into the same key, one request at a time.
for _, arg := range args {
var v roachpb.Value
if err := v.SetProto(&arg); err != nil {
t.Fatal(err)
}
mergeArgs := internalMergeArgs(key, v)
if _, pErr := tc.SendWrapped(&mergeArgs); pErr != nil {
t.Fatalf("unexpected error from Merge: %s", pErr)
}
}
// Read the merged value back and compare against the expected series.
getArgs := getArgs(key)
reply, pErr := tc.SendWrapped(&getArgs)
if pErr != nil {
t.Fatalf("unexpected error from Get: %s", pErr)
}
resp := reply.(*roachpb.GetResponse)
if resp.Value == nil {
t.Fatal("GetResponse had nil value")
}
var actual roachpb.InternalTimeSeriesData
if err := resp.Value.GetProto(&actual); err != nil {
t.Fatal(err)
}
if !proto.Equal(&actual, &expected) {
t.Errorf("Get did not return expected value: %v != %v", actual, expected)
}
}
// TestTruncateLog verifies that the TruncateLog command removes a
// prefix of the raft logs (modifying FirstIndex() and making them
// inaccessible via Entries()).
func TestTruncateLog(t *testing.T) {
defer leaktest.AfterTest(t)()
tc := testContext{}
tc.Start(t)
defer tc.Stop()
// Disable background truncation so the test fully controls when the
// log is truncated.
tc.rng.store.SetRaftLogQueueActive(false)
// Populate the log with 10 entries. Save the LastIndex after each write.
var indexes []uint64
for i := 0; i < 10; i++ {
args := incrementArgs([]byte("a"), int64(i))
if _, pErr := tc.SendWrapped(&args); pErr != nil {
t.Fatal(pErr)
}
idx, err := tc.rng.GetLastIndex()
if err != nil {
t.Fatal(err)
}
indexes = append(indexes, idx)
}
rangeID := tc.rng.RangeID
// Discard the first half of the log.
truncateArgs := truncateLogArgs(indexes[5], rangeID)
if _, pErr := tc.SendWrapped(&truncateArgs); pErr != nil {
t.Fatal(pErr)
}
// FirstIndex has changed.
firstIndex, err := tc.rng.GetFirstIndex()
if err != nil {
t.Fatal(err)
}
if firstIndex != indexes[5] {
t.Errorf("expected firstIndex == %d, got %d", indexes[5], firstIndex)
}
// We can still get what remains of the log. Entries/Term require the
// replica mutex to be held by the caller.
tc.rng.mu.Lock()
entries, err := tc.rng.Entries(indexes[5], indexes[9], math.MaxUint64)
tc.rng.mu.Unlock()
if err != nil {
t.Fatal(err)
}
if len(entries) != int(indexes[9]-indexes[5]) {
t.Errorf("expected %d entries, got %d", indexes[9]-indexes[5], len(entries))
}
// But any range that includes the truncated entries returns an error.
tc.rng.mu.Lock()
_, err = tc.rng.Entries(indexes[4], indexes[9], math.MaxUint64)
tc.rng.mu.Unlock()
if err != raft.ErrCompacted {
t.Errorf("expected ErrCompacted, got %s", err)
}
// The term of the last truncated entry is still available.
tc.rng.mu.Lock()
term, err := tc.rng.Term(indexes[4])
tc.rng.mu.Unlock()
if err != nil {
t.Fatal(err)
}
if term == 0 {
t.Errorf("invalid term 0 for truncated entry")
}
// The terms of older entries are gone.
tc.rng.mu.Lock()
_, err = tc.rng.Term(indexes[3])
tc.rng.mu.Unlock()
if err != raft.ErrCompacted {
t.Errorf("expected ErrCompacted, got %s", err)
}
// Truncating logs that have already been truncated should not return an
// error.
truncateArgs = truncateLogArgs(indexes[3], rangeID)
if _, pErr := tc.SendWrapped(&truncateArgs); pErr != nil {
t.Fatal(pErr)
}
// Truncating logs that have the wrong rangeID included should not return
// an error but should not truncate any logs.
truncateArgs = truncateLogArgs(indexes[9], rangeID+1)
if _, pErr := tc.SendWrapped(&truncateArgs); pErr != nil {
t.Fatal(pErr)
}
tc.rng.mu.Lock()
// The term of the last truncated entry is still available.
term, err = tc.rng.Term(indexes[4])
tc.rng.mu.Unlock()
if err != nil {
t.Fatal(err)
}
if term == 0 {
t.Errorf("invalid term 0 for truncated entry")
}
}
// TestConditionFailedError tests that a ConditionFailedError correctly
// bubbles up from MVCC to Range.
func TestConditionFailedError(t *testing.T) {
defer leaktest.AfterTest(t)()
tc := testContext{}
tc.Start(t)
defer tc.Stop()
key := []byte("k")
value := []byte("quack")
// Store "quack" at the key.
pArgs := putArgs(key, value)
if _, pErr := tc.SendWrapped(&pArgs); pErr != nil {
t.Fatal(pErr)
}
// Issue a ConditionalPut whose expected value ("moo") deliberately
// mismatches the stored value, forcing a ConditionFailedError.
val := roachpb.MakeValueFromString("moo")
args := roachpb.ConditionalPutRequest{
Span: roachpb.Span{
Key: key,
},
Value: roachpb.MakeValueFromBytes(value),
ExpValue: &val,
}
_, pErr := tc.SendWrappedWith(roachpb.Header{Timestamp: hlc.MinTimestamp}, &args)
// The error must be a ConditionFailedError carrying the actual stored
// bytes ("quack") in ActualValue.
if cErr, ok := pErr.GetDetail().(*roachpb.ConditionFailedError); pErr == nil || !ok {
t.Fatalf("expected ConditionFailedError, got %T with content %+v",
pErr, pErr)
} else if valueBytes, err := cErr.ActualValue.GetBytes(); err != nil {
t.Fatal(err)
} else if cErr.ActualValue == nil || !bytes.Equal(valueBytes, value) {
t.Errorf("ConditionFailedError with bytes %q expected, but got %+v",
value, cErr.ActualValue)
}
}
// TestReplicaSetsEqual tests to ensure that intersectReplicaSets
// returns the correct responses. The two replica sets are compared as
// multisets: order must not matter, but duplicate counts must.
func TestReplicaSetsEqual(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mk := func(ids []roachpb.StoreID) []roachpb.ReplicaDescriptor {
		return createReplicaSets(ids)
	}
	testData := []struct {
		expected bool
		a        []roachpb.ReplicaDescriptor
		b        []roachpb.ReplicaDescriptor
	}{
		{true, []roachpb.ReplicaDescriptor{}, []roachpb.ReplicaDescriptor{}},
		{true, mk([]roachpb.StoreID{1}), mk([]roachpb.StoreID{1})},
		{true, mk([]roachpb.StoreID{1, 2}), mk([]roachpb.StoreID{1, 2})},
		{true, mk([]roachpb.StoreID{1, 2}), mk([]roachpb.StoreID{2, 1})},
		{false, mk([]roachpb.StoreID{1}), mk([]roachpb.StoreID{2})},
		{false, mk([]roachpb.StoreID{1, 2}), mk([]roachpb.StoreID{2})},
		{false, mk([]roachpb.StoreID{1, 2}), mk([]roachpb.StoreID{1})},
		{false, mk([]roachpb.StoreID{}), mk([]roachpb.StoreID{1})},
		{true, mk([]roachpb.StoreID{1, 2, 3}), mk([]roachpb.StoreID{2, 3, 1})},
		{true, mk([]roachpb.StoreID{1, 1}), mk([]roachpb.StoreID{1, 1})},
		{false, mk([]roachpb.StoreID{1, 1}), mk([]roachpb.StoreID{1, 1, 1})},
		{true, mk([]roachpb.StoreID{1, 2, 3, 1, 2, 3}), mk([]roachpb.StoreID{1, 1, 2, 2, 3, 3})},
	}
	for _, test := range testData {
		if got := replicaSetsEqual(test.a, test.b); got != test.expected {
			t.Fatalf("unexpected replica intersection: %+v", test)
		}
	}
}
// TestAppliedIndex issues a series of increments and checks that the
// replica's RaftAppliedIndex strictly advances with each applied command
// while the increment results accumulate correctly.
func TestAppliedIndex(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	var lastApplied uint64
	var expectedSum int64
	for delta := int64(1); delta <= 10; delta++ {
		incArgs := incrementArgs([]byte("a"), delta)
		res, pErr := tc.SendWrapped(&incArgs)
		if pErr != nil {
			t.Fatal(pErr)
		}
		expectedSum += delta
		if incRes := res.(*roachpb.IncrementResponse); incRes.NewValue != expectedSum {
			t.Errorf("expected %d, got %d", expectedSum, incRes.NewValue)
		}
		// Snapshot the applied index under the replica mutex.
		tc.rng.mu.Lock()
		applied := tc.rng.mu.state.RaftAppliedIndex
		tc.rng.mu.Unlock()
		if applied <= lastApplied {
			t.Errorf("appliedIndex did not advance. Was %d, now %d", lastApplied, applied)
		}
		lastApplied = applied
	}
}
// TestReplicaCorruption verifies that a replicaCorruptionError correctly marks
// the range as corrupt.
func TestReplicaCorruption(t *testing.T) {
defer leaktest.AfterTest(t)()
// Install a command filter that reports corruption for any command
// addressed to the key "boom".
tsc := TestStoreContext()
tsc.TestingKnobs.TestingCommandFilter =
func(filterArgs storagebase.FilterArgs) *roachpb.Error {
if filterArgs.Req.Header().Key.Equal(roachpb.Key("boom")) {
return roachpb.NewError(NewReplicaCorruptionError(errors.New("boom")))
}
return nil
}
tc := testContext{}
tc.StartWithStoreContext(t, tsc)
defer tc.Stop()
// First send a regular command.
args := putArgs(roachpb.Key("test1"), []byte("value"))
if _, pErr := tc.SendWrapped(&args); pErr != nil {
t.Fatal(pErr)
}
key := roachpb.Key("boom")
// maybeSetCorrupt should have been called.
args = putArgs(key, []byte("value"))
_, pErr := tc.SendWrapped(&args)
if !testutils.IsPError(pErr, "replica corruption \\(processed=true\\)") {
t.Fatalf("unexpected error: %s", pErr)
}
// Verify replica destroyed was set.
rkey, err := keys.Addr(key)
if err != nil {
t.Fatal(err)
}
r := tc.store.LookupReplica(rkey, rkey)
r.mu.Lock()
defer r.mu.Unlock()
if r.mu.destroyed.Error() != pErr.GetDetail().Error() {
t.Fatalf("expected r.mu.destroyed == pErr.GetDetail(), instead %q != %q", r.mu.destroyed, pErr.GetDetail())
}
// Verify destroyed error was persisted. Reload it from the engine and
// compare it against both the in-memory copy and the returned error.
pErr, err = loadReplicaDestroyedError(context.Background(), r.store.Engine(), r.RangeID)
if err != nil {
t.Fatal(err)
}
if r.mu.destroyed.Error() != pErr.GetDetail().Error() {
t.Fatalf("expected r.mu.destroyed == pErr.GetDetail(), instead %q != %q", r.mu.destroyed, pErr.GetDetail())
}
// TODO(bdarnell): when maybeSetCorrupt is finished verify that future commands fail too.
}
// TestChangeReplicasDuplicateError tests that a replica change that would
// use a NodeID twice in the replica configuration fails.
func TestChangeReplicasDuplicateError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	// Adding a replica on the node that already hosts this range must be
	// rejected with an "already present" error.
	dupReplica := roachpb.ReplicaDescriptor{
		NodeID:  tc.store.Ident.NodeID,
		StoreID: 9999,
	}
	err := tc.rng.ChangeReplicas(context.Background(), roachpb.ADD_REPLICA, dupReplica, tc.rng.Desc())
	if err == nil || !strings.Contains(err.Error(), "already present") {
		t.Fatalf("must not be able to add second replica to same node (err=%s)", err)
	}
}
// TestReplicaDanglingMetaIntent creates a dangling intent on a meta2
// record and verifies that RangeLookup requests behave
// appropriately. Normally, the old value and a write intent error
// should be returned. If IgnoreIntents is specified, then a random
// choice of old or new is returned with no error.
// TODO(tschottdorf): add a test in which there is a dangling intent on a
// descriptor we would've otherwise discarded in a reverse scan; verify that
// we don't erroneously return that descriptor (recently fixed bug).
func TestReplicaDanglingMetaIntent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Run the scenario once per RangeLookup scan direction.
	for _, reverse := range []bool{false, true} {
		testRangeDanglingMetaIntent(t, reverse)
	}
}
// testRangeDanglingMetaIntent is the shared body of
// TestReplicaDanglingMetaIntent. It writes a dangling intent over a meta2
// descriptor and exercises RangeLookup in inconsistent, consistent and
// ConsiderIntents modes, scanning forward or in reverse per isReverse.
func testRangeDanglingMetaIntent(t *testing.T, isReverse bool) {
tc := testContext{}
tc.Start(t)
defer tc.Stop()
key := roachpb.Key("a")
// Get original meta2 descriptor.
rlArgs := &roachpb.RangeLookupRequest{
Span: roachpb.Span{
Key: keys.RangeMetaKey(roachpb.RKey(key)),
},
MaxRanges: 1,
Reverse: isReverse,
}
var rlReply *roachpb.RangeLookupResponse
reply, pErr := tc.SendWrappedWith(roachpb.Header{
ReadConsistency: roachpb.INCONSISTENT,
}, rlArgs)
if pErr != nil {
t.Fatal(pErr)
}
rlReply = reply.(*roachpb.RangeLookupResponse)
origDesc := rlReply.Ranges[0]
// Build a modified copy of the descriptor (EndKey truncated to "a") to
// write as the intent's provisional value.
newDesc := origDesc
var err error
newDesc.EndKey, err = keys.Addr(key)
if err != nil {
t.Fatal(err)
}
// Write the new descriptor as an intent.
data, err := protoutil.Marshal(&newDesc)
if err != nil {
t.Fatal(err)
}
txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
// Officially begin the transaction. If not for this, the intent resolution
// machinery would simply remove the intent we write below, see #3020.
// We send directly to Replica throughout this test, so there's no danger
// of the Store aborting this transaction (i.e. we don't have to set a high
// priority).
pArgs := putArgs(keys.RangeMetaKey(roachpb.RKey(key)), data)
txn.Sequence++
if _, pErr = maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), roachpb.Header{Txn: txn}, &pArgs); pErr != nil {
t.Fatal(pErr)
}
// Now lookup the range; should get the value. Since the lookup is
// inconsistent, there's no WriteIntentError.
// Note that 'A' < 'a'.
rlArgs.Key = keys.RangeMetaKey(roachpb.RKey{'A'})
reply, pErr = tc.SendWrappedWith(roachpb.Header{
Timestamp: hlc.MinTimestamp,
ReadConsistency: roachpb.INCONSISTENT,
}, rlArgs)
if pErr != nil {
t.Errorf("unexpected lookup error: %s", pErr)
}
rlReply = reply.(*roachpb.RangeLookupResponse)
if !reflect.DeepEqual(rlReply.Ranges[0], origDesc) {
t.Errorf("expected original descriptor %s; got %s", &origDesc, &rlReply.Ranges[0])
}
// Switch to consistent lookups, which should run into the intent.
_, pErr = tc.SendWrappedWith(roachpb.Header{
ReadConsistency: roachpb.CONSISTENT,
}, rlArgs)
if _, ok := pErr.GetDetail().(*roachpb.WriteIntentError); !ok {
t.Fatalf("expected WriteIntentError, not %s", pErr)
}
// Try a single lookup with ConsiderIntents. Expect to see both descriptors.
// First, try this consistently, which should not be allowed.
rlArgs.ConsiderIntents = true
_, pErr = tc.SendWrapped(rlArgs)
if !testutils.IsPError(pErr, "can not read consistently and special-case intents") {
t.Fatalf("wanted specific error, not %s", pErr)
}
// After changing back to inconsistent lookups, should be good to go.
var origSeen, newSeen bool
clonedRLArgs := *rlArgs
reply, pErr = tc.SendWrappedWith(roachpb.Header{
ReadConsistency: roachpb.INCONSISTENT,
}, &clonedRLArgs)
if pErr != nil {
t.Fatal(pErr)
}
rlReply = reply.(*roachpb.RangeLookupResponse)
// Both the committed (orig) and provisional (new) descriptor must
// appear in the response, and nothing else.
for _, seen := range rlReply.Ranges {
if reflect.DeepEqual(seen, origDesc) {
origSeen = true
} else if reflect.DeepEqual(seen, newDesc) {
newSeen = true
} else {
t.Errorf("expected orig/new descriptor %s/%s; got %s", &origDesc, &newDesc, &seen)
}
}
if !origSeen || !newSeen {
t.Errorf("expected to see both original and new descriptor; saw original = %t, saw new = %t", origSeen, newSeen)
}
}
// TestReplicaLookupUseReverseScan verifies the correctness of the results which are retrieved
// from RangeLookup by using ReverseScan.
//
// The test seeds meta2 with descriptors for four ranges (committing all
// but one), runs a table of reverse lookups against the clean meta2, then
// writes the remaining descriptor as an unresolved intent and re-runs the
// same table: inconsistent reverse lookups must skip the intent and fall
// back to the preceding committed descriptor.
func TestReplicaLookupUseReverseScan(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Test ranges: ["a","c"), ["c","f"), ["f","h") and ["h","y").
	testRanges := []roachpb.RangeDescriptor{
		{RangeID: 2, StartKey: roachpb.RKey("a"), EndKey: roachpb.RKey("c")},
		{RangeID: 3, StartKey: roachpb.RKey("c"), EndKey: roachpb.RKey("f")},
		{RangeID: 4, StartKey: roachpb.RKey("f"), EndKey: roachpb.RKey("h")},
		{RangeID: 5, StartKey: roachpb.RKey("h"), EndKey: roachpb.RKey("y")},
	}
	// The range ["f","h") has dangling intent in meta2.
	withIntentRangeIndex := 2
	testCases := []struct {
		key      string
		expected roachpb.RangeDescriptor
	}{
		// For testRanges[0|1|3] there is no intent. A key in the middle
		// and the end key should both give us the range itself.
		{key: "b", expected: testRanges[0]},
		{key: "c", expected: testRanges[0]},
		{key: "d", expected: testRanges[1]},
		{key: "f", expected: testRanges[1]},
		{key: "j", expected: testRanges[3]},
		// testRanges[2] has an intent, so the inconsistent scan will read
		// an old value (nil). Since we're in reverse mode, testRanges[1]
		// is the result.
		{key: "g", expected: testRanges[1]},
		{key: "h", expected: testRanges[1]},
	}
	txn := newTransaction("test", roachpb.Key{}, 1, enginepb.SERIALIZABLE, tc.clock)
	// Write descriptors for every range except the one designated to get
	// a dangling intent; that one stays absent from meta2 for now.
	for i, r := range testRanges {
		if i != withIntentRangeIndex {
			// Write the new descriptor as an intent.
			data, err := protoutil.Marshal(&r)
			if err != nil {
				t.Fatal(err)
			}
			pArgs := putArgs(keys.RangeMetaKey(roachpb.RKey(r.EndKey)), data)
			txn.Sequence++
			if _, pErr := tc.SendWrappedWith(roachpb.Header{Txn: txn}, &pArgs); pErr != nil {
				t.Fatal(pErr)
			}
		}
	}
	// Resolve the intents.
	rArgs := &roachpb.ResolveIntentRangeRequest{
		Span: roachpb.Span{
			Key:    keys.RangeMetaKey(roachpb.RKey("a")),
			EndKey: keys.RangeMetaKey(roachpb.RKey("z")),
		},
		IntentTxn: txn.TxnMeta,
		Status:    roachpb.COMMITTED,
	}
	if _, pErr := tc.SendWrapped(rArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Get original meta2 descriptor.
	rlArgs := &roachpb.RangeLookupRequest{
		MaxRanges: 1,
		Reverse:   true,
	}
	var rlReply *roachpb.RangeLookupResponse
	// Test ReverseScan without intents.
	for _, c := range testCases {
		// Each iteration sends a fresh copy with its own lookup key.
		clonedRLArgs := *rlArgs
		clonedRLArgs.Key = keys.RangeMetaKey(roachpb.RKey(c.key))
		reply, pErr := tc.SendWrappedWith(roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, &clonedRLArgs)
		if pErr != nil {
			t.Fatal(pErr)
		}
		rlReply = reply.(*roachpb.RangeLookupResponse)
		seen := rlReply.Ranges[0]
		if !(seen.StartKey.Equal(c.expected.StartKey) && seen.EndKey.Equal(c.expected.EndKey)) {
			t.Errorf("expected descriptor %s; got %s", &c.expected, &seen)
		}
	}
	// Write the new descriptor as an intent.
	intentRange := testRanges[withIntentRangeIndex]
	data, err := protoutil.Marshal(&intentRange)
	if err != nil {
		t.Fatal(err)
	}
	pArgs := putArgs(keys.RangeMetaKey(roachpb.RKey(intentRange.EndKey)), data)
	// A separate transaction leaves this intent dangling (never resolved).
	txn2 := newTransaction("test", roachpb.Key{}, 1, enginepb.SERIALIZABLE, tc.clock)
	if _, pErr := tc.SendWrappedWith(roachpb.Header{Txn: txn2}, &pArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Test ReverseScan with intents.
	for _, c := range testCases {
		clonedRLArgs := *rlArgs
		clonedRLArgs.Key = keys.RangeMetaKey(roachpb.RKey(c.key))
		reply, pErr := tc.SendWrappedWith(roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, &clonedRLArgs)
		if pErr != nil {
			t.Fatal(pErr)
		}
		rlReply = reply.(*roachpb.RangeLookupResponse)
		seen := rlReply.Ranges[0]
		if !(seen.StartKey.Equal(c.expected.StartKey) && seen.EndKey.Equal(c.expected.EndKey)) {
			t.Errorf("expected descriptor %s; got %s", &c.expected, &seen)
		}
	}
}
// TestReplicaLookup runs a table of RangeLookup requests (forward and
// reverse) against the test store's single range and checks the returned
// descriptors, including the meta-prefix boundary edge cases.
func TestReplicaLookup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	expected := []roachpb.RangeDescriptor{*tc.rng.Desc()}
	testCases := []struct {
		key      roachpb.RKey
		reverse  bool
		expected []roachpb.RangeDescriptor
	}{
		// Test with the first range (StartKey==KeyMin). Normally we look
		// up this range in gossip instead of executing the RPC, but
		// RangeLookup is still used when up-to-date information is
		// required.
		{key: roachpb.RKeyMin, reverse: false, expected: expected},
		// Test with the last key in a meta prefix. This is an edge case in the
		// implementation.
		{key: keys.MustAddr(keys.Meta1KeyMax), reverse: false, expected: expected},
		{key: keys.MustAddr(keys.Meta2KeyMax), reverse: false, expected: nil},
		{key: keys.MustAddr(keys.Meta1KeyMax), reverse: true, expected: expected},
		{key: keys.MustAddr(keys.Meta2KeyMax), reverse: true, expected: expected},
	}
	for _, test := range testCases {
		req := &roachpb.RangeLookupRequest{
			Span: roachpb.Span{
				Key: test.key.AsRawKey(),
			},
			MaxRanges: 1,
			Reverse:   test.reverse,
		}
		resp, pErr := tc.SendWrapped(req)
		if pErr != nil {
			// An error is only acceptable when no result was expected.
			if test.expected != nil {
				t.Fatal(pErr)
			}
			continue
		}
		lookup := resp.(*roachpb.RangeLookupResponse)
		if !reflect.DeepEqual(lookup.Ranges, test.expected) {
			t.Fatalf("expected %+v, got %+v", test.expected, lookup.Ranges)
		}
	}
}
// TestRequestLeaderEncounterGroupDeleteError verifies that a lease request which fails with
// RaftGroupDeletedError is converted to a RangeNotFoundError in the Store.
func TestRequestLeaderEncounterGroupDeleteError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Mock proposeRaftCommand to return an roachpb.RaftGroupDeletedError.
	proposeRaftCommandFn := func(*pendingCmd) error {
		return &roachpb.RaftGroupDeletedError{}
	}
	rng := tc.rng
	// Install the mock under the replica's mutex.
	rng.mu.Lock()
	rng.mu.proposeRaftCommandFn = proposeRaftCommandFn
	rng.mu.Unlock()
	gArgs := getArgs(roachpb.Key("a"))
	// Force the read command request a new lease.
	// Advance the clock past the lease expiry so the Get below must
	// acquire a new lease, which hits the mocked proposal path.
	clock := tc.clock
	ts := clock.Update(clock.Now().Add(leaseExpiry(tc.rng), 0))
	_, pErr := client.SendWrappedWith(tc.store, nil, roachpb.Header{
		Timestamp: ts,
		RangeID:   1,
	}, &gArgs)
	// The Store is expected to translate RaftGroupDeletedError into
	// RangeNotFoundError before it reaches the client.
	if _, ok := pErr.GetDetail().(*roachpb.RangeNotFoundError); !ok {
		t.Fatalf("expected a RangeNotFoundError, get %s", pErr)
	}
}
// TestIntentIntersect verifies intersectSpan, which splits an intent span
// against a range descriptor into the portion contained in the range
// ("in") and the portions outside of it ("out"). Expected results are
// encoded as a flat key list: in.Key, in.EndKey (two empty strings when
// nothing is inside), followed by Key/EndKey pairs for each outside span.
func TestIntentIntersect(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// A point intent (no EndKey).
	iPt := roachpb.Span{
		Key:    roachpb.Key("asd"),
		EndKey: nil,
	}
	// A plain key-range intent.
	iRn := roachpb.Span{
		Key:    roachpb.Key("c"),
		EndKey: roachpb.Key("x"),
	}
	suffix := roachpb.RKey("abcd")
	// A range-local intent built from range keys.
	iLc := roachpb.Span{
		Key:    keys.MakeRangeKey(roachpb.RKey("c"), suffix, nil),
		EndKey: keys.MakeRangeKey(roachpb.RKey("x"), suffix, nil),
	}
	kl1 := string(iLc.Key)
	kl2 := string(iLc.EndKey)
	for i, tc := range []struct {
		intent   roachpb.Span
		from, to string
		exp      []string
	}{
		{intent: iPt, from: "", to: "z", exp: []string{"", "", "asd", ""}},
		{intent: iRn, from: "", to: "a", exp: []string{"", "", "c", "x"}},
		{intent: iRn, from: "", to: "c", exp: []string{"", "", "c", "x"}},
		{intent: iRn, from: "a", to: "z", exp: []string{"c", "x"}},
		{intent: iRn, from: "c", to: "d", exp: []string{"c", "d", "d", "x"}},
		{intent: iRn, from: "c", to: "x", exp: []string{"c", "x"}},
		{intent: iRn, from: "d", to: "x", exp: []string{"d", "x", "c", "d"}},
		{intent: iRn, from: "d", to: "w", exp: []string{"d", "w", "c", "d", "w", "x"}},
		{intent: iRn, from: "c", to: "w", exp: []string{"c", "w", "w", "x"}},
		{intent: iRn, from: "w", to: "x", exp: []string{"w", "x", "c", "w"}},
		{intent: iRn, from: "x", to: "z", exp: []string{"", "", "c", "x"}},
		{intent: iRn, from: "y", to: "z", exp: []string{"", "", "c", "x"}},
		// A local intent range always comes back in one piece, either inside
		// or outside of the Range.
		{intent: iLc, from: "a", to: "b", exp: []string{"", "", kl1, kl2}},
		{intent: iLc, from: "d", to: "z", exp: []string{"", "", kl1, kl2}},
		{intent: iLc, from: "f", to: "g", exp: []string{"", "", kl1, kl2}},
		{intent: iLc, from: "c", to: "x", exp: []string{kl1, kl2}},
		{intent: iLc, from: "a", to: "z", exp: []string{kl1, kl2}},
	} {
		var all []string
		in, out := intersectSpan(tc.intent, roachpb.RangeDescriptor{
			StartKey: roachpb.RKey(tc.from),
			EndKey:   roachpb.RKey(tc.to),
		})
		// Flatten the result into the same shape as tc.exp.
		if in != nil {
			all = append(all, string(in.Key), string(in.EndKey))
		} else {
			all = append(all, "", "")
		}
		for _, o := range out {
			all = append(all, string(o.Key), string(o.EndKey))
		}
		if !reflect.DeepEqual(all, tc.exp) {
			t.Errorf("%d: wanted %v, got %v", i, tc.exp, all)
		}
	}
}
// TestBatchErrorWithIndex tests that when an individual entry in a
// batch results in an error with an index, the index of this command
// is stored into the error.
func TestBatchErrorWithIndex(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	ba := roachpb.BatchRequest{}
	// This one succeeds.
	ba.Add(&roachpb.PutRequest{
		Span:  roachpb.Span{Key: roachpb.Key("k")},
		Value: roachpb.MakeValueFromString("not nil"),
	})
	// This one fails with a ConditionalPutError, which will populate the
	// returned error's index.
	ba.Add(&roachpb.ConditionalPutRequest{
		Span:     roachpb.Span{Key: roachpb.Key("k")},
		Value:    roachpb.MakeValueFromString("irrelevant"),
		ExpValue: nil, // not true after above Put
	})
	// This one is never executed.
	ba.Add(&roachpb.GetRequest{
		Span: roachpb.Span{Key: roachpb.Key("k")},
	})
	// The batch error must point at entry 1 (the ConditionalPut).
	if _, pErr := tc.Sender().Send(context.Background(), ba); pErr == nil {
		t.Fatal("expected an error")
	} else if pErr.Index == nil || pErr.Index.Index != 1 || !testutils.IsPError(pErr, "unexpected value") {
		t.Fatalf("invalid index or error type: %s", pErr)
	}
}
// TestReplicaLoadSystemConfigSpanIntent verifies that intents on the SystemConfigSpan
// cause an error, but trigger asynchronous cleanup.
func TestReplicaLoadSystemConfigSpanIntent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Fixed local name: was misspelled "scStartSddr".
	scStartAddr, err := keys.Addr(keys.SystemConfigSpan.Key)
	if err != nil {
		t.Fatal(err)
	}
	rng := tc.store.LookupReplica(scStartAddr, nil)
	if rng == nil {
		t.Fatalf("no replica contains the SystemConfig span")
	}
	// Create a transaction and write an intent to the system
	// config span.
	key := keys.SystemConfigSpan.Key
	_, btH := beginTxnArgs(key, newTransaction("test", key, 1, enginepb.SERIALIZABLE, rng.store.Clock()))
	btH.Txn.Priority = 1 // low so it can be pushed
	put := putArgs(key, []byte("foo"))
	if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
		t.Fatal(pErr)
	}
	// Abort the transaction so that the async intent resolution caused
	// by loading the system config span doesn't waste any time in
	// clearing the intent.
	pusher := newTransaction("test", key, 1, enginepb.SERIALIZABLE, rng.store.Clock())
	pusher.Priority = 2 // will push successfully
	pushArgs := pushTxnArgs(pusher, btH.Txn, roachpb.PUSH_ABORT)
	if _, pErr := tc.SendWrapped(&pushArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Verify that the intent trips up loading the SystemConfig data.
	if _, _, err := rng.loadSystemConfigSpan(); err != errSystemConfigIntent {
		t.Fatal(err)
	}
	// In the loop, wait until the intent is aborted. Then write a "real" value
	// there and verify that we can now load the data as expected.
	v := roachpb.MakeValueFromString("foo")
	util.SucceedsSoon(t, func() error {
		// Rewrite the value on each attempt; the put itself fails while
		// the aborted txn's intent is still present.
		if err := engine.MVCCPut(context.Background(), rng.store.Engine(), &enginepb.MVCCStats{},
			keys.SystemConfigSpan.Key, rng.store.Clock().Now(), v, nil); err != nil {
			return err
		}
		kvs, _, err := rng.loadSystemConfigSpan()
		if err != nil {
			return err
		}
		// Exactly the one key we wrote should be visible.
		if len(kvs) != 1 || !bytes.Equal(kvs[0].Key, keys.SystemConfigSpan.Key) {
			return errors.Errorf("expected only key %s in SystemConfigSpan map: %+v", keys.SystemConfigSpan.Key, kvs)
		}
		return nil
	})
}
// TestReplicaDestroy verifies that Replica.Destroy fails when handed a
// stale descriptor (one whose replica ID for this store has since
// changed) and succeeds when given the replica's current descriptor.
func TestReplicaDestroy(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	rep, err := tc.store.GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}
	// First try and fail with a stale descriptor.
	origDesc := rep.Desc()
	newDesc := protoutil.Clone(origDesc).(*roachpb.RangeDescriptor)
	// Bump the replica ID of this store's replica so origDesc becomes stale.
	for i := range newDesc.Replicas {
		if newDesc.Replicas[i].StoreID == tc.store.StoreID() {
			newDesc.Replicas[i].ReplicaID++
			newDesc.NextReplicaID++
			break
		}
	}
	if err := rep.setDesc(newDesc); err != nil {
		t.Fatal(err)
	}
	if err := rep.Destroy(*origDesc); !testutils.IsError(err, "replica ID has changed") {
		t.Fatalf("expected error 'replica ID has changed' but got %s", err)
	}
	// Now try a fresh descriptor and succeed.
	if err := rep.Destroy(*rep.Desc()); err != nil {
		t.Fatal(err)
	}
}
// TestEntries exercises Replica.Entries (the raft.Storage implementation)
// against a ten-entry log whose first half has been truncated, covering
// maxBytes limits, reads of truncated indexes (raft.ErrCompacted), reads
// past the end (raft.ErrUnavailable), and a gap carved into the log.
func TestEntries(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Disable the raft log queue so it can't truncate behind our back.
	tc.rng.store.SetRaftLogQueueActive(false)
	// Populate the log with 10 entries. Save the LastIndex after each write.
	var indexes []uint64
	for i := 0; i < 10; i++ {
		args := incrementArgs([]byte("a"), int64(i))
		if _, pErr := tc.SendWrapped(&args); pErr != nil {
			t.Fatal(pErr)
		}
		idx, err := tc.rng.GetLastIndex()
		if err != nil {
			t.Fatal(err)
		}
		indexes = append(indexes, idx)
	}
	rng := tc.rng
	rangeID := rng.RangeID
	// Discard the first half of the log.
	truncateArgs := truncateLogArgs(indexes[5], rangeID)
	if _, pErr := tc.SendWrapped(&truncateArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Note: the loop variable tc below shadows the outer testContext.
	for i, tc := range []struct {
		lo             uint64
		hi             uint64
		maxBytes       uint64
		expResultCount int
		expError       error
	}{
		// Case 0: Just most of the entries.
		{lo: indexes[5], hi: indexes[9], expResultCount: 4},
		// Case 1: Get a single entry.
		{lo: indexes[5], hi: indexes[6], expResultCount: 1},
		// Case 2: Use MaxUint64 instead of 0 for maxBytes.
		{lo: indexes[5], hi: indexes[9], maxBytes: math.MaxUint64, expResultCount: 4},
		// Case 3: maxBytes is set low so only a single value should be
		// returned.
		{lo: indexes[5], hi: indexes[9], maxBytes: 1, expResultCount: 1},
		// Case 4: hi value is past the last index, should return all available
		// entries
		{lo: indexes[5], hi: indexes[9] + 1, expResultCount: 5},
		// Case 5: all values have been truncated.
		{lo: indexes[1], hi: indexes[2], expError: raft.ErrCompacted},
		// Case 6: hi has just been truncated.
		{lo: indexes[1], hi: indexes[4], expError: raft.ErrCompacted},
		// Case 7: another case where hi has just been truncated.
		{lo: indexes[3], hi: indexes[4], expError: raft.ErrCompacted},
		// Case 8: lo has been truncated and hi is the truncation point.
		{lo: indexes[4], hi: indexes[5], expError: raft.ErrCompacted},
		// Case 9: lo has been truncated but hi is available.
		{lo: indexes[4], hi: indexes[9], expError: raft.ErrCompacted},
		// Case 10: lo has been truncated and hi is not available.
		{lo: indexes[4], hi: indexes[9] + 100, expError: raft.ErrCompacted},
		// Case 11: lo has been truncated but hi is available, and maxBytes is
		// set low.
		{lo: indexes[4], hi: indexes[9], maxBytes: 1, expError: raft.ErrCompacted},
		// Case 12: lo is available but hi isn't.
		{lo: indexes[5], hi: indexes[9] + 100, expError: raft.ErrUnavailable},
		// Case 13: both lo and hi are not available.
		{lo: indexes[9] + 100, hi: indexes[9] + 1000, expError: raft.ErrUnavailable},
		// Case 14: lo is available, hi is not, but it was cut off by maxBytes.
		{lo: indexes[5], hi: indexes[9] + 1000, maxBytes: 1, expResultCount: 1},
	} {
		// Entries requires the replica mutex to be held.
		rng.mu.Lock()
		ents, err := rng.Entries(tc.lo, tc.hi, tc.maxBytes)
		rng.mu.Unlock()
		if tc.expError == nil && err != nil {
			t.Errorf("%d: expected no error, got %s", i, err)
			continue
		} else if err != tc.expError {
			t.Errorf("%d: expected error %s, got %s", i, tc.expError, err)
			continue
		}
		if len(ents) != tc.expResultCount {
			t.Errorf("%d: expected %d entries, got %d", i, tc.expResultCount, len(ents))
		}
	}
	// Case 15: Lo must be less than or equal to hi.
	rng.mu.Lock()
	if _, err := rng.Entries(indexes[9], indexes[5], 0); err == nil {
		t.Errorf("15: error expected, got none")
	}
	rng.mu.Unlock()
	// Case 16: add a gap to the indexes.
	if err := engine.MVCCDelete(context.Background(), tc.store.Engine(), nil, keys.RaftLogKey(rangeID, indexes[6]), hlc.ZeroTimestamp,
		nil); err != nil {
		t.Fatal(err)
	}
	rng.mu.Lock()
	defer rng.mu.Unlock()
	if _, err := rng.Entries(indexes[5], indexes[9], 0); err == nil {
		t.Errorf("16: error expected, got none")
	}
	// Case 17: don't hit the gap due to maxBytes.
	ents, err := rng.Entries(indexes[5], indexes[9], 1)
	if err != nil {
		t.Errorf("17: expected no error, got %s", err)
	}
	if len(ents) != 1 {
		t.Errorf("17: expected 1 entry, got %d", len(ents))
	}
	// Case 18: don't hit the gap due to truncation.
	if _, err := rng.Entries(indexes[4], indexes[9], 0); err != raft.ErrCompacted {
		t.Errorf("18: expected error %s , got %s", raft.ErrCompacted, err)
	}
}
// TestTerm exercises Replica.Term (part of the raft.Storage interface)
// after truncating the first half of a ten-entry log: truncated indexes
// return raft.ErrCompacted, valid indexes (including firstIndex-1 and
// lastIndex) return a term, and indexes past the end return
// raft.ErrUnavailable.
func TestTerm(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Disable the raft log queue so it can't truncate behind our back.
	tc.rng.store.SetRaftLogQueueActive(false)
	rng := tc.rng
	rangeID := rng.RangeID
	// Populate the log with 10 entries. Save the LastIndex after each write.
	var indexes []uint64
	for i := 0; i < 10; i++ {
		args := incrementArgs([]byte("a"), int64(i))
		if _, pErr := tc.SendWrapped(&args); pErr != nil {
			t.Fatal(pErr)
		}
		idx, err := tc.rng.GetLastIndex()
		if err != nil {
			t.Fatal(err)
		}
		indexes = append(indexes, idx)
	}
	// Discard the first half of the log.
	truncateArgs := truncateLogArgs(indexes[5], rangeID)
	if _, pErr := tc.SendWrapped(&truncateArgs); pErr != nil {
		t.Fatal(pErr)
	}
	rng.mu.Lock()
	defer rng.mu.Unlock()
	firstIndex, err := rng.FirstIndex()
	if err != nil {
		t.Fatal(err)
	}
	if firstIndex != indexes[5] {
		// Bug fix: the message used to print indexes[4] even though the
		// check compares against indexes[5] (the truncation point).
		t.Fatalf("expected firstIndex %d to be %d", firstIndex, indexes[5])
	}
	// Truncated logs should return an ErrCompacted error.
	if _, err := tc.rng.Term(indexes[1]); err != raft.ErrCompacted {
		t.Errorf("expected ErrCompacted, got %s", err)
	}
	if _, err := tc.rng.Term(indexes[3]); err != raft.ErrCompacted {
		t.Errorf("expected ErrCompacted, got %s", err)
	}
	// FirstIndex-1 should return the term of firstIndex.
	firstIndexTerm, err := tc.rng.Term(firstIndex)
	if err != nil {
		t.Errorf("expect no error, got %s", err)
	}
	term, err := tc.rng.Term(indexes[4])
	if err != nil {
		t.Errorf("expect no error, got %s", err)
	}
	if term != firstIndexTerm {
		t.Errorf("expected firstIndex-1's term:%d to equal that of firstIndex:%d", term, firstIndexTerm)
	}
	lastIndex, err := rng.LastIndex()
	if err != nil {
		t.Fatal(err)
	}
	// Last index should return correctly.
	if _, err := tc.rng.Term(lastIndex); err != nil {
		t.Errorf("expected no error, got %s", err)
	}
	// Terms for after the last index should return ErrUnavailable.
	if _, err := tc.rng.Term(lastIndex + 1); err != raft.ErrUnavailable {
		t.Errorf("expected ErrUnavailable, got %s", err)
	}
	if _, err := tc.rng.Term(indexes[9] + 1000); err != raft.ErrUnavailable {
		t.Errorf("expected ErrUnavailable, got %s", err)
	}
}
// TestGCIncorrectRange verifies that a GC request addressed to a range
// which does not contain the targeted key is silently dropped (the key
// survives), while the same GC request sent to the correct range removes
// the old version.
func TestGCIncorrectRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Split range into two ranges.
	splitKey := roachpb.RKey("c")
	rng1 := tc.rng
	rng2 := splitTestRange(tc.store, splitKey, splitKey, t)
	// Write a key to range 2 at two different timestamps so we can
	// GC the earlier timestamp without needing to delete it.
	key := splitKey.PrefixEnd().AsRawKey()
	val := []byte("value")
	putReq := putArgs(key, val)
	ts1 := makeTS(1, 0)
	ts2 := makeTS(2, 0)
	ts1Header := roachpb.Header{Timestamp: ts1}
	ts2Header := roachpb.Header{Timestamp: ts2}
	if _, pErr := client.SendWrappedWith(rng2, context.Background(), ts1Header, &putReq); pErr != nil {
		t.Errorf("unexpected pError on put key request: %s", pErr)
	}
	if _, pErr := client.SendWrappedWith(rng2, context.Background(), ts2Header, &putReq); pErr != nil {
		t.Errorf("unexpected pError on put key request: %s", pErr)
	}
	// Send GC request to range 1 for the key on range 2, which
	// should succeed even though it doesn't contain the key, because
	// the request for the incorrect key will be silently dropped.
	gKey := gcKey(key, ts1)
	gcReq := gcArgs(rng1.Desc().StartKey, rng1.Desc().EndKey, gKey)
	if _, pErr := client.SendWrappedWith(rng1, context.Background(), roachpb.Header{Timestamp: tc.clock.Now()}, &gcReq); pErr != nil {
		t.Errorf("unexpected pError on garbage collection request to incorrect range: %s", pErr)
	}
	// Make sure the key still exists on range 2.
	getReq := getArgs(key)
	if res, pErr := client.SendWrappedWith(rng2, context.Background(), ts1Header, &getReq); pErr != nil {
		t.Errorf("unexpected pError on get request to correct range: %s", pErr)
	} else if resVal := res.(*roachpb.GetResponse).Value; resVal == nil {
		t.Errorf("expected value %s to exists after GC to incorrect range but before GC to correct range, found %v", val, resVal)
	}
	// Send GC request to range 2 for the same key.
	gcReq = gcArgs(rng2.Desc().StartKey, rng2.Desc().EndKey, gKey)
	if _, pErr := client.SendWrappedWith(rng2, context.Background(), roachpb.Header{Timestamp: tc.clock.Now()}, &gcReq); pErr != nil {
		t.Errorf("unexpected pError on garbage collection request to correct range: %s", pErr)
	}
	// Make sure the key no longer exists on range 2.
	// Reading at ts1 must now come back empty.
	if res, pErr := client.SendWrappedWith(rng2, context.Background(), ts1Header, &getReq); pErr != nil {
		t.Errorf("unexpected pError on get request to correct range: %s", pErr)
	} else if resVal := res.(*roachpb.GetResponse).Value; resVal != nil {
		t.Errorf("expected value at key %s to no longer exist after GC to correct range, found value %v", key, resVal)
	}
}
// TestReplicaCancelRaft checks that it is possible to safely abandon Raft
// commands via a cancelable context.Context.
//
// Two scenarios are covered: cancellation before the command is proposed
// (must return a context.Canceled error) and cancellation while the
// command is already being processed (the client waits for completion).
func TestReplicaCancelRaft(t *testing.T) {
	defer leaktest.AfterTest(t)()
	for _, cancelEarly := range []bool{true, false} {
		func() {
			// Pick a key unlikely to be used by background processes.
			key := []byte("acdfg")
			ctx, cancel := context.WithCancel(context.Background())
			tsc := TestStoreContext()
			if !cancelEarly {
				// Late cancellation: cancel from the command filter, i.e.
				// once our request is already being executed.
				tsc.TestingKnobs.TestingCommandFilter =
					func(filterArgs storagebase.FilterArgs) *roachpb.Error {
						if !filterArgs.Req.Header().Key.Equal(key) {
							return nil
						}
						cancel()
						return nil
					}
			}
			tc := testContext{}
			tc.StartWithStoreContext(t, tsc)
			defer tc.Stop()
			if cancelEarly {
				// Cancel before sending and make proposals a no-op so the
				// command can never complete.
				cancel()
				tc.rng.mu.Lock()
				tc.rng.mu.proposeRaftCommandFn = func(*pendingCmd) error {
					return nil
				}
				tc.rng.mu.Unlock()
			}
			var ba roachpb.BatchRequest
			ba.Add(&roachpb.GetRequest{
				Span: roachpb.Span{Key: key},
			})
			if err := ba.SetActiveTimestamp(tc.clock.Now); err != nil {
				t.Fatal(err)
			}
			br, pErr := tc.rng.addWriteCmd(ctx, ba)
			if pErr == nil {
				if !cancelEarly {
					// We cancelled the context while the command was already
					// being processed, so the client had to wait for successful
					// execution.
					return
				}
				t.Fatalf("expected an error, but got successful response %+v", br)
			}
			// If we cancelled the context early enough, we expect to receive a
			// corresponding error and not wait for the command.
			if !testutils.IsPError(pErr, context.Canceled.Error()) {
				t.Fatalf("unexpected error: %s", pErr)
			}
		}()
	}
}
// verifyChecksum runs the compute-checksum trigger for a fresh checksum
// ID on the given replica, fails the test if no checksum was produced,
// then runs the verify-checksum trigger with the computed value and
// returns the checksum bytes.
func verifyChecksum(t *testing.T, rng *Replica) []byte {
	ctx := context.Background()
	checksumID := uuid.MakeV4()
	rng.computeChecksumTrigger(ctx, roachpb.ComputeChecksumRequest{
		ChecksumID: checksumID,
		Version:    replicaChecksumVersion,
	})
	result, ok := rng.getChecksum(ctx, checksumID)
	if !ok {
		t.Fatalf("checksum for id = %v not found", checksumID)
	}
	if result.checksum == nil {
		t.Fatal("couldn't compute checksum")
	}
	rng.verifyChecksumTrigger(ctx, roachpb.VerifyChecksumRequest{
		ChecksumID: checksumID,
		Version:    replicaChecksumVersion,
		Checksum:   result.checksum,
	})
	return result.checksum
}
// TODO(tschottdorf): this test is really frail and unidiomatic. Consider
// some better high-level check of this functionality.
//
// TestComputeVerifyChecksum checks that a replica's computed checksum is
// stable across reads and changes across writes, and exercises the
// verify-checksum trigger's handling of bad versions, bad checksums with
// and without an attached snapshot, and no-op cases.
func TestComputeVerifyChecksum(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	rng := tc.rng
	key := roachpb.Key("a")
	{
		incArgs := incrementArgs(key, 23)
		if _, err := tc.SendWrapped(&incArgs); err != nil {
			t.Fatal(err)
		}
	}
	// We use this helper below to gauge whether another Raft command possibly
	// snuck in (in which case we recompute). We can't use the in-memory state
	// because it's not updated atomically with the batch and because this
	// test doesn't respect Raft ordering.
	getAppliedIndex := func() uint64 {
		rng.mu.Lock()
		defer rng.mu.Unlock()
		appliedIndex, _, err := loadAppliedIndex(context.Background(), rng.store.Engine(), rng.RangeID)
		if err != nil {
			t.Fatal(err)
		}
		return appliedIndex
	}
	// The following part of the test is inherently racy if other Raft commands
	// get processed (which could happen due to reproposals). The loop makes
	// sure that we catch this.
	util.SucceedsSoon(t, func() error {
		oldAppliedIndex := getAppliedIndex()
		initialChecksum := verifyChecksum(t, rng)
		// Getting a value will not affect the snapshot checksum.
		gArgs := getArgs(roachpb.Key("a"))
		if _, err := tc.SendWrapped(&gArgs); err != nil {
			t.Fatal(err)
		}
		checksum := verifyChecksum(t, rng)
		appliedIndex := getAppliedIndex()
		if appliedIndex != oldAppliedIndex {
			return errors.Errorf("applied index changed from %d to %d",
				oldAppliedIndex, appliedIndex)
		}
		if !bytes.Equal(initialChecksum, checksum) {
			t.Fatalf("changed checksum: e = %v, c = %v", initialChecksum, checksum)
		}
		return nil
	})
	util.SucceedsSoon(t, func() error {
		oldAppliedIndex := getAppliedIndex()
		initialChecksum := verifyChecksum(t, rng)
		// Modifying the range will change the checksum.
		incArgs := incrementArgs(key, 5)
		if _, err := tc.SendWrapped(&incArgs); err != nil {
			t.Fatal(err)
		}
		checksum := verifyChecksum(t, rng)
		appliedIndex := getAppliedIndex()
		if diff := appliedIndex - oldAppliedIndex; diff != 1 {
			return errors.Errorf("applied index changed by %d, from %d to %d",
				diff, oldAppliedIndex, appliedIndex)
		}
		if bytes.Equal(initialChecksum, checksum) {
			t.Fatalf("same checksum: e = %v, c = %v", initialChecksum, checksum)
		}
		return nil
	})
	// Verify that a bad version/checksum sent will result in an error.
	id1 := uuid.MakeV4()
	rng.computeChecksumTrigger(
		context.Background(),
		roachpb.ComputeChecksumRequest{
			ChecksumID: id1,
			Version:    replicaChecksumVersion,
		})
	// Set a callback for checksum mismatch panics.
	badChecksumChan := make(chan []ReplicaSnapshotDiff, 1)
	rng.store.ctx.TestingKnobs.BadChecksumPanic = func(diff []ReplicaSnapshotDiff) {
		badChecksumChan <- diff
	}
	// First test that sending a Verification request with a bad version and
	// bad checksum will return without panicking because of a bad checksum.
	rng.verifyChecksumTrigger(
		context.Background(),
		roachpb.VerifyChecksumRequest{
			ChecksumID: id1,
			Version:    10000001,
			Checksum:   []byte("bad checksum"),
		})
	// Non-blocking receive: nothing should have arrived on the channel.
	select {
	case badChecksum := <-badChecksumChan:
		t.Fatalf("bad checksum: %v", badChecksum)
	default:
	}
	// Setting the correct version will verify the checksum see a
	// checksum mismatch and trigger a rerun of the consistency check,
	// but the second consistency check will succeed because the checksum
	// provided in the second consistency check is the correct one.
	rng.verifyChecksumTrigger(
		context.Background(),
		roachpb.VerifyChecksumRequest{
			ChecksumID: id1,
			Version:    replicaChecksumVersion,
			Checksum:   []byte("bad checksum"),
		})
	select {
	case badChecksum := <-badChecksumChan:
		t.Fatalf("bad checksum: %v", badChecksum)
	default:
	}
	// Repeat the same but provide a snapshot this time. This will
	// result in the checksum failure not running the second consistency
	// check; it will panic.
	rng.verifyChecksumTrigger(
		context.Background(),
		roachpb.VerifyChecksumRequest{
			ChecksumID: id1,
			Version:    replicaChecksumVersion,
			Checksum:   []byte("bad checksum"),
			Snapshot:   &roachpb.RaftSnapshotData{},
		})
	// This time the mismatch callback must have fired.
	select {
	case <-badChecksumChan:
	default:
		t.Fatal("expected bad checksum, but did not get one")
	}
	id2 := uuid.MakeV4()
	// Sending a ComputeChecksum with a bad version doesn't result in a
	// computed checksum.
	if _, _, err := rng.ComputeChecksum(
		context.Background(),
		nil,
		nil,
		roachpb.Header{},
		roachpb.ComputeChecksumRequest{
			ChecksumID: id2,
			Version:    23343434,
		},
	); err != nil {
		t.Fatal(err)
	}
	// Sending a VerifyChecksum with a bad checksum is a noop.
	rng.verifyChecksumTrigger(
		context.Background(),
		roachpb.VerifyChecksumRequest{
			ChecksumID: id2,
			Version:    replicaChecksumVersion,
			Checksum:   []byte("bad checksum"),
			Snapshot:   &roachpb.RaftSnapshotData{},
		})
	select {
	case badChecksum := <-badChecksumChan:
		t.Fatalf("bad checksum: %v", badChecksum)
	default:
	}
}
// TestNewReplicaCorruptionError checks the string rendering of
// ReplicaCorruptionError for empty, simple and wrapped underlying errors.
func TestNewReplicaCorruptionError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	testCases := []struct {
		errStruct *roachpb.ReplicaCorruptionError
		expErr    string
	}{
		{NewReplicaCorruptionError(errors.New("")), "replica corruption (processed=false)"},
		{NewReplicaCorruptionError(errors.New("foo")), "replica corruption (processed=false): foo"},
		{NewReplicaCorruptionError(errors.Wrap(errors.New("bar"), "foo")), "replica corruption (processed=false): foo: bar"},
	}
	for i, test := range testCases {
		// fmt.Sprint ends up invoking Error(), which is the intended
		// rendering path. An earlier version of this test called String()
		// directly and exercised the wrong (reflection-based) code.
		got := fmt.Sprint(test.errStruct)
		if got != test.expErr {
			t.Errorf("%d: expected '%s' but got '%s'", i, test.expErr, got)
		}
	}
}
// TestDiffRange verifies diffRange, which compares a lease holder
// snapshot against a replica snapshot and reports every key/timestamp/
// value present on one side but not the other, tagging each entry with
// the side it came from (LeaseHolder true/false).
func TestDiffRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Two nil snapshots diff to nothing.
	if diff := diffRange(nil, nil); diff != nil {
		t.Fatalf("diff of nils = %v", diff)
	}
	timestamp := hlc.Timestamp{WallTime: 1729, Logical: 1}
	value := []byte("foo")
	// Construct the two snapshots.
	leaderSnapshot := &roachpb.RaftSnapshotData{
		KV: []roachpb.RaftSnapshotData_KeyValue{
			{Key: []byte("a"), Timestamp: timestamp, Value: value},
			{Key: []byte("abc"), Timestamp: timestamp, Value: value},
			{Key: []byte("abcd"), Timestamp: timestamp, Value: value},
			{Key: []byte("abcde"), Timestamp: timestamp, Value: value},
			{Key: []byte("abcdefg"), Timestamp: timestamp, Value: value},
			{Key: []byte("abcdefg"), Timestamp: timestamp.Add(0, -1), Value: value},
			{Key: []byte("abcdefgh"), Timestamp: timestamp, Value: value},
			{Key: []byte("x"), Timestamp: timestamp, Value: value},
			{Key: []byte("y"), Timestamp: timestamp, Value: value},
		},
	}
	// No diff works.
	if diff := diffRange(leaderSnapshot, leaderSnapshot); diff != nil {
		t.Fatalf("diff of similar snapshots = %v", diff)
	}
	replicaSnapshot := &roachpb.RaftSnapshotData{
		KV: []roachpb.RaftSnapshotData_KeyValue{
			{Key: []byte("ab"), Timestamp: timestamp, Value: value},
			{Key: []byte("abc"), Timestamp: timestamp, Value: value},
			{Key: []byte("abcde"), Timestamp: timestamp, Value: value},
			{Key: []byte("abcdef"), Timestamp: timestamp, Value: value},
			{Key: []byte("abcdefg"), Timestamp: timestamp.Add(0, 1), Value: value},
			{Key: []byte("abcdefg"), Timestamp: timestamp, Value: value},
			{Key: []byte("abcdefgh"), Timestamp: timestamp, Value: value},
			{Key: []byte("x"), Timestamp: timestamp, Value: []byte("bar")},
			{Key: []byte("z"), Timestamp: timestamp, Value: value},
		},
	}
	// The expected diff.
	eDiff := []ReplicaSnapshotDiff{
		{LeaseHolder: true, Key: []byte("a"), Timestamp: timestamp, Value: value},
		{LeaseHolder: false, Key: []byte("ab"), Timestamp: timestamp, Value: value},
		{LeaseHolder: true, Key: []byte("abcd"), Timestamp: timestamp, Value: value},
		{LeaseHolder: false, Key: []byte("abcdef"), Timestamp: timestamp, Value: value},
		{LeaseHolder: false, Key: []byte("abcdefg"), Timestamp: timestamp.Add(0, 1), Value: value},
		{LeaseHolder: true, Key: []byte("abcdefg"), Timestamp: timestamp.Add(0, -1), Value: value},
		{LeaseHolder: true, Key: []byte("x"), Timestamp: timestamp, Value: value},
		{LeaseHolder: false, Key: []byte("x"), Timestamp: timestamp, Value: []byte("bar")},
		{LeaseHolder: true, Key: []byte("y"), Timestamp: timestamp, Value: value},
		{LeaseHolder: false, Key: []byte("z"), Timestamp: timestamp, Value: value},
	}
	diff := diffRange(leaderSnapshot, replicaSnapshot)
	if diff == nil {
		t.Fatalf("differing snapshots didn't reveal diff %v", diff)
	}
	if len(eDiff) != len(diff) {
		t.Fatalf("expected diff length different from diff (%d vs %d) , %v vs %v", len(eDiff), len(diff), eDiff, diff)
	}
	// Compare the diff entry-by-entry against expectations.
	for i, e := range eDiff {
		v := diff[i]
		if e.LeaseHolder != v.LeaseHolder || !bytes.Equal(e.Key, v.Key) || !e.Timestamp.Equal(v.Timestamp) || !bytes.Equal(e.Value, v.Value) {
			t.Fatalf("diff varies at row %d, %v vs %v", i, e, v)
		}
	}
}
// TestAsyncSnapshot verifies asynchronous snapshot generation: with
// BlockingSnapshotDuration set to zero, the first Snapshot() call returns
// raft.ErrSnapshotTemporarilyUnavailable, and a subsequent call eventually
// returns a non-empty snapshot.
func TestAsyncSnapshot(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tsc := TestStoreContext()
	tsc.BlockingSnapshotDuration = 0
	tc := testContext{}
	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()

	// Lock the replica manually instead of going through GetSnapshot()
	// because we want to test the underlying async functionality.
	tc.rng.mu.Lock()
	_, err := tc.rng.Snapshot()
	tc.rng.mu.Unlock()

	// In async operation, the first call never succeeds.
	if err != raft.ErrSnapshotTemporarilyUnavailable {
		t.Fatalf("expected ErrSnapshotTemporarilyUnavailable, got %s", err)
	}

	// It will eventually succeed.
	util.SucceedsSoon(t, func() error {
		tc.rng.mu.Lock()
		snap, err := tc.rng.Snapshot()
		tc.rng.mu.Unlock()
		if err != nil {
			return err
		}
		if len(snap.Data) == 0 {
			return errors.Errorf("snapshot is empty")
		}
		return nil
	})
}
// TestAsyncSnapshotMaxAge verifies that an asynchronously generated snapshot
// which is not collected within AsyncSnapshotMaxAge is abandoned: the
// internal snapshot channel is closed without delivering a result.
func TestAsyncSnapshotMaxAge(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tsc := TestStoreContext()
	tsc.BlockingSnapshotDuration = 0
	// Expire generated-but-unclaimed snapshots almost immediately.
	tsc.AsyncSnapshotMaxAge = time.Millisecond
	tc := testContext{}
	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()

	// Lock the replica manually instead of going through GetSnapshot()
	// because we want to test the underlying async functionality.
	tc.rng.mu.Lock()
	_, err := tc.rng.Snapshot()
	tc.rng.mu.Unlock()

	// In async operation, the first call never succeeds.
	if err != raft.ErrSnapshotTemporarilyUnavailable {
		t.Fatalf("expected ErrSnapshotTemporarilyUnavailable, got %s", err)
	}

	// Wait for the snapshot to be generated and abandoned.
	time.Sleep(100 * time.Millisecond)
	tc.rng.mu.Lock()
	defer tc.rng.mu.Unlock()
	// The channel was closed without producing a result.
	snap, ok := <-tc.rng.mu.snapshotChan
	if ok {
		t.Fatalf("expected channel to be closed but got result: %v", snap)
	}
}
// TestSyncSnapshot verifies synchronous snapshot generation: with a
// generous BlockingSnapshotDuration, the very first Snapshot() call
// returns a complete, non-empty snapshot.
func TestSyncSnapshot(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tsc := TestStoreContext()
	tsc.BlockingSnapshotDuration = time.Second
	tc := testContext{}
	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()

	// With enough time in BlockingSnapshotDuration, we succeed on the
	// first try.
	tc.rng.mu.Lock()
	snap, err := tc.rng.Snapshot()
	tc.rng.mu.Unlock()

	if err != nil {
		t.Fatal(err)
	}
	if len(snap.Data) == 0 {
		t.Fatal("snapshot is empty")
	}
}
// TestReplicaIDChangePending verifies that on a replica ID change, pending
// commands are re-proposed on the new raft group.
func TestReplicaIDChangePending(t *testing.T) {
	defer leaktest.AfterTest(t)()

	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	rng := tc.rng

	// Stop the command from being proposed to the raft group and being removed.
	// The no-op proposal function keeps the command parked in the pending list.
	rng.mu.Lock()
	rng.mu.proposeRaftCommandFn = func(p *pendingCmd) error { return nil }
	rng.mu.Unlock()

	// Add a command to the pending list, tagged with a recognizable timestamp.
	magicTS := tc.clock.Now()
	ba := roachpb.BatchRequest{}
	ba.Timestamp = magicTS
	ba.Add(&roachpb.GetRequest{
		Span: roachpb.Span{
			Key: roachpb.Key("a"),
		},
	})
	_, _, err := rng.proposeRaftCommand(context.Background(), ba)
	if err != nil {
		t.Fatal(err)
	}

	// Set the raft command handler so we can tell if the command has been
	// re-proposed.
	commandProposed := make(chan struct{}, 1)
	rng.mu.Lock()
	defer rng.mu.Unlock()
	rng.mu.proposeRaftCommandFn = func(p *pendingCmd) error {
		if p.raftCmd.Cmd.Timestamp.Equal(magicTS) {
			commandProposed <- struct{}{}
		}
		return nil
	}

	// Set the ReplicaID on the replica; this should trigger re-proposal of
	// the pending command on the new raft group.
	if err := rng.setReplicaIDLocked(2); err != nil {
		t.Fatal(err)
	}

	// Non-blocking receive: the re-proposal must already have happened.
	select {
	case <-commandProposed:
	default:
		t.Fatal("command was not re-proposed")
	}
}
// runWrongIndexTest runs a reproposal or refurbishment test, optionally
// simulating an error during the renewal of the command. If repropose is
// false, refurbishes instead.
// If withErr is true, injects an error when the reproposal or refurbishment
// takes place.
// expProposals is the number of proposals (of the tagged command) expected
// to reach the proposal function.
func runWrongIndexTest(t *testing.T, repropose bool, withErr bool, expProposals int32) {
	var tc testContext
	tc.Start(t)
	defer tc.Stop()

	prefix := fmt.Sprintf("repropose=%t withErr=%t: ", repropose, withErr)
	fatalf := func(msg string, args ...interface{}) {
		t.Fatal(errors.Errorf(prefix+msg, args...))
	}

	// Commands carrying this context key are the ones under test.
	type magicKey struct{}

	var c int32 // updated atomically
	tc.rng.mu.Lock()
	tc.rng.mu.proposeRaftCommandFn = func(cmd *pendingCmd) error {
		if v := cmd.ctx.Value(magicKey{}); v != nil {
			curAttempt := atomic.AddInt32(&c, 1)
			// Inject the error either on every attempt (repropose) or only
			// on the second attempt (refurbishment path).
			if (repropose || curAttempt == 2) && withErr {
				return errors.New("boom")
			}
		}
		return defaultProposeRaftCommandLocked(tc.rng, cmd)
	}
	tc.rng.mu.Unlock()

	if pErr := tc.rng.redirectOnOrAcquireLease(context.Background()); pErr != nil {
		fatalf("%s", pErr)
	}

	// Commit a throwaway batch to move the lease applied index off zero.
	pArg := putArgs(roachpb.Key("a"), []byte("asd"))
	{
		var ba roachpb.BatchRequest
		ba.Add(&pArg)
		ba.Timestamp = tc.clock.Now()
		if _, pErr := tc.Sender().Send(context.Background(), ba); pErr != nil {
			t.Fatal(pErr)
		}
	}

	tc.rng.mu.Lock()
	ai := tc.rng.mu.state.LeaseAppliedIndex
	tc.rng.mu.Unlock()
	if ai < 1 {
		t.Fatal("committed a batch, but still at lease index zero")
	}

	wrongIndex := ai - 1 // will choose this as MaxLeaseIndex

	log.Infof(context.Background(), "test begins")

	var ba roachpb.BatchRequest
	ba.Timestamp = tc.clock.Now()
	ba.Add(&pArg)
	repDesc, err := tc.rng.GetReplicaDescriptor()
	if err != nil {
		t.Fatal(err)
	}
	ch := func() chan roachpb.ResponseWithError {
		tc.rng.mu.Lock()
		defer tc.rng.mu.Unlock()
		// Make a new command, but pretend it didn't increment the assignment
		// counter. This leaks some implementation, but not too much.
		preAssigned := tc.rng.mu.lastAssignedLeaseIndex
		cmd := tc.rng.prepareRaftCommandLocked(
			context.WithValue(context.Background(), magicKey{}, "foo"),
			makeIDKey(), repDesc, ba)
		tc.rng.mu.lastAssignedLeaseIndex = preAssigned
		// Force a stale MaxLeaseIndex so the command will be considered for
		// refurbishment when it applies.
		cmd.raftCmd.MaxLeaseIndex = wrongIndex
		tc.rng.insertRaftCommandLocked(cmd)
		if repropose {
			if err := tc.rng.refreshPendingCmdsLocked(noReason, 0); err != nil {
				fatalf("%s", err)
			}
		} else if err := tc.rng.proposePendingCmdLocked(cmd); err != nil {
			fatalf("%s", err)
		}
		return cmd.done
	}()

	// The error the client should observe depends on which renewal path ran.
	var errStr string
	if repropose {
		errStr = "boom"
	} else {
		errStr = "observed at lease index"
	}
	if rwe := <-ch; rwe.Err != nil != withErr ||
		(withErr && !testutils.IsPError(rwe.Err, errStr)) {
		fatalf("%s", rwe.Err)
	}
	if n := atomic.LoadInt32(&c); n != expProposals {
		fatalf("expected %d proposals, got %d", expProposals, n)
	}
}
// Making the test more fun for human eyes. These name the boolean
// arguments of runWrongIndexTest at its call sites.
const (
	propose   = false
	repropose = true
	noErr     = false
	withErr   = true
)
// TestReplicaRefurbishOnWrongIndex_ReproposeNoError exercises the reproposal
// path without an injected error; exactly one proposal is expected.
func TestReplicaRefurbishOnWrongIndex_ReproposeNoError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Set up a command at wrong index, but don't propose it but
	// immediately call the repropose logic, which should refurbish it.
	runWrongIndexTest(t, repropose, noErr, 1)
}
// TestReplicaRefurbishOnWrongIndex_ReproposeError exercises the reproposal
// path with an injected error; the client must receive that error.
func TestReplicaRefurbishOnWrongIndex_ReproposeError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Like its NoError variant, but the reproposal errors out and is
	// received by the client.
	runWrongIndexTest(t, repropose, withErr, 1)
}
// TestReplicaRefurbishOnWrongIndex_ProposeNoError exercises the refurbishment
// path without an injected error; two proposals are expected (the original
// plus the refurbished one).
func TestReplicaRefurbishOnWrongIndex_ProposeNoError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Propose a command at a past index and let the application of the command
	// refurbish it successfully.
	runWrongIndexTest(t, propose, noErr, 2)
}
// TestReplicaRefurbishOnWrongIndex_ProposeError exercises the refurbishment
// path with an injected error; two proposals are expected and the client
// must receive the error.
func TestReplicaRefurbishOnWrongIndex_ProposeError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Propose a command at a past index and let the application of the command
	// refurbish it. Refurbishing fails; asserts that the client receives
	// the error.
	runWrongIndexTest(t, propose, withErr, 2)
}
// TestReplicaCancelRaftCommandProgress creates a number of Raft commands and
// immediately abandons some of them, while proposing the remaining ones. It
// then verifies that all the non-abandoned commands get applied (which would
// not be the case if gaps in the applied index posed an issue).
func TestReplicaCancelRaftCommandProgress(t *testing.T) {
	defer leaktest.AfterTest(t)()
	var tc testContext
	tc.Start(t)
	defer tc.Stop()
	rng := tc.rng
	repDesc, err := rng.GetReplicaDescriptor()
	if err != nil {
		t.Fatal(err)
	}

	const num = 10

	// Channels of the commands that were actually proposed.
	var chs []chan roachpb.ResponseWithError

	func() {
		rng.mu.Lock()
		defer rng.mu.Unlock()
		for i := 0; i < num; i++ {
			var ba roachpb.BatchRequest
			ba.Timestamp = tc.clock.Now()
			ba.Add(&roachpb.PutRequest{Span: roachpb.Span{
				Key: roachpb.Key(fmt.Sprintf("k%d", i))}})
			cmd := rng.prepareRaftCommandLocked(context.Background(), makeIDKey(), repDesc, ba)
			rng.insertRaftCommandLocked(cmd)
			// We actually propose the command only if we don't
			// cancel it to simulate the case in which Raft loses
			// the command and it isn't reproposed due to the
			// client abandoning it.
			if rand.Intn(2) == 0 {
				log.Infof(context.Background(), "abandoning command %d", i)
				delete(rng.mu.pendingCmds, cmd.idKey)
			} else if err := rng.proposePendingCmdLocked(cmd); err != nil {
				t.Fatal(err)
			} else {
				chs = append(chs, cmd.done)
			}
		}
	}()

	// Every proposed (non-abandoned) command must complete without error.
	for _, ch := range chs {
		if rwe := <-ch; rwe.Err != nil {
			t.Fatal(rwe.Err)
		}
	}
}
// TestReplicaBurstPendingCommandsAndRepropose verifies that a burst of
// proposed commands assigns a correct sequence of required indexes,
// and then goes and checks that a reproposal (without prior proposal) results
// in these commands applying at the computed indexes.
func TestReplicaBurstPendingCommandsAndRepropose(t *testing.T) {
	defer leaktest.AfterTest(t)()
	var tc testContext
	sc := TestStoreContext()
	// Ensure that the refresh call in this test is the only reason
	// commands will get reproposed.
	// TODO(bdarnell): why is this single-node test seeing a "new
	// leader" (on rare occasions)? #8422
	sc.TestingKnobs.DisableRefreshReasonNewLeader = true
	tc.StartWithStoreContext(t, sc)
	defer tc.Stop()

	const num = 10
	repDesc, err := tc.rng.GetReplicaDescriptor()
	if err != nil {
		t.Fatal(err)
	}

	// Commands carrying this context key are the ones under test.
	type magicKey struct{}

	// Records, in arrival order, the MaxLeaseIndex of each tagged command
	// that reaches the proposal function.
	var seenCmds []int
	tc.rng.mu.Lock()
	tc.rng.mu.proposeRaftCommandFn = func(cmd *pendingCmd) error {
		if v := cmd.ctx.Value(magicKey{}); v != nil {
			seenCmds = append(seenCmds, int(cmd.raftCmd.MaxLeaseIndex))
		}
		return defaultProposeRaftCommandLocked(tc.rng, cmd)
	}
	tc.rng.mu.Unlock()

	if pErr := tc.rng.redirectOnOrAcquireLease(context.Background()); pErr != nil {
		t.Fatal(pErr)
	}

	expIndexes := make([]int, 0, num)
	chs := func() []chan roachpb.ResponseWithError {
		tc.rng.mu.Lock()
		defer tc.rng.mu.Unlock()
		chs := make([]chan roachpb.ResponseWithError, 0, num)

		origIndexes := make([]int, 0, num)
		for i := 0; i < num; i++ {
			expIndexes = append(expIndexes, i+1)
			ctx := context.WithValue(context.Background(), magicKey{}, "foo")
			var ba roachpb.BatchRequest
			ba.Timestamp = tc.clock.Now()
			ba.Add(&roachpb.PutRequest{Span: roachpb.Span{
				Key: roachpb.Key(fmt.Sprintf("k%d", i))}})
			cmd := tc.rng.prepareRaftCommandLocked(ctx, makeIDKey(), repDesc, ba)
			tc.rng.insertRaftCommandLocked(cmd)
			chs = append(chs, cmd.done)
		}

		// Verify that preparation assigned the expected (sequential) indexes.
		for _, p := range tc.rng.mu.pendingCmds {
			if v := p.ctx.Value(magicKey{}); v != nil {
				origIndexes = append(origIndexes, int(p.raftCmd.MaxLeaseIndex))
			}
		}

		sort.Ints(origIndexes)

		if !reflect.DeepEqual(expIndexes, origIndexes) {
			t.Fatalf("wanted required indexes %v, got %v", expIndexes, origIndexes)
		}

		// Repropose everything; the commands were never proposed before.
		if err := tc.rng.refreshPendingCmdsLocked(noReason, 0); err != nil {
			t.Fatal(err)
		}
		return chs
	}()
	for _, ch := range chs {
		if pErr := (<-ch).Err; pErr != nil {
			t.Fatal(pErr)
		}
	}

	// The proposal function must have seen the commands in index order.
	if !reflect.DeepEqual(seenCmds, expIndexes) {
		t.Fatalf("expected indexes %v, got %v", expIndexes, seenCmds)
	}

	util.SucceedsSoon(t, func() error {
		tc.rng.mu.Lock()
		defer tc.rng.mu.Unlock()
		nonePending := len(tc.rng.mu.pendingCmds) == 0
		c := int(tc.rng.mu.lastAssignedLeaseIndex) - int(tc.rng.mu.state.LeaseAppliedIndex)
		if nonePending && c > 0 {
			return fmt.Errorf("no pending cmds, but have required index offset %d", c)
		}
		if nonePending {
			return nil
		}
		return errors.New("still pending commands")
	})
}
// TestReplicaRefreshPendingCommandsTicks verifies that pending commands are
// reproposed only on ticks that are a multiple of the raft election timeout,
// by manually ticking the replica and tracking proposedAtTicks of each
// pending command.
func TestReplicaRefreshPendingCommandsTicks(t *testing.T) {
	defer leaktest.AfterTest(t)()
	var tc testContext
	tc.Start(t)
	defer tc.Stop()

	// Grab processRaftMu in order to block normal raft replica processing. This
	// test is ticking the replicas manually and doesn't want the store to be
	// doing so concurrently.
	tc.store.processRaftMu.Lock()
	defer tc.store.processRaftMu.Unlock()

	r := tc.rng
	repDesc, err := r.GetReplicaDescriptor()
	if err != nil {
		t.Fatal(err)
	}

	electionTicks := tc.store.ctx.RaftElectionTimeoutTicks

	{
		// The verifications of the reproposal counts below rely on r.mu.ticks
		// starting with a value of 0 (modulo electionTicks). Move the replica into
		// that state in case the replica was ticked before we grabbed
		// processRaftMu.
		r.mu.Lock()
		ticks := r.mu.ticks
		r.mu.Unlock()
		for ; (ticks % electionTicks) != 0; ticks++ {
			if err := r.tick(); err != nil {
				t.Fatal(err)
			}
		}
	}

	// We tick the replica 2*RaftElectionTimeoutTicks. RaftElectionTimeoutTicks
	// is special in that it controls how often pending commands are reproposed
	// or refurbished.
	for i := 0; i < 2*electionTicks; i++ {
		// Add another pending command on each iteration.
		r.mu.Lock()
		id := fmt.Sprintf("%08d", i)
		var ba roachpb.BatchRequest
		ba.Timestamp = tc.clock.Now()
		ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: roachpb.Key(id)}})
		cmd := r.prepareRaftCommandLocked(context.Background(),
			storagebase.CmdIDKey(id), repDesc, ba)
		r.insertRaftCommandLocked(cmd)
		if err := r.proposePendingCmdLocked(cmd); err != nil {
			t.Fatal(err)
		}

		// Build a map from command key to proposed-at-ticks.
		m := map[storagebase.CmdIDKey]int{}
		for id, p := range r.mu.pendingCmds {
			m[id] = p.proposedAtTicks
		}
		r.mu.Unlock()

		// Tick raft.
		if err := r.tick(); err != nil {
			t.Fatal(err)
		}

		// Gather up the reproprosed commands: any command whose
		// proposedAtTicks changed across the tick was reproposed.
		r.mu.Lock()
		var reproposed []*pendingCmd
		for id, p := range r.mu.pendingCmds {
			if m[id] != p.proposedAtTicks {
				reproposed = append(reproposed, p)
			}
		}
		ticks := r.mu.ticks
		r.mu.Unlock()

		// Reproposals are only performed every electionTicks. We'll need to fix
		// this test if that changes.
		if (ticks % electionTicks) == 0 {
			if len(reproposed) != i-1 {
				t.Fatalf("%d: expected %d reproprosed commands, but found %+v", i, i-1, reproposed)
			}
		} else {
			if len(reproposed) != 0 {
				t.Fatalf("%d: expected no reproprosed commands, but found %+v", i, reproposed)
			}
		}
	}
}
// TestReplicaDoubleRefurbish exercises a code path in which a command is seen
// fit for refurbishment, but has already been refurbished earlier (with that
// command being in-flight). See #7185.
func TestReplicaDoubleRefurbish(t *testing.T) {
	defer leaktest.AfterTest(t)()
	var tc testContext
	tc.Start(t)
	defer tc.Stop()

	var ba roachpb.BatchRequest
	ba.Timestamp = tc.clock.Now()
	ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: roachpb.Key("r")}})
	repDesc, err := tc.rng.GetReplicaDescriptor()
	if err != nil {
		t.Fatal(err)
	}

	// Make a Raft command; we'll set things up so that it will be considered
	// for refurbishment multiple times.
	tc.rng.mu.Lock()
	cmd := tc.rng.prepareRaftCommandLocked(context.Background(), makeIDKey(), repDesc, ba)
	ch := cmd.done // must not use cmd outside of mutex
	tc.rng.mu.Unlock()

	{
		// Send some random request to advance the lease applied counter to
		// make `cmd` refurbish when we put it into Raft.
		var ba roachpb.BatchRequest
		ba.Timestamp = tc.clock.Now()
		pArgs := putArgs(roachpb.Key("foo"), []byte("bar"))
		ba.Add(&pArgs)
		if _, pErr := tc.rng.Send(context.Background(), ba); pErr != nil {
			t.Fatal(pErr)
		}
	}

	const num = 10
	func() {
		tc.rng.mu.Lock()
		defer tc.rng.mu.Unlock()
		// Insert the command and propose it ten times. Before the commit
		// which introduced this test, the first application would repropose,
		// and the second would decide to not repropose, but accidentally send
		// the error to the client, so that the successful refurbishment would
		// be the second result received by the client.
		tc.rng.insertRaftCommandLocked(cmd)
		for i := 0; i < num; i++ {
			if err := tc.rng.proposePendingCmdLocked(cmd); err != nil {
				t.Fatal(err)
			}
		}
	}()

	// Exactly one response must arrive on the done channel; a second
	// response (before the channel closes) is the regression being tested.
	var i int
	for resp := range ch {
		i++
		if i != 1 {
			t.Fatalf("received more than one response on the done channel: %+v", resp)
		}
	}
}
// TestCommandTimeThreshold verifies that commands outside the replica GC
// threshold fail: after a GC at threshold ts2, reads and CPuts at or below
// ts2 are rejected while those above it succeed.
func TestCommandTimeThreshold(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	ts := makeTS(1, 0)
	ts2 := makeTS(2, 0)
	ts3 := makeTS(3, 0)

	key := roachpb.Key("a")
	keycp := roachpb.Key("c")

	va := []byte("a")
	vb := []byte("b")

	// Verify a Get works.
	gArgs := getArgs(key)
	if _, err := tc.SendWrappedWith(roachpb.Header{
		Timestamp: ts,
	}, &gArgs); err != nil {
		t.Fatalf("could not get data: %s", err)
	}
	// Verify a later Get works.
	if _, err := tc.SendWrappedWith(roachpb.Header{
		Timestamp: ts3,
	}, &gArgs); err != nil {
		t.Fatalf("could not get data: %s", err)
	}

	// Put some data for use with CP later on.
	pArgs := putArgs(keycp, va)
	if _, err := tc.SendWrappedWith(roachpb.Header{
		Timestamp: ts,
	}, &pArgs); err != nil {
		t.Fatalf("could not put data: %s", err)
	}

	// Do a GC, which moves the replica GC threshold up to ts2.
	gcr := roachpb.GCRequest{
		Threshold: ts2,
	}
	if _, err := tc.SendWrapped(&gcr); err != nil {
		t.Fatal(err)
	}

	// Do the same Get, which should now fail: its timestamp is below the
	// threshold, and the error message is matched exactly.
	if _, err := tc.SendWrappedWith(roachpb.Header{
		Timestamp: ts,
	}, &gArgs); err == nil {
		t.Fatal("expected failure")
	} else if err.String() != "batch timestamp 0.000000001,0 must be after replica GC threshold 0.000000002,0" {
		t.Fatalf("unexpected error: %s", err)
	}

	// Verify a later Get works.
	if _, err := tc.SendWrappedWith(roachpb.Header{
		Timestamp: ts3,
	}, &gArgs); err != nil {
		t.Fatalf("could not get data: %s", err)
	}

	// Verify an early CPut fails: a timestamp equal to the threshold is
	// still rejected (the batch must be strictly after the threshold).
	cpArgs := cPutArgs(keycp, vb, va)
	if _, err := tc.SendWrappedWith(roachpb.Header{
		Timestamp: ts2,
	}, &cpArgs); err == nil {
		t.Fatal("expected failure")
	} else if err.String() != "batch timestamp 0.000000002,0 must be after replica GC threshold 0.000000002,0" {
		t.Fatalf("unexpected error: %s", err)
	}
	// Verify a later CPut works.
	if _, err := tc.SendWrappedWith(roachpb.Header{
		Timestamp: ts3,
	}, &cpArgs); err != nil {
		t.Fatalf("could not cput data: %s", err)
	}
}
// TestReserveAndApplySnapshot checks to see if a snapshot is correctly applied
// and that its reservation is removed.
func TestReserveAndApplySnapshot(t *testing.T) {
	defer leaktest.AfterTest(t)()

	tsc := TestStoreContext()
	tc := testContext{}
	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()

	// checkReservations asserts the number of active reservations in the
	// store's bookie.
	checkReservations := func(t *testing.T, expected int) {
		tc.store.bookie.mu.Lock()
		defer tc.store.bookie.mu.Unlock()
		if e, a := expected, len(tc.store.bookie.mu.reservationsByRangeID); e != a {
			t.Fatalf("wrong number of reservations - expected:%d, actual:%d", e, a)
		}
	}

	key := roachpb.RKey("a")
	firstRng := tc.store.LookupReplica(key, nil)
	snap, err := firstRng.GetSnapshot(context.Background())
	if err != nil {
		t.Fatal(err)
	}

	// Report enough available bytes for the reservation to be accepted.
	tc.store.metrics.Available.Update(tc.store.bookie.maxReservedBytes)

	// Note that this is an artificial scenario in which we're adding a
	// reservation for a replica that is already on the range. This test is
	// designed to test the filling of the reservation specifically and in
	// normal operation there should not be a reservation for an existing
	// replica.
	req := ReservationRequest{
		StoreRequestHeader: StoreRequestHeader{
			StoreID: tc.store.StoreID(),
			NodeID:  tc.store.nodeDesc.NodeID,
		},
		RangeID:   firstRng.RangeID,
		RangeSize: 10,
	}

	if !tc.store.Reserve(context.Background(), req).Reserved {
		t.Fatalf("Can't reserve the replica")
	}
	checkReservations(t, 1)

	// Apply a snapshot and check the reservation was filled. Note that this
	// out-of-band application could be a root cause if this test ever crashes.
	if err := firstRng.applySnapshot(context.Background(), snap, raftpb.HardState{}); err != nil {
		t.Fatal(err)
	}
	checkReservations(t, 0)
}
storage: Skip TestReplicaBurstPendingCommandsAndRepropose
Revert #8419, which did not fix the problem.
// Copyright 2014 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Spencer Kimball (spencer.kimball@gmail.com)
package storage
import (
"bytes"
"fmt"
"math"
"math/rand"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"golang.org/x/net/context"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
"github.com/cockroachdb/cockroach/base"
"github.com/cockroachdb/cockroach/config"
"github.com/cockroachdb/cockroach/gossip"
"github.com/cockroachdb/cockroach/internal/client"
"github.com/cockroachdb/cockroach/keys"
"github.com/cockroachdb/cockroach/roachpb"
"github.com/cockroachdb/cockroach/rpc"
"github.com/cockroachdb/cockroach/storage/engine"
"github.com/cockroachdb/cockroach/storage/engine/enginepb"
"github.com/cockroachdb/cockroach/storage/storagebase"
"github.com/cockroachdb/cockroach/testutils"
"github.com/cockroachdb/cockroach/util"
"github.com/cockroachdb/cockroach/util/caller"
"github.com/cockroachdb/cockroach/util/hlc"
"github.com/cockroachdb/cockroach/util/leaktest"
"github.com/cockroachdb/cockroach/util/log"
"github.com/cockroachdb/cockroach/util/metric"
"github.com/cockroachdb/cockroach/util/protoutil"
"github.com/cockroachdb/cockroach/util/stop"
"github.com/cockroachdb/cockroach/util/uuid"
)
// testRangeDescriptor returns a descriptor for range 1 spanning the whole
// keyspace with a single replica on node 1 / store 1, and NextReplicaID 2.
func testRangeDescriptor() *roachpb.RangeDescriptor {
	onlyReplica := roachpb.ReplicaDescriptor{
		ReplicaID: 1,
		NodeID:    1,
		StoreID:   1,
	}
	desc := roachpb.RangeDescriptor{
		RangeID:       1,
		StartKey:      roachpb.RKeyMin,
		EndKey:        roachpb.RKeyMax,
		Replicas:      []roachpb.ReplicaDescriptor{onlyReplica},
		NextReplicaID: 2,
	}
	return &desc
}
// bootstrapMode controls how the first range is created in testContext.
type bootstrapMode int

const (
	// Use Store.BootstrapRange, which writes the range descriptor and
	// other metadata. Most tests should use this mode because it more
	// closely resembles the real world.
	bootstrapRangeWithMetadata bootstrapMode = iota
	// Create a range with NewRange and Store.AddRangeTest. The store's data
	// will be persisted but metadata will not.
	//
	// Tests which run in this mode play fast and loose; they want
	// a Replica which doesn't have too many moving parts, but then
	// may still exercise a sizable amount of code, be it by accident
	// or design. We bootstrap them here with what's absolutely
	// necessary to not immediately crash on a Raft command, but
	// nothing more.
	// If you read this and you're writing a new test, try not to
	// use this mode - it's deprecated and tends to get in the way
	// of new development.
	bootstrapRangeOnly
)
// leaseExpiry returns a duration in nanos after which any range lease the
// Replica may hold is expired. It is more precise than LeaseExpiration
// in that it returns the minimal duration necessary.
func leaseExpiry(rng *Replica) int64 {
	lease, _ := rng.getLease()
	if lease == nil {
		// No lease: nothing to wait out.
		return 0
	}
	return lease.Expiration.WallTime + 1
}
// testContext contains all the objects necessary to test a Range.
// In most cases, simply call Start(t) (and later Stop()) on a zero-initialized
// testContext{}. Any fields which are initialized to non-nil values
// will be used as-is.
type testContext struct {
	testing.TB
	transport   *RaftTransport   // raft transport (dummy by default)
	store       *Store           // the store hosting the test replica
	rng         *Replica         // the replica under test
	rangeID     roachpb.RangeID  // ID of rng, cached at startup
	gossip      *gossip.Gossip   // gossip instance backing the store
	engine      engine.Engine    // storage engine (in-mem by default)
	manualClock *hlc.ManualClock // manual time source feeding clock
	clock       *hlc.Clock       // HLC clock used by the store
	stopper     *stop.Stopper    // owns all started resources
	// bootstrapMode selects how the first range is created; see bootstrapMode.
	bootstrapMode bootstrapMode
}
// Start initializes the test context with a single range covering the
// entire keyspace, using the default test store context.
func (tc *testContext) Start(t testing.TB) {
	ctx := TestStoreContext()
	tc.StartWithStoreContext(t, ctx)
}
// StartWithStoreContext initializes the test context with a single
// range covering the entire keyspace. Fields of tc that are already
// non-nil are used as-is; everything else is created and hooked into
// the supplied StoreContext.
func (tc *testContext) StartWithStoreContext(t testing.TB, ctx StoreContext) {
	tc.TB = t
	if tc.stopper == nil {
		tc.stopper = stop.NewStopper()
	}
	// Setup fake zone config handler.
	config.TestingSetupZoneConfigHook(tc.stopper)
	if tc.gossip == nil {
		rpcContext := rpc.NewContext(&base.Context{Insecure: true}, nil, tc.stopper)
		server := rpc.NewServer(rpcContext) // never started
		tc.gossip = gossip.New(rpcContext, server, nil, tc.stopper, metric.NewRegistry())
		tc.gossip.SetNodeID(1)
	}
	if tc.manualClock == nil {
		tc.manualClock = hlc.NewManualClock(0)
	}
	if tc.clock == nil {
		tc.clock = hlc.NewClock(tc.manualClock.UnixNano)
	}
	if tc.engine == nil {
		tc.engine = engine.NewInMem(roachpb.Attributes{Attrs: []string{"dc1", "mem"}}, 1<<20, tc.stopper)
	}
	if tc.transport == nil {
		tc.transport = NewDummyRaftTransport()
	}

	if tc.store == nil {
		ctx.Clock = tc.clock
		ctx.Gossip = tc.gossip
		ctx.Transport = tc.transport
		// Create a test sender without setting a store. This is to deal with the
		// circular dependency between the test sender and the store. The actual
		// store will be passed to the sender after it is created and bootstrapped.
		sender := &testSender{}
		ctx.DB = client.NewDB(sender)
		tc.store = NewStore(ctx, tc.engine, &roachpb.NodeDescriptor{NodeID: 1})
		if err := tc.store.Bootstrap(roachpb.StoreIdent{
			ClusterID: uuid.MakeV4(),
			NodeID:    1,
			StoreID:   1,
		}, tc.stopper); err != nil {
			t.Fatal(err)
		}
		// Now that we have our actual store, monkey patch the sender used in ctx.DB.
		sender.store = tc.store
		// We created the store without a real KV client, so it can't perform splits.
		tc.store.splitQueue.SetDisabled(true)

		if tc.rng == nil && tc.bootstrapMode == bootstrapRangeWithMetadata {
			if err := tc.store.BootstrapRange(nil); err != nil {
				t.Fatal(err)
			}
		}
		if err := tc.store.Start(tc.stopper); err != nil {
			t.Fatal(err)
		}
		tc.store.WaitForInit()
	}

	realRange := tc.rng == nil

	if realRange {
		if tc.bootstrapMode == bootstrapRangeOnly {
			// Bare-bones bootstrap: write the replica's initial state directly
			// and add it to the store without writing range metadata.
			testDesc := testRangeDescriptor()
			if _, err := writeInitialState(
				context.Background(),
				tc.store.Engine(),
				enginepb.MVCCStats{},
				*testDesc,
			); err != nil {
				t.Fatal(err)
			}
			rng, err := NewReplica(testDesc, tc.store, 0)
			if err != nil {
				t.Fatal(err)
			}
			if err := tc.store.AddReplicaTest(rng); err != nil {
				t.Fatal(err)
			}
		}
		var err error
		tc.rng, err = tc.store.GetReplica(1)
		if err != nil {
			t.Fatal(err)
		}
		tc.rangeID = tc.rng.RangeID
	}

	if err := tc.initConfigs(realRange, t); err != nil {
		t.Fatal(err)
	}
}
// Sender returns a client.Sender that routes requests to the test replica,
// filling in a default RangeID of 1 and an active timestamp when the
// caller left them unset.
func (tc *testContext) Sender() client.Sender {
	return client.Wrap(tc.rng, func(ba roachpb.BatchRequest) roachpb.BatchRequest {
		// Default an unset RangeID to 1 (the test range). The original
		// condition was inverted (!= 0), which clobbered explicitly-set
		// RangeIDs and left unset ones at zero.
		if ba.RangeID == 0 {
			ba.RangeID = 1
		}
		if ba.Timestamp == hlc.ZeroTimestamp {
			if err := ba.SetActiveTimestamp(tc.clock.Now); err != nil {
				tc.Fatal(err)
			}
		}
		return ba
	})
}
// SendWrappedWith is a convenience function which wraps the request in a batch
// and sends it
func (tc *testContext) SendWrappedWith(h roachpb.Header, args roachpb.Request) (roachpb.Response, *roachpb.Error) {
	sender := tc.Sender()
	return client.SendWrappedWith(sender, context.Background(), h, args)
}
// SendWrapped is identical to SendWrappedWith with a zero header.
func (tc *testContext) SendWrapped(args roachpb.Request) (roachpb.Response, *roachpb.Error) {
	var emptyHeader roachpb.Header
	return tc.SendWrappedWith(emptyHeader, args)
}
// Stop shuts down the test context's stopper, releasing every resource
// that was started under it.
func (tc *testContext) Stop() {
	tc.stopper.Stop()
}
// initConfigs creates default configuration entries.
func (tc *testContext) initConfigs(realRange bool, t testing.TB) error {
	// Put an empty system config into gossip so that gossip callbacks get
	// run. We're using a fake config, but it's hooked into SystemConfig.
	emptyConfig := &config.SystemConfig{}
	if err := tc.gossip.AddInfoProto(gossip.KeySystemConfig, emptyConfig, 0); err != nil {
		return err
	}

	// Block until the config becomes visible through gossip.
	util.SucceedsSoon(t, func() error {
		_, ok := tc.gossip.GetSystemConfig()
		if ok {
			return nil
		}
		return errors.Errorf("expected system config to be set")
	})

	return nil
}
// newTransaction builds a transaction record anchored at baseKey. When a
// clock is supplied, the transaction's timestamp and max-offset come from
// it; otherwise both stay at their zero values.
func newTransaction(name string, baseKey roachpb.Key, userPriority roachpb.UserPriority,
	isolation enginepb.IsolationType, clock *hlc.Clock) *roachpb.Transaction {
	var (
		maxOffsetNanos int64
		ts             hlc.Timestamp
	)
	if clock != nil {
		maxOffsetNanos = clock.MaxOffset().Nanoseconds()
		ts = clock.Now()
	}
	return roachpb.NewTransaction(name, baseKey, userPriority, isolation, ts, maxOffsetNanos)
}
// createReplicaSets creates new roachpb.ReplicaDescriptor protos based on an array of
// StoreIDs to aid in testing. Note that this does not actually produce any
// replicas, it just creates the descriptors.
func createReplicaSets(replicaNumbers []roachpb.StoreID) []roachpb.ReplicaDescriptor {
	// Pre-size the result: exactly one descriptor per store ID, avoiding
	// repeated slice growth during append.
	result := make([]roachpb.ReplicaDescriptor, 0, len(replicaNumbers))
	for _, replicaNumber := range replicaNumbers {
		result = append(result, roachpb.ReplicaDescriptor{
			StoreID: replicaNumber,
		})
	}
	return result
}
// TestIsOnePhaseCommit verifies the circumstances where a
// transactional batch can be committed as an atomic write.
func TestIsOnePhaseCommit(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// A full transactional batch: Begin, Put, End.
	txnReqs := []roachpb.RequestUnion{
		{BeginTransaction: &roachpb.BeginTransactionRequest{}},
		{Put: &roachpb.PutRequest{}},
		{EndTransaction: &roachpb.EndTransactionRequest{}},
	}
	testCases := []struct {
		bu      []roachpb.RequestUnion // the batch's requests
		isTxn   bool                   // attach a transaction to the batch
		isWTO   bool                   // mark the txn WriteTooOld
		isTSOff bool                   // make the txn SERIALIZABLE (timestamp offset matters)
		exp1PC  bool                   // expected isOnePhaseCommit result
	}{
		{[]roachpb.RequestUnion{}, false, false, false, false},
		{[]roachpb.RequestUnion{}, true, false, false, false},
		{[]roachpb.RequestUnion{{Get: &roachpb.GetRequest{}}}, true, false, false, false},
		{[]roachpb.RequestUnion{{Put: &roachpb.PutRequest{}}}, true, false, false, false},
		{txnReqs[0 : len(txnReqs)-1], true, false, false, false},
		{txnReqs[1:], true, false, false, false},
		// Only the complete Begin/Put/End batch on a clean SNAPSHOT txn
		// qualifies for one-phase commit.
		{txnReqs, true, false, false, true},
		{txnReqs, true, true, false, false},
		{txnReqs, true, false, true, false},
		{txnReqs, true, true, true, false},
	}

	clock := hlc.NewClock(hlc.UnixNano)
	for i, c := range testCases {
		ba := roachpb.BatchRequest{Requests: c.bu}
		if c.isTxn {
			ba.Txn = newTransaction("txn", roachpb.Key("a"), 1, enginepb.SNAPSHOT, clock)
			if c.isWTO {
				ba.Txn.WriteTooOld = true
			}
			// Advance the txn timestamp past its original timestamp.
			ba.Txn.Timestamp = ba.Txn.OrigTimestamp.Add(1, 0)
			if c.isTSOff {
				ba.Txn.Isolation = enginepb.SERIALIZABLE
			}
		}
		if is1PC := isOnePhaseCommit(ba); is1PC != c.exp1PC {
			t.Errorf("%d: expected 1pc=%t; got %t", i, c.exp1PC, is1PC)
		}
	}
}
// TestReplicaContains verifies that the range uses Key.Address() in
// order to properly resolve addresses for local keys.
func TestReplicaContains(t *testing.T) {
	defer leaktest.AfterTest(t)()
	desc := &roachpb.RangeDescriptor{
		RangeID:  1,
		StartKey: roachpb.RKey("a"),
		EndKey:   roachpb.RKey("b"),
	}

	// This test really only needs a hollow shell of a Replica.
	r := &Replica{}
	r.mu.state.Desc = desc
	r.rangeDesc.Store(desc)

	// Local range-stats key addresses into [a, b).
	if statsKey := keys.RangeStatsKey(desc.RangeID); !r.ContainsKey(statsKey) {
		t.Errorf("expected range to contain range stats key %q", statsKey)
	}
	if !r.ContainsKey(roachpb.Key("aa")) {
		t.Errorf("expected range to contain key \"aa\"")
	}
	// Local range-descriptor keys address to their anchor key.
	if !r.ContainsKey(keys.RangeDescriptorKey([]byte("aa"))) {
		t.Errorf("expected range to contain range descriptor key for \"aa\"")
	}
	if !r.ContainsKeyRange(roachpb.Key("aa"), roachpb.Key("b")) {
		t.Errorf("expected range to contain key range \"aa\"-\"b\"")
	}
	if !r.ContainsKeyRange(keys.RangeDescriptorKey([]byte("aa")),
		keys.RangeDescriptorKey([]byte("b"))) {
		t.Errorf("expected range to contain key transaction range \"aa\"-\"b\"")
	}
}
// sendLeaseRequest proposes a RequestLease command for l directly through
// Raft on r, bypassing the usual redirect-on-lease machinery, and returns
// the proposal or application error, if any.
func sendLeaseRequest(r *Replica, l *roachpb.Lease) error {
	var ba roachpb.BatchRequest
	ba.Timestamp = r.store.Clock().Now()
	ba.Add(&roachpb.RequestLeaseRequest{Lease: *l})
	ch, _, err := r.proposeRaftCommand(context.Background(), ba)
	if err != nil {
		return err
	}
	// The command was proposed; wait for the range to apply it and surface
	// any application-level error.
	// TODO(bdarnell): refactor this to a more conventional error-handling pattern.
	return (<-ch).Err.GoError()
}
// TestReplicaReadConsistency verifies behavior of the range under
// different read consistencies. Note that this unittest plays
// fast and loose with granting range leases.
func TestReplicaReadConsistency(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Modify range descriptor to include a second replica; range lease can
	// only be obtained by Replicas which are part of the range descriptor. This
	// workaround is sufficient for the purpose of this test.
	secondReplica := roachpb.ReplicaDescriptor{
		NodeID:    2,
		StoreID:   2,
		ReplicaID: 2,
	}
	rngDesc := tc.rng.Desc()
	rngDesc.Replicas = append(rngDesc.Replicas, secondReplica)
	tc.rng.setDescWithoutProcessUpdate(rngDesc)
	gArgs := getArgs(roachpb.Key("a"))
	// Try consistent read and verify success.
	if _, err := tc.SendWrapped(&gArgs); err != nil {
		t.Errorf("expected success on consistent read: %s", err)
	}
	// Try a consensus read and verify error (CONSENSUS is unsupported here).
	if _, err := tc.SendWrappedWith(roachpb.Header{
		ReadConsistency: roachpb.CONSENSUS,
	}, &gArgs); err == nil {
		t.Errorf("expected error on consensus read")
	}
	// Try an inconsistent read within a transaction; INCONSISTENT reads are
	// incompatible with a Txn and must be rejected.
	txn := newTransaction("test", roachpb.Key("a"), 1, enginepb.SERIALIZABLE, tc.clock)
	if _, err := tc.SendWrappedWith(roachpb.Header{
		Txn:             txn,
		ReadConsistency: roachpb.INCONSISTENT,
	}, &gArgs); err == nil {
		t.Errorf("expected error on inconsistent read within a txn")
	}
	// Lose the lease and verify CONSISTENT reads receive NotLeaseHolderError
	// and INCONSISTENT reads work as expected. The clock is advanced past the
	// current lease's expiration and the new lease is granted to replica 2.
	start := hlc.ZeroTimestamp.Add(leaseExpiry(tc.rng), 0)
	tc.manualClock.Set(start.WallTime)
	if err := sendLeaseRequest(tc.rng, &roachpb.Lease{
		Start:       start,
		StartStasis: start.Add(10, 0),
		Expiration:  start.Add(10, 0),
		Replica: roachpb.ReplicaDescriptor{ // a different node
			ReplicaID: 2,
			NodeID:    2,
			StoreID:   2,
		},
	}); err != nil {
		t.Fatal(err)
	}
	// Send without Txn.
	_, pErr := tc.SendWrappedWith(roachpb.Header{
		ReadConsistency: roachpb.CONSISTENT,
	}, &gArgs)
	if _, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError); !ok {
		t.Errorf("expected not lease holder error; got %s", pErr)
	}
	// An INCONSISTENT read does not require the lease and must still succeed.
	if _, pErr := tc.SendWrappedWith(roachpb.Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}, &gArgs); pErr != nil {
		t.Errorf("expected success reading with inconsistent: %s", pErr)
	}
}
// TestApplyCmdLeaseError verifies that when during application of a Raft
// command the proposing node no longer holds the range lease, an error is
// returned. This prevents regression of #1483.
func TestApplyCmdLeaseError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Modify range descriptor to include a second replica; range lease can
	// only be obtained by Replicas which are part of the range descriptor. This
	// workaround is sufficient for the purpose of this test.
	secondReplica := roachpb.ReplicaDescriptor{
		NodeID:    2,
		StoreID:   2,
		ReplicaID: 2,
	}
	rngDesc := tc.rng.Desc()
	rngDesc.Replicas = append(rngDesc.Replicas, secondReplica)
	tc.rng.setDescWithoutProcessUpdate(rngDesc)
	pArgs := putArgs(roachpb.Key("a"), []byte("asd"))
	// Lose the lease: advance the clock past expiry and grant a fresh lease
	// to the second (remote) replica.
	start := hlc.ZeroTimestamp.Add(leaseExpiry(tc.rng), 0)
	tc.manualClock.Set(start.WallTime)
	if err := sendLeaseRequest(tc.rng, &roachpb.Lease{
		Start:       start,
		StartStasis: start.Add(10, 0),
		Expiration:  start.Add(10, 0),
		Replica: roachpb.ReplicaDescriptor{ // a different node
			ReplicaID: 2,
			NodeID:    2,
			StoreID:   2,
		},
	}); err != nil {
		t.Fatal(err)
	}
	// Submit a write at a timestamp predating the new lease; the local
	// replica no longer holds the lease, so a NotLeaseHolderError is expected.
	_, pErr := tc.SendWrappedWith(roachpb.Header{
		Timestamp: tc.clock.Now().Add(-100, 0),
	}, &pArgs)
	if _, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError); !ok {
		t.Fatalf("expected not lease holder error in return, got %v", pErr)
	}
}
// TestReplicaRangeBoundsChecking verifies that requests addressed to keys
// outside a replica's bounds return a RangeKeyMismatchError carrying both
// the mismatched range and a suggested (correct) range descriptor.
func TestReplicaRangeBoundsChecking(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	key := roachpb.RKey("a")
	firstRng := tc.store.LookupReplica(key, nil)
	// Split at "a" so that "b" now belongs to the new right-hand range.
	newRng := splitTestRange(tc.store, key, key, t)
	if pErr := newRng.redirectOnOrAcquireLease(context.Background()); pErr != nil {
		t.Fatal(pErr)
	}
	// Send a Get for "b" to the (wrong) first range.
	gArgs := getArgs(roachpb.Key("b"))
	_, pErr := tc.SendWrapped(&gArgs)
	if mismatchErr, ok := pErr.GetDetail().(*roachpb.RangeKeyMismatchError); !ok {
		t.Errorf("expected range key mismatch error: %s", pErr)
	} else {
		// The error should name the range that was (wrongly) addressed...
		if mismatchedDesc := mismatchErr.MismatchedRange; mismatchedDesc == nil || mismatchedDesc.RangeID != firstRng.RangeID {
			t.Errorf("expected mismatched range to be %d, found %v", firstRng.RangeID, mismatchedDesc)
		}
		// ...and suggest the range that actually contains the key.
		if suggestedDesc := mismatchErr.SuggestedRange; suggestedDesc == nil || suggestedDesc.RangeID != newRng.RangeID {
			t.Errorf("expected suggested range to be %d, found %v", newRng.RangeID, suggestedDesc)
		}
	}
}
// hasLease returns whether the most recent range lease was held by the given
// range replica and whether it's expired for the given timestamp.
func hasLease(rng *Replica, timestamp hlc.Timestamp) (owned bool, expired bool) {
	lease, _ := rng.getLease()
	owned = lease.OwnedBy(rng.store.StoreID())
	expired = !lease.Covers(timestamp)
	return owned, expired
}
// TestReplicaLease exercises basic range lease mechanics: rejection of
// invalid lease intervals, transfer of the lease to another replica,
// expiration, and translation of LeaseRejectedError into
// NotLeaseHolderError for redirected commands.
func TestReplicaLease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Modify range descriptor to include a second replica; leader lease can
	// only be obtained by Replicas which are part of the range descriptor. This
	// workaround is sufficient for the purpose of this test.
	secondReplica := roachpb.ReplicaDescriptor{
		NodeID:    2,
		StoreID:   2,
		ReplicaID: 2,
	}
	rngDesc := tc.rng.Desc()
	rngDesc.Replicas = append(rngDesc.Replicas, secondReplica)
	tc.rng.setDescWithoutProcessUpdate(rngDesc)
	// Test that leases with invalid times are rejected.
	// Start leases at a point that avoids overlapping with the existing lease.
	one := hlc.ZeroTimestamp.Add(time.Second.Nanoseconds(), 0)
	for _, lease := range []roachpb.Lease{
		{Start: one, StartStasis: one},
		{Start: one, StartStasis: one.Next(), Expiration: one},
	} {
		if _, _, err := tc.rng.RequestLease(context.Background(), tc.store.Engine(), nil,
			roachpb.Header{}, roachpb.RequestLeaseRequest{
				Lease: lease,
			}); !testutils.IsError(err, "illegal lease interval") {
			t.Fatalf("unexpected error: %v", err)
		}
	}
	if held, _ := hasLease(tc.rng, tc.clock.Now()); !held {
		t.Errorf("expected lease on range start")
	}
	// Expire the current lease and hand the range lease to the second replica.
	tc.manualClock.Set(leaseExpiry(tc.rng))
	now := tc.clock.Now()
	if err := sendLeaseRequest(tc.rng, &roachpb.Lease{
		Start:       now.Add(10, 0),
		StartStasis: now.Add(20, 0),
		Expiration:  now.Add(20, 0),
		Replica:     secondReplica,
	}); err != nil {
		t.Fatal(err)
	}
	if held, expired := hasLease(tc.rng, tc.clock.Now().Add(15, 0)); held || expired {
		t.Errorf("expected second replica to have range lease")
	}
	{
		// While another replica holds the lease, acquiring it here must
		// redirect with a NotLeaseHolderError.
		pErr := tc.rng.redirectOnOrAcquireLease(context.Background())
		if lErr, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError); !ok || lErr == nil {
			t.Fatalf("wanted NotLeaseHolderError, got %s", pErr)
		}
	}
	// Advance clock past expiration and verify that another has
	// range lease will not be true.
	tc.manualClock.Increment(21) // 21ns have passed
	if held, expired := hasLease(tc.rng, tc.clock.Now()); held || !expired {
		t.Errorf("expected another replica to have expired lease")
	}
	// Verify that command returns NotLeaseHolderError when lease is rejected.
	rng, err := NewReplica(testRangeDescriptor(), tc.store, 0)
	if err != nil {
		t.Fatal(err)
	}
	// Stub out Raft proposals so any lease request is rejected outright.
	rng.mu.Lock()
	rng.mu.proposeRaftCommandFn = func(*pendingCmd) error {
		return &roachpb.LeaseRejectedError{
			Message: "replica not found",
		}
	}
	rng.mu.Unlock()
	{
		// NB: report the error actually returned by redirectOnOrAcquireLease;
		// the previous code printed the stale (nil) err from NewReplica above.
		pErr := rng.redirectOnOrAcquireLease(context.Background())
		if _, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError); !ok {
			t.Fatalf("expected %T, got %s", &roachpb.NotLeaseHolderError{}, pErr)
		}
	}
}
// TestReplicaNotLeaseHolderError verifies NotLeaderError when lease is rejected.
func TestReplicaNotLeaseHolderError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Modify range descriptor to include a second replica; range lease can
	// only be obtained by Replicas which are part of the range descriptor. This
	// workaround is sufficient for the purpose of this test.
	secondReplica := roachpb.ReplicaDescriptor{
		NodeID:    2,
		StoreID:   2,
		ReplicaID: 2,
	}
	rngDesc := tc.rng.Desc()
	rngDesc.Replicas = append(rngDesc.Replicas, secondReplica)
	tc.rng.setDescWithoutProcessUpdate(rngDesc)
	// Expire the local lease and grant a fresh one to the remote replica so
	// that every command sent below must be redirected.
	tc.manualClock.Set(leaseExpiry(tc.rng))
	now := tc.clock.Now()
	if err := sendLeaseRequest(tc.rng, &roachpb.Lease{
		Start:       now,
		StartStasis: now.Add(10, 0),
		Expiration:  now.Add(10, 0),
		Replica: roachpb.ReplicaDescriptor{
			ReplicaID: 2,
			NodeID:    2,
			StoreID:   2,
		},
	}); err != nil {
		t.Fatal(err)
	}
	header := roachpb.Span{
		Key: roachpb.Key("a"),
	}
	// One representative request per command class: all must be redirected.
	testCases := []roachpb.Request{
		// Admin split covers admin commands.
		&roachpb.AdminSplitRequest{
			Span:     header,
			SplitKey: roachpb.Key("a"),
		},
		// Get covers read-only commands.
		&roachpb.GetRequest{
			Span: header,
		},
		// Put covers read-write commands.
		&roachpb.PutRequest{
			Span:  header,
			Value: roachpb.MakeValueFromString("value"),
		},
	}
	for i, test := range testCases {
		_, pErr := tc.SendWrappedWith(roachpb.Header{Timestamp: now}, test)
		if _, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError); !ok {
			t.Errorf("%d: expected not lease holder error: %s", i, pErr)
		}
	}
}
// TestReplicaLeaseCounters verifies leaseRequest metrics counters are updated
// correctly after a lease request.
func TestReplicaLeaseCounters(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// assert checks that actual falls within [min, max]; ranges are used
	// because background activity may issue additional lease requests.
	assert := func(actual, min, max int64) {
		if actual < min || actual > max {
			t.Fatal(errors.Errorf(
				"metrics counters actual=%d, expected=[%d,%d]", actual, min, max))
		}
	}
	metrics := tc.rng.store.metrics
	// Startup acquires at least one lease.
	assert(metrics.LeaseRequestSuccessCount.Count(), 1, 1000)
	assert(metrics.LeaseRequestErrorCount.Count(), 0, 0)
	now := tc.clock.Now()
	if err := sendLeaseRequest(tc.rng, &roachpb.Lease{
		Start:       now,
		StartStasis: now.Add(10, 0),
		Expiration:  now.Add(10, 0),
		Replica: roachpb.ReplicaDescriptor{
			ReplicaID: 1,
			NodeID:    1,
			StoreID:   1,
		},
	}); err != nil {
		t.Fatal(err)
	}
	// The explicit request above bumps the success counter.
	assert(metrics.LeaseRequestSuccessCount.Count(), 2, 1000)
	assert(metrics.LeaseRequestErrorCount.Count(), 0, 0)
	// Make lease request fail by providing an invalid ReplicaDescriptor.
	if err := sendLeaseRequest(tc.rng, &roachpb.Lease{
		Start:       now,
		StartStasis: now.Add(10, 0),
		Expiration:  now.Add(10, 0),
		Replica: roachpb.ReplicaDescriptor{
			ReplicaID: 2,
			NodeID:    99,
			StoreID:   99,
		},
	}); err == nil {
		t.Fatal("lease request did not fail on invalid ReplicaDescriptor")
	}
	// The failed request increments only the error counter.
	assert(metrics.LeaseRequestSuccessCount.Count(), 2, 1000)
	assert(metrics.LeaseRequestErrorCount.Count(), 1, 1000)
}
// TestReplicaGossipConfigsOnLease verifies that config info is gossiped
// upon acquisition of the range lease.
func TestReplicaGossipConfigsOnLease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Modify range descriptor to include a second replica; range lease can
	// only be obtained by Replicas which are part of the range descriptor. This
	// workaround is sufficient for the purpose of this test.
	secondReplica := roachpb.ReplicaDescriptor{
		NodeID:    2,
		StoreID:   2,
		ReplicaID: 2,
	}
	rngDesc := tc.rng.Desc()
	rngDesc.Replicas = append(rngDesc.Replicas, secondReplica)
	tc.rng.setDescWithoutProcessUpdate(rngDesc)
	// Write some arbitrary data in the system config span. The write goes
	// directly through the engine, bypassing the gossip trigger.
	key := keys.MakeTablePrefix(keys.MaxSystemConfigDescID)
	var val roachpb.Value
	val.SetInt(42)
	if err := engine.MVCCPut(context.Background(), tc.engine, nil, key, hlc.MinTimestamp, val, nil); err != nil {
		t.Fatal(err)
	}
	// If this actually failed, we would have gossiped from MVCCPutProto.
	// Unlikely, but why not check.
	if cfg, ok := tc.gossip.GetSystemConfig(); ok {
		if nv := len(cfg.Values); nv == 1 && cfg.Values[nv-1].Key.Equal(key) {
			t.Errorf("unexpected gossip of system config: %s", cfg)
		}
	}
	// Expire our own lease which we automagically acquired due to being
	// first range and config holder.
	tc.manualClock.Set(leaseExpiry(tc.rng))
	now := tc.clock.Now()
	// Give lease to someone else.
	if err := sendLeaseRequest(tc.rng, &roachpb.Lease{
		Start:       now,
		StartStasis: now.Add(10, 0),
		Expiration:  now.Add(10, 0),
		Replica: roachpb.ReplicaDescriptor{
			ReplicaID: 2,
			NodeID:    2,
			StoreID:   2,
		},
	}); err != nil {
		t.Fatal(err)
	}
	// Expire that lease.
	tc.manualClock.Increment(11 + int64(tc.clock.MaxOffset())) // advance time
	now = tc.clock.Now()
	// Give lease to this range. Reacquiring the lease is what should trigger
	// the gossip of the system config written above.
	if err := sendLeaseRequest(tc.rng, &roachpb.Lease{
		Start:       now.Add(11, 0),
		StartStasis: now.Add(20, 0),
		Expiration:  now.Add(20, 0),
		Replica: roachpb.ReplicaDescriptor{
			ReplicaID: 1,
			NodeID:    1,
			StoreID:   1,
		},
	}); err != nil {
		t.Fatal(err)
	}
	// Gossip happens asynchronously after lease acquisition; poll until the
	// written value shows up in the gossiped system config.
	util.SucceedsSoon(t, func() error {
		cfg, ok := tc.gossip.GetSystemConfig()
		if !ok {
			return errors.Errorf("expected system config to be set")
		}
		numValues := len(cfg.Values)
		if numValues != 1 {
			return errors.Errorf("num config values != 1; got %d", numValues)
		}
		if k := cfg.Values[numValues-1].Key; !k.Equal(key) {
			return errors.Errorf("invalid key for config value (%q != %q)", k, key)
		}
		return nil
	})
}
// TestReplicaTSCacheLowWaterOnLease verifies that the low water mark
// is set on the timestamp cache when the node is granted the lease holder
// lease after not holding it and it is not set when the node is
// granted the range lease when it was the last holder.
func TestReplicaTSCacheLowWaterOnLease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	tc.clock.SetMaxOffset(maxClockOffset)
	// Disable raft log truncation which confuses this test.
	tc.store.SetRaftLogQueueActive(false)
	// Modify range descriptor to include a second replica; range lease can
	// only be obtained by Replicas which are part of the range descriptor. This
	// workaround is sufficient for the purpose of this test.
	secondReplica := roachpb.ReplicaDescriptor{
		NodeID:    2,
		StoreID:   2,
		ReplicaID: 2,
	}
	rngDesc := tc.rng.Desc()
	rngDesc.Replicas = append(rngDesc.Replicas, secondReplica)
	tc.rng.setDescWithoutProcessUpdate(rngDesc)
	tc.manualClock.Set(leaseExpiry(tc.rng))
	now := hlc.Timestamp{WallTime: tc.manualClock.UnixNano()}
	// Capture the timestamp cache's current low water mark as the baseline
	// for the expectations below.
	tc.rng.mu.Lock()
	baseRTS, _ := tc.rng.mu.tsCache.GetMaxRead(roachpb.Key("a"), nil /* end */, nil /* txn */)
	tc.rng.mu.Unlock()
	baseLowWater := baseRTS.WallTime
	newLowWater := now.Add(50, 0).WallTime + baseLowWater
	testCases := []struct {
		storeID     roachpb.StoreID
		start       hlc.Timestamp
		expiration  hlc.Timestamp
		expLowWater int64
		expErr      string
	}{
		// Grant the lease fresh.
		{storeID: tc.store.StoreID(),
			start: now, expiration: now.Add(10, 0),
			expLowWater: baseLowWater},
		// Renew the lease.
		{storeID: tc.store.StoreID(),
			start: now.Add(15, 0), expiration: now.Add(30, 0),
			expLowWater: baseLowWater},
		// Renew the lease but shorten expiration. This errors out.
		{storeID: tc.store.StoreID(),
			start: now.Add(16, 0), expiration: now.Add(25, 0),
			expErr: "lease shortening currently unsupported",
		},
		// Another Store attempts to get the lease, but overlaps. If the
		// previous lease expiration had worked, this would have too.
		{storeID: tc.store.StoreID() + 1,
			start: now.Add(29, 0), expiration: now.Add(50, 0),
			expLowWater: baseLowWater,
			expErr:      "overlaps previous",
		},
		// The other store tries again, this time without the overlap.
		{storeID: tc.store.StoreID() + 1,
			start: now.Add(31, 0), expiration: now.Add(50, 0),
			expLowWater: baseLowWater},
		// Lease is regranted to this replica. Store clock moves forward avoid
		// influencing the result.
		{storeID: tc.store.StoreID(),
			start: now.Add(60, 0), expiration: now.Add(70, 0),
			expLowWater: newLowWater},
		// Lease is held by another once more.
		{storeID: tc.store.StoreID() + 1,
			start: now.Add(70, 0), expiration: now.Add(90, 0),
			expLowWater: newLowWater},
	}
	for i, test := range testCases {
		if err := sendLeaseRequest(tc.rng, &roachpb.Lease{
			Start:       test.start,
			StartStasis: test.expiration.Add(-1, 0), // smaller than durations used
			Expiration:  test.expiration,
			Replica: roachpb.ReplicaDescriptor{
				// NodeID/ReplicaID are derived from the StoreID for simplicity.
				ReplicaID: roachpb.ReplicaID(test.storeID),
				NodeID:    roachpb.NodeID(test.storeID),
				StoreID:   test.storeID,
			},
		}); err != nil {
			if test.expErr == "" || !testutils.IsError(err, test.expErr) {
				t.Fatalf("%d: unexpected error %s", i, err)
			}
		}
		// Verify expected low water mark. rOK/wOK must be false: the low
		// water mark comes from the cache default, not an explicit entry.
		tc.rng.mu.Lock()
		rTS, rOK := tc.rng.mu.tsCache.GetMaxRead(roachpb.Key("a"), nil, nil)
		wTS, wOK := tc.rng.mu.tsCache.GetMaxWrite(roachpb.Key("a"), nil, nil)
		tc.rng.mu.Unlock()
		if rTS.WallTime != test.expLowWater || wTS.WallTime != test.expLowWater || rOK || wOK {
			t.Errorf("%d: expected low water %d; got %d, %d; rOK=%t, wOK=%t", i, test.expLowWater, rTS.WallTime, wTS.WallTime, rOK, wOK)
		}
	}
}
// TestReplicaLeaseRejectUnknownRaftNodeID ensures that a replica cannot
// obtain the range lease if it is not part of the current range descriptor.
// TODO(mrtracy): This should probably be tested in client_raft_test package,
// using a real second store.
func TestReplicaLeaseRejectUnknownRaftNodeID(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	tc.manualClock.Set(leaseExpiry(tc.rng))
	now := tc.clock.Now()
	// Replica 2 is NOT added to the descriptor here (unlike in other tests),
	// so this lease request must be rejected with "replica not found".
	lease := &roachpb.Lease{
		Start:       now,
		StartStasis: now.Add(10, 0),
		Expiration:  now.Add(10, 0),
		Replica: roachpb.ReplicaDescriptor{
			ReplicaID: 2,
			NodeID:    2,
			StoreID:   2,
		},
	}
	ba := roachpb.BatchRequest{}
	ba.Timestamp = tc.rng.store.Clock().Now()
	ba.Add(&roachpb.RequestLeaseRequest{Lease: *lease})
	ch, _, err := tc.rng.proposeRaftCommand(context.Background(), ba)
	if err == nil {
		// Next if the command was committed, wait for the range to apply it.
		// TODO(bdarnell): refactor to a more conventional error-handling pattern.
		// Remove ambiguity about where the "replica not found" error comes from.
		err = (<-ch).Err.GoError()
	}
	if !testutils.IsError(err, "replica not found") {
		t.Errorf("unexpected error obtaining lease for invalid store: %v", err)
	}
}
// TestReplicaDrainLease makes sure that no new leases are granted when
// the Store is in DrainLeases mode.
func TestReplicaDrainLease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Acquire initial lease.
	if pErr := tc.rng.redirectOnOrAcquireLease(context.Background()); pErr != nil {
		t.Fatal(pErr)
	}
	var slept atomic.Value
	slept.Store(false)
	// Background task: sleep briefly (so we can detect that DrainLeases
	// blocked on the active lease), then repeatedly expire leases until the
	// stopper quiesces.
	if err := tc.stopper.RunAsyncTask(func() {
		// Wait just a bit so that the main thread can check that
		// DrainLeases blocks (false negatives are possible, but 10ms is
		// plenty to make this fail 99.999% of the time in practice).
		time.Sleep(10 * time.Millisecond)
		slept.Store(true)
		// Expire the lease (and any others that may race in before we drain).
		for {
			tc.manualClock.Increment(leaseExpiry(tc.rng))
			select {
			case <-time.After(10 * time.Millisecond): // real code would use Ticker
			case <-tc.stopper.ShouldQuiesce():
				return
			}
		}
	}); err != nil {
		t.Fatal(err)
	}
	// DrainLeases(true) should block until the active lease has expired.
	if err := tc.store.DrainLeases(true); err != nil {
		t.Fatal(err)
	}
	if !slept.Load().(bool) {
		t.Fatal("DrainLeases returned with active lease")
	}
	// While draining, a direct lease request must be refused with
	// NotLeaseHolderError.
	tc.rng.mu.Lock()
	pErr := <-tc.rng.requestLeaseLocked(tc.clock.Now())
	tc.rng.mu.Unlock()
	_, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError)
	if !ok {
		t.Fatalf("expected NotLeaseHolderError, not %v", pErr)
	}
	if err := tc.store.DrainLeases(false); err != nil {
		t.Fatal(err)
	}
	// Newly unfrozen, leases work again.
	if pErr := tc.rng.redirectOnOrAcquireLease(context.Background()); pErr != nil {
		t.Fatal(pErr)
	}
}
// TestReplicaGossipFirstRange verifies that the first range gossips its
// location and the cluster ID.
func TestReplicaGossipFirstRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// All three keys must be present in gossip after startup.
	for _, key := range []string{gossip.KeyClusterID, gossip.KeyFirstRangeDescriptor, gossip.KeySentinel} {
		bytes, err := tc.gossip.GetInfo(key)
		if err != nil {
			t.Errorf("missing first range gossip of key %s", key)
		}
		// The first range descriptor must at least unmarshal cleanly.
		if key == gossip.KeyFirstRangeDescriptor {
			var rangeDesc roachpb.RangeDescriptor
			if err := proto.Unmarshal(bytes, &rangeDesc); err != nil {
				t.Fatal(err)
			}
		}
		if key == gossip.KeyClusterID && len(bytes) == 0 {
			t.Errorf("expected non-empty gossiped cluster ID, got %q", bytes)
		}
		if key == gossip.KeySentinel && len(bytes) == 0 {
			t.Errorf("expected non-empty gossiped sentinel, got %q", bytes)
		}
	}
}
// TestReplicaGossipAllConfigs verifies that all config types are gossiped.
func TestReplicaGossipAllConfigs(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// The system config should have been gossiped as part of startup.
	_, present := tc.gossip.GetSystemConfig()
	if !present {
		t.Fatal("config not set")
	}
}
// maybeWrapWithBeginTransaction sends req through sender. When header
// carries a transaction that has not yet written (Txn != nil and
// !Txn.Writing), the request is wrapped in a batch preceded by a
// BeginTransaction; otherwise it is sent unchanged. The returned response
// is the unwrapped reply to req with its header's Txn updated from the
// batch response.
func maybeWrapWithBeginTransaction(sender client.Sender, ctx context.Context, header roachpb.Header, req roachpb.Request) (roachpb.Response, *roachpb.Error) {
	// Normalize a nil context before it is used: the original code only
	// did this on the wrapping path below, letting a nil ctx leak through
	// the early return.
	if ctx == nil {
		ctx = context.Background()
	}
	if header.Txn == nil || header.Txn.Writing {
		return client.SendWrappedWith(sender, ctx, header, req)
	}
	var ba roachpb.BatchRequest
	bt, _ := beginTxnArgs(req.Header().Key, header.Txn)
	ba.Header = header
	ba.Add(&bt)
	ba.Add(req)
	br, pErr := sender.Send(ctx, ba)
	if pErr != nil {
		return nil, pErr
	}
	// Responses[0] is the BeginTransaction reply; callers only care about
	// the reply to req itself, so unwrap it and refresh its Txn.
	unwrappedReply := br.Responses[1].GetInner()
	unwrappedHeader := unwrappedReply.Header()
	unwrappedHeader.Txn = br.Txn
	unwrappedReply.SetHeader(unwrappedHeader)
	return unwrappedReply, nil
}
// TestReplicaNoGossipConfig verifies that certain commands (e.g.,
// reads, writes in uncommitted transactions) do not trigger gossip.
func TestReplicaNoGossipConfig(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Write some arbitrary data in the system span (up to, but not including MaxReservedID+1)
	key := keys.MakeTablePrefix(keys.MaxReservedDescID)
	txn := newTransaction("test", key, 1 /* userPriority */, enginepb.SERIALIZABLE, tc.clock)
	h := roachpb.Header{Txn: txn}
	req1 := putArgs(key, []byte("foo"))
	req2, _ := endTxnArgs(txn, true /* commit */)
	req2.IntentSpans = []roachpb.Span{{Key: key}}
	req3 := getArgs(key)
	// A transactional put, its commit, and a plain read: none of these
	// should cause the system config to be gossiped.
	testCases := []struct {
		req roachpb.Request
		h   roachpb.Header
	}{
		{&req1, h},
		{&req2, h},
		{&req3, roachpb.Header{}},
	}
	for i, test := range testCases {
		txn.Sequence++
		if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), test.h, test.req); pErr != nil {
			t.Fatal(pErr)
		}
		// After the first (wrapped) write the txn is writing; mark it so
		// subsequent requests are not wrapped again.
		txn.Writing = true
		// System config is not gossiped.
		cfg, ok := tc.gossip.GetSystemConfig()
		if !ok {
			t.Fatal("config not set")
		}
		if len(cfg.Values) != 0 {
			t.Errorf("System config was gossiped at #%d", i)
		}
	}
}
// TestReplicaNoGossipFromNonLeader verifies that a non-lease holder replica
// does not gossip configurations.
func TestReplicaNoGossipFromNonLeader(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Write some arbitrary data in the system span (up to, but not including MaxReservedID+1)
	key := keys.MakeTablePrefix(keys.MaxReservedDescID)
	txn := newTransaction("test", key, 1 /* userPriority */, enginepb.SERIALIZABLE, tc.clock)
	req1 := putArgs(key, nil)
	if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), nil, roachpb.Header{
		Txn: txn,
	}, &req1); pErr != nil {
		t.Fatal(pErr)
	}
	txn.Writing = true
	txn.Sequence++
	// Commit the transaction, resolving its intent on the system span key.
	req2, h := endTxnArgs(txn, true /* commit */)
	req2.IntentSpans = []roachpb.Span{{Key: key}}
	txn.Sequence++
	if _, pErr := tc.SendWrappedWith(h, &req2); pErr != nil {
		t.Fatal(pErr)
	}
	// Execute a get to resolve the intent.
	req3 := getArgs(key)
	if _, pErr := tc.SendWrappedWith(roachpb.Header{Timestamp: txn.Timestamp}, &req3); pErr != nil {
		t.Fatal(pErr)
	}
	// Increment the clock's timestamp to expire the range lease.
	tc.manualClock.Set(leaseExpiry(tc.rng))
	if lease, _ := tc.rng.getLease(); lease.Covers(tc.clock.Now()) {
		t.Fatal("range lease should have been expired")
	}
	// Make sure the information for db1 is not gossiped. Since obtaining
	// a lease updates the gossiped information, we do that.
	if pErr := tc.rng.redirectOnOrAcquireLease(context.Background()); pErr != nil {
		t.Fatal(pErr)
	}
	// Fetch the raw gossip info. GetSystemConfig is based on callbacks at
	// modification time. But we're checking for _not_ gossiped, so there should
	// be no callbacks. Easier to check the raw info.
	var cfg config.SystemConfig
	err := tc.gossip.GetInfoProto(gossip.KeySystemConfig, &cfg)
	if err != nil {
		t.Fatal(err)
	}
	if len(cfg.Values) != 0 {
		t.Fatalf("non-lease holder gossiped the system config")
	}
}
// getArgs returns a GetRequest addressed to key.
func getArgs(key []byte) roachpb.GetRequest {
	var req roachpb.GetRequest
	req.Span = roachpb.Span{Key: key}
	return req
}
// putArgs returns a PutRequest writing value at key.
func putArgs(key roachpb.Key, value []byte) roachpb.PutRequest {
	var req roachpb.PutRequest
	req.Span = roachpb.Span{Key: key}
	req.Value = roachpb.MakeValueFromBytes(value)
	return req
}
// cPutArgs returns a ConditionalPutRequest writing value at key,
// conditional on the existing value being expValue.
func cPutArgs(key roachpb.Key, value, expValue []byte) roachpb.ConditionalPutRequest {
	expected := roachpb.MakeValueFromBytes(expValue)
	var req roachpb.ConditionalPutRequest
	req.Span = roachpb.Span{Key: key}
	req.Value = roachpb.MakeValueFromBytes(value)
	req.ExpValue = &expected
	return req
}
// deleteArgs returns a DeleteRequest for key.
func deleteArgs(key roachpb.Key) roachpb.DeleteRequest {
	var req roachpb.DeleteRequest
	req.Span = roachpb.Span{Key: key}
	return req
}
// readOrWriteArgs returns either get or put arguments depending on
// value of "read". Get for true; Put for false.
func readOrWriteArgs(key roachpb.Key, read bool) roachpb.Request {
	if !read {
		pArgs := putArgs(key, []byte("value"))
		return &pArgs
	}
	gArgs := getArgs(key)
	return &gArgs
}
// incrementArgs returns an IncrementRequest adding inc to the value at key.
func incrementArgs(key []byte, inc int64) roachpb.IncrementRequest {
	var req roachpb.IncrementRequest
	req.Span = roachpb.Span{Key: key}
	req.Increment = inc
	return req
}
// scanArgs returns a ScanRequest over [start, end).
func scanArgs(start, end []byte) roachpb.ScanRequest {
	var req roachpb.ScanRequest
	req.Span = roachpb.Span{Key: start, EndKey: end}
	return req
}
// beginTxnArgs returns a BeginTransactionRequest addressed to the txn's
// anchor key, along with a header carrying the txn. The key parameter is
// kept for signature compatibility but the request uses txn.Key.
func beginTxnArgs(key []byte, txn *roachpb.Transaction) (_ roachpb.BeginTransactionRequest, h roachpb.Header) {
	h.Txn = txn
	var req roachpb.BeginTransactionRequest
	req.Span = roachpb.Span{Key: txn.Key}
	return req, h
}
// endTxnArgs returns an EndTransactionRequest (commit or abort per the
// commit flag) addressed to the txn's anchor key, plus a header carrying
// the txn.
func endTxnArgs(txn *roachpb.Transaction, commit bool) (_ roachpb.EndTransactionRequest, h roachpb.Header) {
	h.Txn = txn
	var req roachpb.EndTransactionRequest
	// Addressing the txn key directly is not allowed when going through
	// TxnCoordSender, but these tests bypass it.
	req.Span = roachpb.Span{Key: txn.Key}
	req.Commit = commit
	return req, h
}
// pushTxnArgs returns a PushTxnRequest in which pusher attempts to push
// pushee with the given push type, using the pusher's timestamp as both
// Now and the target push timestamp.
func pushTxnArgs(pusher, pushee *roachpb.Transaction, pushType roachpb.PushTxnType) roachpb.PushTxnRequest {
	var req roachpb.PushTxnRequest
	req.Span = roachpb.Span{Key: pushee.Key}
	req.Now = pusher.Timestamp
	req.PushTo = pusher.Timestamp
	req.PusherTxn = *pusher
	req.PusheeTxn = pushee.TxnMeta
	req.PushType = pushType
	return req
}
// heartbeatArgs returns a HeartbeatTxnRequest addressed to the txn's
// anchor key, plus a header carrying the txn.
func heartbeatArgs(txn *roachpb.Transaction) (_ roachpb.HeartbeatTxnRequest, h roachpb.Header) {
	h.Txn = txn
	var req roachpb.HeartbeatTxnRequest
	req.Span = roachpb.Span{Key: txn.Key}
	return req, h
}
// internalMergeArgs returns a MergeRequest merging value at key.
func internalMergeArgs(key []byte, value roachpb.Value) roachpb.MergeRequest {
	var req roachpb.MergeRequest
	req.Span = roachpb.Span{Key: key}
	req.Value = value
	return req
}
// truncateLogArgs returns a TruncateLogRequest truncating rangeID's raft
// log up to the given index.
func truncateLogArgs(index uint64, rangeID roachpb.RangeID) roachpb.TruncateLogRequest {
	var req roachpb.TruncateLogRequest
	req.Index = index
	req.RangeID = rangeID
	return req
}
// gcKey returns a GC key entry for key at the given timestamp.
func gcKey(key roachpb.Key, timestamp hlc.Timestamp) roachpb.GCRequest_GCKey {
	var k roachpb.GCRequest_GCKey
	k.Key = key
	k.Timestamp = timestamp
	return k
}
// gcArgs returns a GCRequest over [startKey, endKey) for the given keys.
func gcArgs(startKey []byte, endKey []byte, keys ...roachpb.GCRequest_GCKey) roachpb.GCRequest {
	var req roachpb.GCRequest
	req.Span = roachpb.Span{Key: startKey, EndKey: endKey}
	req.Keys = keys
	return req
}
// TestOptimizePuts verifies that contiguous runs of puts and
// conditional puts are marked as "blind" if they're written
// to a virgin keyspace.
func TestOptimizePuts(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Build optimizePutThreshold puts and cputs over keys "00".."09" so each
	// case can form runs at or below the optimization threshold.
	pArgs := make([]roachpb.PutRequest, optimizePutThreshold)
	cpArgs := make([]roachpb.ConditionalPutRequest, optimizePutThreshold)
	for i := 0; i < optimizePutThreshold; i++ {
		pArgs[i] = putArgs([]byte(fmt.Sprintf("%02d", i)), []byte("1"))
		cpArgs[i] = cPutArgs([]byte(fmt.Sprintf("%02d", i)), []byte("1"), []byte("0"))
	}
	incArgs := incrementArgs([]byte("inc"), 1)
	// exKey, when non-nil, is written to the engine before the batch is
	// optimized and cleared afterwards; expBlind is the expected Blind flag
	// per request after optimizePuts runs.
	testCases := []struct {
		exKey    roachpb.Key
		reqs     []roachpb.Request
		expBlind []bool
	}{
		// No existing keys, single put.
		{
			nil,
			[]roachpb.Request{
				&pArgs[0],
			},
			[]bool{
				false,
			},
		},
		// No existing keys, nine puts.
		{
			nil,
			[]roachpb.Request{
				&pArgs[0], &pArgs[1], &pArgs[2], &pArgs[3], &pArgs[4], &pArgs[5], &pArgs[6], &pArgs[7], &pArgs[8],
			},
			[]bool{
				false, false, false, false, false, false, false, false, false,
			},
		},
		// No existing keys, ten puts.
		{
			nil,
			[]roachpb.Request{
				&pArgs[0], &pArgs[1], &pArgs[2], &pArgs[3], &pArgs[4], &pArgs[5], &pArgs[6], &pArgs[7], &pArgs[8], &pArgs[9],
			},
			[]bool{
				true, true, true, true, true, true, true, true, true, true,
			},
		},
		// Existing key at "0", ten conditional puts.
		{
			roachpb.Key("0"),
			[]roachpb.Request{
				&cpArgs[0], &cpArgs[1], &cpArgs[2], &cpArgs[3], &cpArgs[4], &cpArgs[5], &cpArgs[6], &cpArgs[7], &cpArgs[8], &cpArgs[9],
			},
			[]bool{
				true, true, true, true, true, true, true, true, true, true,
			},
		},
		// Existing key at 11, mixed puts and conditional puts.
		{
			roachpb.Key("11"),
			[]roachpb.Request{
				&pArgs[0], &cpArgs[1], &pArgs[2], &cpArgs[3], &pArgs[4], &cpArgs[5], &pArgs[6], &cpArgs[7], &pArgs[8], &cpArgs[9],
			},
			[]bool{
				true, true, true, true, true, true, true, true, true, true,
			},
		},
		// Existing key at 00, ten puts, expect nothing blind.
		{
			roachpb.Key("00"),
			[]roachpb.Request{
				&pArgs[0], &pArgs[1], &pArgs[2], &pArgs[3], &pArgs[4], &pArgs[5], &pArgs[6], &pArgs[7], &pArgs[8], &pArgs[9],
			},
			[]bool{
				false, false, false, false, false, false, false, false, false, false,
			},
		},
		// Existing key at 00, ten puts in reverse order, expect nothing blind.
		{
			roachpb.Key("00"),
			[]roachpb.Request{
				&pArgs[9], &pArgs[8], &pArgs[7], &pArgs[6], &pArgs[5], &pArgs[4], &pArgs[3], &pArgs[2], &pArgs[1], &pArgs[0],
			},
			[]bool{
				false, false, false, false, false, false, false, false, false, false,
			},
		},
		// Existing key at 05, ten puts, expect first five puts are blind.
		{
			roachpb.Key("05"),
			[]roachpb.Request{
				&pArgs[0], &pArgs[1], &pArgs[2], &pArgs[3], &pArgs[4], &pArgs[5], &pArgs[6], &pArgs[7], &pArgs[8], &pArgs[9],
			},
			[]bool{
				true, true, true, true, true, false, false, false, false, false,
			},
		},
		// No existing key, ten puts + inc + ten cputs.
		{
			nil,
			[]roachpb.Request{
				&pArgs[0], &pArgs[1], &pArgs[2], &pArgs[3], &pArgs[4], &pArgs[5], &pArgs[6], &pArgs[7], &pArgs[8], &pArgs[9],
				&incArgs, &cpArgs[0], &cpArgs[1], &cpArgs[2], &cpArgs[3], &cpArgs[4], &cpArgs[5], &cpArgs[6], &cpArgs[7], &cpArgs[8], &cpArgs[9],
			},
			[]bool{
				true, true, true, true, true, true, true, true, true, true,
				false, false, false, false, false, false, false, false, false, false, false,
			},
		},
		// Duplicate put at 11th key; should see ten puts.
		{
			nil,
			[]roachpb.Request{
				&pArgs[0], &pArgs[1], &pArgs[2], &pArgs[3], &pArgs[4], &pArgs[5], &pArgs[6], &pArgs[7], &pArgs[8], &pArgs[9], &pArgs[9],
			},
			[]bool{
				true, true, true, true, true, true, true, true, true, true, false,
			},
		},
		// Duplicate cput at 11th key; should see ten puts.
		{
			nil,
			[]roachpb.Request{
				&pArgs[0], &pArgs[1], &pArgs[2], &pArgs[3], &pArgs[4], &pArgs[5], &pArgs[6], &pArgs[7], &pArgs[8], &pArgs[9], &cpArgs[9],
			},
			[]bool{
				true, true, true, true, true, true, true, true, true, true, false,
			},
		},
		// Duplicate cput at 6th key; should see ten cputs.
		{
			nil,
			[]roachpb.Request{
				&cpArgs[0], &cpArgs[1], &cpArgs[2], &cpArgs[3], &cpArgs[4], &cpArgs[5], &cpArgs[6], &cpArgs[7], &cpArgs[8], &cpArgs[9], &cpArgs[9],
			},
			[]bool{
				true, true, true, true, true, true, true, true, true, true, false,
			},
		},
	}
	for i, c := range testCases {
		// Seed the "existing" key, if any, directly into the engine.
		if c.exKey != nil {
			if err := engine.MVCCPut(context.Background(), tc.engine, nil, c.exKey,
				hlc.ZeroTimestamp, roachpb.MakeValueFromString("foo"), nil); err != nil {
				t.Fatal(err)
			}
		}
		batch := roachpb.BatchRequest{}
		for _, r := range c.reqs {
			batch.Add(r)
		}
		optimizePuts(tc.engine, batch.Requests, false)
		// Collect the resulting Blind flags, resetting them so the shared
		// pArgs/cpArgs slices are clean for the next test case.
		blind := []bool{}
		for _, r := range batch.Requests {
			switch t := r.GetInner().(type) {
			case *roachpb.PutRequest:
				blind = append(blind, t.Blind)
				t.Blind = false
			case *roachpb.ConditionalPutRequest:
				blind = append(blind, t.Blind)
				t.Blind = false
			default:
				blind = append(blind, false)
			}
		}
		if !reflect.DeepEqual(blind, c.expBlind) {
			t.Errorf("%d: expected %+v; got %+v", i, c.expBlind, blind)
		}
		// Remove the seeded key so later cases start from a virgin keyspace.
		if c.exKey != nil {
			if err := tc.engine.Clear(engine.MakeMVCCMetadataKey(c.exKey)); err != nil {
				t.Fatal(err)
			}
		}
	}
}
// TestAcquireLease verifies that the range lease is acquired
// for read and write methods, and eagerly renewed.
func TestAcquireLease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	gArgs := getArgs([]byte("a"))
	pArgs := putArgs([]byte("b"), []byte("1"))
	// Exercise lease acquisition through both a read and a write request.
	testCases := []roachpb.Request{&gArgs, &pArgs}
	for i, test := range testCases {
		tc := testContext{}
		tc.Start(t)
		// This is a single-replica test; since we're automatically pushing back
		// the start of a lease as far as possible, and since there is an auto-
		// matic lease for us at the beginning, we'll basically create a lease from
		// then on.
		lease, _ := tc.rng.getLease()
		expStart := lease.Start
		// Advance the clock past lease expiry so the request below must
		// (re)acquire the lease.
		tc.manualClock.Set(leaseExpiry(tc.rng))
		ts := tc.clock.Now().Next()
		if _, pErr := tc.SendWrappedWith(roachpb.Header{Timestamp: ts}, test); pErr != nil {
			t.Error(pErr)
		}
		if held, expired := hasLease(tc.rng, ts); !held || expired {
			t.Errorf("%d: expected lease acquisition", i)
		}
		lease, _ = tc.rng.getLease()
		// The new lease's start must not have moved (see comment above).
		if !lease.Start.Equal(expStart) {
			t.Errorf("%d: unexpected lease start: %s; expected %s", i, lease.Start, expStart)
		}
		if !ts.Less(lease.StartStasis) {
			t.Errorf("%d: %s already in stasis (or beyond): %+v", i, ts, lease)
		}
		// Move the clock just short of the stasis point; a request at this
		// time is expected to trigger an eager (asynchronous) renewal.
		shouldRenewTS := lease.StartStasis.Add(-1, 0)
		tc.manualClock.Set(shouldRenewTS.WallTime + 1)
		if _, pErr := tc.SendWrapped(test); pErr != nil {
			t.Error(pErr)
		}
		// Since the command we sent above does not get blocked on the lease
		// extension, we need to wait for it to go through.
		util.SucceedsSoon(t, func() error {
			newLease, _ := tc.rng.getLease()
			if !lease.StartStasis.Less(newLease.StartStasis) {
				return errors.Errorf("%d: lease did not get extended: %+v to %+v", i, lease, newLease)
			}
			return nil
		})
		tc.Stop()
		if t.Failed() {
			return
		}
	}
}
// TestLeaseConcurrent issues several concurrent lease requests and
// verifies that each requester receives the result, and that the
// returned *roachpb.Error values do not share memory (regression test
// for #6111, per the comments below).
func TestLeaseConcurrent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const num = 5
	// The test was written to test this individual block below. The worry
	// was that this would NPE (it does not; proto.Clone is unusual in that it
	// returns the input value instead).
	{
		if protoutil.Clone((*roachpb.Error)(nil)).(*roachpb.Error) != nil {
			t.Fatal("could not clone nil *Error")
		}
	}
	// Testing concurrent range lease requests is still a good idea. We check
	// that they work and clone *Error, which prevents regression of #6111.
	const origMsg = "boom"
	for _, withError := range []bool{false, true} {
		func(withError bool) {
			tc := testContext{}
			tc.Start(t)
			defer tc.Stop()
			var wg sync.WaitGroup
			wg.Add(num)
			var active atomic.Value
			active.Store(false)
			var seen int32
			// Intercept the raft proposal of the lease request so that its
			// completion can be delayed until all num requesters have
			// joined the in-flight lease request (via wg below).
			tc.rng.mu.Lock()
			tc.rng.mu.proposeRaftCommandFn = func(cmd *pendingCmd) error {
				ll, ok := cmd.raftCmd.Cmd.Requests[0].
					GetInner().(*roachpb.RequestLeaseRequest)
				if !ok || !active.Load().(bool) {
					return defaultProposeRaftCommandLocked(tc.rng, cmd)
				}
				if c := atomic.AddInt32(&seen, 1); c > 1 {
					// Morally speaking, this is an error, but reproposals can
					// happen and so we warn (in case this trips the test up
					// in more unexpected ways).
					log.Infof(context.Background(), "reproposal of %+v", ll)
				}
				go func() {
					// Don't complete the proposal until every requester is
					// waiting on the shared lease channel.
					wg.Wait()
					if withError {
						// Complete the lease request with an error instead of
						// proposing it.
						cmd.done <- roachpb.ResponseWithError{
							Err: roachpb.NewErrorf(origMsg),
						}
						return
					}
					tc.rng.mu.Lock()
					defer tc.rng.mu.Unlock()
					if err := defaultProposeRaftCommandLocked(tc.rng, cmd); err != nil {
						panic(err) // unlikely, so punt on proper handling
					}
				}()
				return nil
			}
			tc.rng.mu.Unlock()
			active.Store(true)
			tc.manualClock.Increment(leaseExpiry(tc.rng))
			ts := tc.clock.Now()
			pErrCh := make(chan *roachpb.Error, num)
			for i := 0; i < num; i++ {
				if err := tc.stopper.RunAsyncTask(func() {
					tc.rng.mu.Lock()
					leaseCh := tc.rng.requestLeaseLocked(ts)
					tc.rng.mu.Unlock()
					wg.Done()
					pErr := <-leaseCh
					// Mutate the errors as we receive them to expose races.
					if pErr != nil {
						pErr.OriginNode = 0
					}
					pErrCh <- pErr
				}); err != nil {
					t.Fatal(err)
				}
			}
			pErrs := make([]*roachpb.Error, num)
			for i := range pErrs {
				// Make sure all of the responses are in (just so that we can
				// mess with the "original" error knowing that all of the
				// cloning must have happened by now).
				pErrs[i] = <-pErrCh
			}
			newMsg := "moob"
			for i, pErr := range pErrs {
				if withError != (pErr != nil) {
					t.Errorf("%d: wanted error: %t, got error %v", i, withError, pErr)
				}
				if testutils.IsPError(pErr, newMsg) {
					// A previous iteration's mutation leaked through: the
					// errors were not cloned per requester.
					t.Errorf("%d: errors shared memory: %v", i, pErr)
				} else if testutils.IsPError(pErr, origMsg) {
					// Mess with anyone holding the same reference.
					pErr.Message = newMsg
				} else if pErr != nil {
					t.Errorf("%d: unexpected error: %s", i, pErr)
				}
			}
		}(withError)
	}
}
// TestReplicaUpdateTSCache verifies that reads and writes update the
// timestamp cache.
func TestReplicaUpdateTSCache(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Set clock to time 1s and do the read.
	t0 := 1 * time.Second
	tc.manualClock.Set(t0.Nanoseconds())
	gArgs := getArgs([]byte("a"))
	ts := tc.clock.Now()
	_, pErr := tc.SendWrappedWith(roachpb.Header{Timestamp: ts}, &gArgs)
	if pErr != nil {
		t.Error(pErr)
	}
	// Set clock to time 2s for write.
	t1 := 2 * time.Second
	key := roachpb.Key([]byte("b"))
	tc.manualClock.Set(t1.Nanoseconds())
	drArgs := roachpb.NewDeleteRange(key, key.Next(), false)
	ts = tc.clock.Now()
	_, pErr = tc.SendWrappedWith(roachpb.Header{Timestamp: ts}, drArgs)
	if pErr != nil {
		t.Error(pErr)
	}
	// Verify the timestamp cache has rTS=1s and wTS=0s for "a".
	tc.rng.mu.Lock()
	defer tc.rng.mu.Unlock()
	// Before ExpandRequests is called, neither entry should be visible.
	_, rOK := tc.rng.mu.tsCache.GetMaxRead(roachpb.Key("a"), nil, nil)
	_, wOK := tc.rng.mu.tsCache.GetMaxWrite(roachpb.Key("a"), nil, nil)
	if rOK || wOK {
		t.Errorf("expected rOK=false and wOK=false; rOK=%t, wOK=%t", rOK, wOK)
	}
	// Expand buffered requests into the cache so the lookups below
	// observe the read and the write performed above.
	tc.rng.mu.tsCache.ExpandRequests(hlc.ZeroTimestamp)
	rTS, rOK := tc.rng.mu.tsCache.GetMaxRead(roachpb.Key("a"), nil, nil)
	wTS, wOK := tc.rng.mu.tsCache.GetMaxWrite(roachpb.Key("a"), nil, nil)
	if rTS.WallTime != t0.Nanoseconds() || wTS.WallTime != 0 || !rOK || wOK {
		t.Errorf("expected rTS=1s and wTS=0s, but got %s, %s; rOK=%t, wOK=%t", rTS, wTS, rOK, wOK)
	}
	// Verify the timestamp cache has rTS=0s and wTS=2s for "b".
	rTS, rOK = tc.rng.mu.tsCache.GetMaxRead(roachpb.Key("b"), nil, nil)
	wTS, wOK = tc.rng.mu.tsCache.GetMaxWrite(roachpb.Key("b"), nil, nil)
	if rTS.WallTime != 0 || wTS.WallTime != t1.Nanoseconds() || rOK || !wOK {
		t.Errorf("expected rTS=0s and wTS=2s, but got %s, %s; rOK=%t, wOK=%t", rTS, wTS, rOK, wOK)
	}
	// Verify another key ("c") has 0sec in timestamp cache.
	rTS, rOK = tc.rng.mu.tsCache.GetMaxRead(roachpb.Key("c"), nil, nil)
	wTS, wOK = tc.rng.mu.tsCache.GetMaxWrite(roachpb.Key("c"), nil, nil)
	if rTS.WallTime != 0 || wTS.WallTime != 0 || rOK || wOK {
		t.Errorf("expected rTS=0s and wTS=0s, but got %s %s; rOK=%t, wOK=%t", rTS, wTS, rOK, wOK)
	}
}
// TestReplicaCommandQueue verifies that reads/writes must wait for
// pending commands to complete through Raft before being executed on
// range.
//
// Fix: t.Fatalf was previously called from goroutines spawned via
// RunAsyncTask; per the testing package, FailNow/Fatal must only be
// called from the goroutine running the test function. Those calls are
// replaced by t.Errorf followed by a return, which preserves the old
// control flow (the Done channel stays unclosed, as it did under
// Fatalf's Goexit) while reporting failures safely.
func TestReplicaCommandQueue(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Intercept commands with matching command IDs and block them.
	blockingStart := make(chan struct{})
	blockingDone := make(chan struct{})
	tc := testContext{}
	tsc := TestStoreContext()
	tsc.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			// UserPriority 42 marks cmd1 below as the command to block.
			if filterArgs.Hdr.UserPriority == 42 {
				blockingStart <- struct{}{}
				<-blockingDone
			}
			return nil
		}
	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()
	defer close(blockingDone) // make sure teardown can happen
	// Test all four combinations of reads & writes waiting.
	testCases := []struct {
		cmd1Read, cmd2Read bool
		expWait            bool
	}{
		// Read/read doesn't wait.
		{true, true, false},
		// All other combinations must wait.
		{true, false, true},
		{false, true, true},
		{false, false, true},
	}
	tooLong := 5 * time.Second
	for i, test := range testCases {
		key1 := roachpb.Key(fmt.Sprintf("key1-%d", i))
		key2 := roachpb.Key(fmt.Sprintf("key2-%d", i))
		// Asynchronously put a value to the rng with blocking enabled.
		cmd1Done := make(chan struct{})
		if err := tc.stopper.RunAsyncTask(func() {
			args := readOrWriteArgs(key1, test.cmd1Read)
			_, pErr := tc.SendWrappedWith(roachpb.Header{
				UserPriority: 42,
			}, args)
			if pErr != nil {
				// Not the test goroutine: must not call t.Fatal here.
				t.Errorf("test %d: %s", i, pErr)
				return
			}
			close(cmd1Done)
		}); err != nil {
			t.Fatal(err)
		}
		// Wait for cmd1 to get into the command queue.
		<-blockingStart
		// First, try a command for same key as cmd1 to verify it blocks.
		cmd2Done := make(chan struct{})
		if err := tc.stopper.RunAsyncTask(func() {
			args := readOrWriteArgs(key1, test.cmd2Read)
			_, pErr := tc.SendWrapped(args)
			if pErr != nil {
				// Not the test goroutine: must not call t.Fatal here.
				t.Errorf("test %d: %s", i, pErr)
				return
			}
			close(cmd2Done)
		}); err != nil {
			t.Fatal(err)
		}
		// Next, try read for a non-impacted key--should go through immediately.
		cmd3Done := make(chan struct{})
		if err := tc.stopper.RunAsyncTask(func() {
			args := readOrWriteArgs(key2, true)
			_, pErr := tc.SendWrapped(args)
			if pErr != nil {
				// Not the test goroutine: must not call t.Fatal here.
				t.Errorf("test %d: %s", i, pErr)
				return
			}
			close(cmd3Done)
		}); err != nil {
			t.Fatal(err)
		}
		if test.expWait {
			// Verify cmd3 finishes but not cmd2.
			select {
			case <-cmd2Done:
				t.Fatalf("test %d: should not have been able to execute cmd2", i)
			case <-cmd3Done:
				// success.
			case <-cmd1Done:
				t.Fatalf("test %d: should not have been able to execute cmd1 while blocked", i)
			case <-time.After(tooLong):
				t.Fatalf("test %d: waited %s for cmd3 of key2", i, tooLong)
			}
		} else {
			select {
			case <-cmd2Done:
				// success.
			case <-cmd1Done:
				t.Fatalf("test %d: should not have been able to execute cmd1 while blocked", i)
			case <-time.After(tooLong):
				t.Fatalf("test %d: waited %s for cmd2 of key1", i, tooLong)
			}
			<-cmd3Done
		}
		// Unblock cmd1 and wait for cmd2 to drain behind it.
		blockingDone <- struct{}{}
		select {
		case <-cmd2Done:
			// success.
		case <-time.After(tooLong):
			t.Fatalf("test %d: waited %s for cmd2 of key1", i, tooLong)
		}
	}
}
// TestReplicaCommandQueueInconsistent verifies that inconsistent reads need
// not wait for pending commands to complete through Raft.
//
// Fix: t.Fatal was previously called from plain goroutines; per the
// testing package, FailNow/Fatal must only be called from the goroutine
// running the test function. The calls are replaced by t.Error followed
// by a return, preserving the old control flow (the Done channel stays
// unclosed, as under Fatal's Goexit) while reporting failures safely.
func TestReplicaCommandQueueInconsistent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	key := roachpb.Key("key1")
	blockingStart := make(chan struct{}, 1)
	blockingDone := make(chan struct{})
	tc := testContext{}
	tsc := TestStoreContext()
	tsc.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			if put, ok := filterArgs.Req.(*roachpb.PutRequest); ok {
				putBytes, err := put.Value.GetBytes()
				if err != nil {
					return roachpb.NewErrorWithTxn(err, filterArgs.Hdr.Txn)
				}
				if bytes.Equal(put.Key, key) && bytes.Equal(putBytes, []byte{1}) {
					// Absence of replay protection can mean that we end up here
					// more often than we expect, hence the select (#3669).
					select {
					case blockingStart <- struct{}{}:
					default:
					}
					<-blockingDone
				}
			}
			return nil
		}
	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()
	cmd1Done := make(chan struct{})
	go func() {
		args := putArgs(key, []byte{1})
		_, pErr := tc.SendWrapped(&args)
		if pErr != nil {
			// Not the test goroutine: must not call t.Fatal here.
			t.Error(pErr)
			return
		}
		close(cmd1Done)
	}()
	// Wait for cmd1 to get into the command queue.
	<-blockingStart
	// An inconsistent read to the key won't wait.
	cmd2Done := make(chan struct{})
	go func() {
		args := getArgs(key)
		_, pErr := tc.SendWrappedWith(roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, &args)
		if pErr != nil {
			// Not the test goroutine: must not call t.Fatal here.
			t.Error(pErr)
			return
		}
		close(cmd2Done)
	}()
	select {
	case <-cmd2Done:
		// success.
	case <-cmd1Done:
		t.Fatalf("cmd1 should have been blocked")
	}
	close(blockingDone)
	<-cmd1Done
	// Success.
}
// SendWrapped wraps the given request in a single-request batch, sends
// it through the provided sender with the given header, and unwraps the
// result. It returns the inner response, the batch response header, and
// any error produced by the send.
func SendWrapped(sender client.Sender, ctx context.Context, header roachpb.Header, args roachpb.Request) (roachpb.Response, roachpb.BatchResponse_Header, *roachpb.Error) {
	ba := roachpb.BatchRequest{Header: header}
	ba.Add(args)
	reply, pErr := sender.Send(ctx, ba)
	if pErr != nil {
		return nil, roachpb.BatchResponse_Header{}, pErr
	}
	return reply.Responses[0].GetInner(), reply.BatchResponse_Header, nil
}
// TestReplicaUseTSCache verifies that write timestamps are upgraded
// based on the read timestamp cache.
func TestReplicaUseTSCache(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	// Advance the clock to 1s and read key "a", which records that read
	// time in the timestamp cache.
	readTime := 1 * time.Second
	tc.manualClock.Set(readTime.Nanoseconds())
	gArgs := getArgs([]byte("a"))
	if _, pErr := tc.SendWrapped(&gArgs); pErr != nil {
		t.Error(pErr)
	}

	// A write to the same key must have its timestamp pushed up to the
	// cached read time.
	pArgs := putArgs([]byte("a"), []byte("value"))
	_, respH, pErr := SendWrapped(tc.Sender(), context.Background(), roachpb.Header{}, &pArgs)
	if pErr != nil {
		t.Fatal(pErr)
	}
	if respH.Timestamp.WallTime != tc.clock.Timestamp().WallTime {
		t.Errorf("expected write timestamp to upgrade to 1s; got %s", respH.Timestamp)
	}
}
// TestReplicaNoTSCacheInconsistent verifies that the timestamp cache
// is not affected by inconsistent reads.
func TestReplicaNoTSCacheInconsistent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Set clock to time 1s and do the read.
	t0 := 1 * time.Second
	tc.manualClock.Set(t0.Nanoseconds())
	args := getArgs([]byte("a"))
	ts := tc.clock.Now()
	// The read is inconsistent, so it must not populate the timestamp
	// cache for "a".
	_, pErr := tc.SendWrappedWith(roachpb.Header{
		Timestamp:       ts,
		ReadConsistency: roachpb.INCONSISTENT,
	}, &args)
	if pErr != nil {
		t.Error(pErr)
	}
	// Write the same key at a near-zero timestamp; since the inconsistent
	// read left no cache entry, the write must not get pushed up to 1s.
	pArgs := putArgs([]byte("a"), []byte("value"))
	_, respH, pErr := SendWrapped(tc.Sender(), context.Background(), roachpb.Header{Timestamp: hlc.ZeroTimestamp.Add(0, 1)}, &pArgs)
	if pErr != nil {
		t.Fatal(pErr)
	}
	if respH.Timestamp.WallTime == tc.clock.Timestamp().WallTime {
		t.Errorf("expected write timestamp not to upgrade to 1s; got %s", respH.Timestamp)
	}
}
// TestReplicaNoTSCacheUpdateOnFailure verifies that read and write
// commands do not update the timestamp cache if they result in
// failure.
//
// Fix: the final error message previously printed respH.Timestamp while
// the check compares respH.Txn.Timestamp, producing misleading failure
// output; it now prints the value actually compared.
func TestReplicaNoTSCacheUpdateOnFailure(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Test for both read & write attempts.
	for i, read := range []bool{true, false} {
		key := roachpb.Key(fmt.Sprintf("key-%d", i))
		// Start by laying down an intent to trip up future read or write to same key.
		pArgs := putArgs(key, []byte("value"))
		txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
		_, pErr := tc.SendWrappedWith(roachpb.Header{
			Txn: txn,
		}, &pArgs)
		if pErr != nil {
			t.Fatalf("test %d: %s", i, pErr)
		}
		// Now attempt read or write; it should fail on the intent.
		args := readOrWriteArgs(key, read)
		ts := tc.clock.Now() // later timestamp
		if _, pErr := tc.SendWrappedWith(roachpb.Header{
			Timestamp: ts,
		}, args); pErr == nil {
			t.Errorf("test %d: expected failure", i)
		}
		// Write the intent again -- should not have its timestamp upgraded!
		txn.Sequence++
		if _, respH, pErr := SendWrapped(tc.Sender(), context.Background(), roachpb.Header{Txn: txn}, &pArgs); pErr != nil {
			t.Fatalf("test %d: %s", i, pErr)
		} else if !respH.Txn.Timestamp.Equal(txn.Timestamp) {
			t.Errorf("expected timestamp not to advance %s != %s", respH.Txn.Timestamp, txn.Timestamp)
		}
	}
}
// TestReplicaNoTimestampIncrementWithinTxn verifies that successive
// read and write commands within the same transaction do not cause
// the write to receive an incremented timestamp.
//
// Fix: the first error message previously printed respH.Timestamp while
// the check compares respH.Txn.Timestamp, producing misleading failure
// output; it now prints the value actually compared.
func TestReplicaNoTimestampIncrementWithinTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Test for both read & write attempts.
	key := roachpb.Key("a")
	txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	// Start with a read to warm the timestamp cache.
	gArgs := getArgs(key)
	if _, pErr := tc.SendWrappedWith(roachpb.Header{
		Txn: txn,
	}, &gArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Now try a write and verify timestamp isn't incremented.
	pArgs := putArgs(key, []byte("value"))
	txn.Sequence++
	_, respH, pErr := SendWrapped(tc.Sender(), context.Background(), roachpb.Header{Txn: txn}, &pArgs)
	if pErr != nil {
		t.Fatal(pErr)
	}
	if !respH.Txn.Timestamp.Equal(txn.Timestamp) {
		t.Errorf("expected timestamp to remain %s; got %s", txn.Timestamp, respH.Txn.Timestamp)
	}
	// Resolve the intent.
	rArgs := &roachpb.ResolveIntentRequest{
		Span:      pArgs.Header(),
		IntentTxn: txn.TxnMeta,
		Status:    roachpb.COMMITTED,
	}
	txn.Sequence++
	if _, pErr = tc.SendWrappedWith(roachpb.Header{Timestamp: txn.Timestamp}, rArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Finally, try a non-transactional write and verify timestamp is incremented.
	ts := txn.Timestamp
	expTS := ts
	expTS.Logical++
	_, respH, pErr = SendWrapped(tc.Sender(), context.Background(), roachpb.Header{Timestamp: ts}, &pArgs)
	if pErr != nil {
		t.Errorf("unexpected pError: %s", pErr)
	}
	if !respH.Timestamp.Equal(expTS) {
		t.Errorf("expected timestamp to increment to %s; got %s", expTS, respH.Timestamp)
	}
}
// TestReplicaAbortCacheReadError verifies that an error is returned
// to the client in the event that a abort cache entry is found but is
// not decodable.
func TestReplicaAbortCacheReadError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	k := []byte("a")
	txn := newTransaction("test", k, 10, enginepb.SERIALIZABLE, tc.clock)
	args := incrementArgs(k, 1)
	txn.Sequence = 1
	if _, pErr := tc.SendWrappedWith(roachpb.Header{
		Txn: txn,
	}, &args); pErr != nil {
		t.Fatal(pErr)
	}
	// Overwrite Abort cache entry with garbage for the last op.
	// A plain string value is not a decodable abort cache entry, so the
	// next abort cache check for this txn should fail to decode it.
	key := keys.AbortCacheKey(tc.rng.RangeID, txn.ID)
	err := engine.MVCCPut(context.Background(), tc.engine, nil, key, hlc.ZeroTimestamp, roachpb.MakeValueFromString("never read in this test"), nil)
	if err != nil {
		t.Fatal(err)
	}
	// Now try increment again and verify error. The undecodable entry
	// is expected to surface as a replica corruption error.
	_, pErr := tc.SendWrappedWith(roachpb.Header{
		Txn: txn,
	}, &args)
	if !testutils.IsPError(pErr, "replica corruption") {
		t.Fatal(pErr)
	}
}
// TestReplicaAbortCacheStoredTxnRetryError verifies that if a cached
// entry is present, a transaction restart error is returned.
func TestReplicaAbortCacheStoredTxnRetryError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	key := []byte("a")
	{
		// Manually plant an abort cache entry and verify that a command
		// from the same txn trips over it with a TransactionAbortedError.
		txn := newTransaction("test", key, 10, enginepb.SERIALIZABLE, tc.clock)
		txn.Sequence = int32(1)
		entry := roachpb.AbortCacheEntry{
			Key:       txn.Key,
			Timestamp: txn.Timestamp,
			Priority:  0,
		}
		if err := tc.rng.abortCache.Put(context.Background(), tc.engine, nil, txn.ID, &entry); err != nil {
			t.Fatal(err)
		}
		args := incrementArgs(key, 1)
		_, pErr := tc.SendWrappedWith(roachpb.Header{
			Txn: txn,
		}, &args)
		if _, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); !ok {
			t.Fatalf("unexpected error %v", pErr)
		}
	}
	// Try the same again, this time verifying that the Put will actually
	// populate the cache appropriately.
	txn := newTransaction("test", key, 10, enginepb.SERIALIZABLE, tc.clock)
	txn.Sequence = 321
	args := incrementArgs(key, 1)
	try := func() *roachpb.Error {
		_, pErr := tc.SendWrappedWith(roachpb.Header{
			Txn: txn,
		}, &args)
		return pErr
	}
	// First attempt at this timestamp must succeed.
	if pErr := try(); pErr != nil {
		t.Fatal(pErr)
	}
	txn.Timestamp.Forward(txn.Timestamp.Add(10, 10)) // can't hurt
	{
		// Replaying at the same epoch/sequence should now be caught.
		pErr := try()
		if _, ok := pErr.GetDetail().(*roachpb.TransactionRetryError); !ok {
			t.Fatal(pErr)
		}
	}
	// Pretend we restarted by increasing the epoch. That's all that's needed.
	txn.Epoch++
	txn.Sequence++
	if pErr := try(); pErr != nil {
		t.Fatal(pErr)
	}
	// Now increase the sequence as well. Still good to go.
	txn.Sequence++
	if pErr := try(); pErr != nil {
		t.Fatal(pErr)
	}
}
// TestTransactionRetryLeavesIntents sets up a transaction retry event
// and verifies that the intents which were written as part of a
// single batch are left in place despite the failed end transaction.
func TestTransactionRetryLeavesIntents(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	key := roachpb.Key("a")
	pushee := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	pusher := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	pushee.Priority = 1
	pusher.Priority = 2 // pusher will win
	// Read from the key to increment the timestamp cache.
	gArgs := getArgs(key)
	if _, pErr := tc.SendWrapped(&gArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Begin txn, write to key (with now-higher timestamp), and attempt to
	// commit the txn, which should result in a retryable error.
	btArgs, _ := beginTxnArgs(key, pushee)
	pArgs := putArgs(key, []byte("foo"))
	etArgs, _ := endTxnArgs(pushee, true /* commit */)
	var ba roachpb.BatchRequest
	ba.Header.Txn = pushee
	ba.Add(&btArgs)
	ba.Add(&pArgs)
	ba.Add(&etArgs)
	_, pErr := tc.Sender().Send(context.Background(), ba)
	if _, ok := pErr.GetDetail().(*roachpb.TransactionRetryError); !ok {
		t.Fatalf("expected retry error; got %s", pErr)
	}
	// Now verify that the intent was still written for key. A
	// non-transactional read should trip over it with a WriteIntentError.
	_, pErr = tc.SendWrapped(&gArgs)
	if _, ok := pErr.GetDetail().(*roachpb.WriteIntentError); !ok {
		t.Fatalf("expected write intent error; got %s", pErr)
	}
}
// TestReplicaAbortCacheOnlyWithIntent verifies that a transactional command
// which goes through Raft but is not a transactional write (i.e. does not
// leave intents) passes the abort cache unhindered.
func TestReplicaAbortCacheOnlyWithIntent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	// Plant an abort cache entry for this transaction's ID.
	txn := newTransaction("test", []byte("test"), 10, enginepb.SERIALIZABLE, tc.clock)
	txn.Sequence = 100
	poisoned := roachpb.AbortCacheEntry{
		Key:       txn.Key,
		Timestamp: txn.Timestamp,
		Priority:  0,
	}
	if err := tc.rng.abortCache.Put(context.Background(), tc.engine, nil, txn.ID, &poisoned); err != nil {
		t.Fatal(err)
	}

	// If the abort cache were active for this request, we'd catch a txn retry.
	// Instead, we expect the error from heartbeating a nonexistent txn.
	hbArgs, hbHeader := heartbeatArgs(txn)
	if _, pErr := tc.SendWrappedWith(hbHeader, &hbArgs); !testutils.IsPError(pErr, "record not present") {
		t.Fatal(pErr)
	}
}
// TestEndTransactionDeadline verifies that EndTransaction respects the
// transaction deadline.
func TestEndTransactionDeadline(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// 4 cases: no deadline, past deadline, equal deadline, future deadline.
	for i := 0; i < 4; i++ {
		key := roachpb.Key("key: " + strconv.Itoa(i))
		txn := newTransaction("txn: "+strconv.Itoa(i), key, 1, enginepb.SERIALIZABLE, tc.clock)
		put := putArgs(key, key)
		_, header := beginTxnArgs(key, txn)
		if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), header, &put); pErr != nil {
			t.Fatal(pErr)
		}
		txn.Writing = true
		etArgs, etHeader := endTxnArgs(txn, true /* commit */)
		// Pick the deadline for this case relative to the txn timestamp.
		switch i {
		case 0:
			// No deadline.
		case 1:
			// Past deadline.
			ts := txn.Timestamp.Prev()
			etArgs.Deadline = &ts
		case 2:
			// Equal deadline.
			etArgs.Deadline = &txn.Timestamp
		case 3:
			// Future deadline.
			ts := txn.Timestamp.Next()
			etArgs.Deadline = &ts
		}
		{
			txn.Sequence++
			_, pErr := tc.SendWrappedWith(etHeader, &etArgs)
			switch i {
			case 0:
				// No deadline: commit succeeds.
				if pErr != nil {
					t.Error(pErr)
				}
			case 1:
				// Past deadline: commit must abort.
				if _, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); !ok {
					t.Errorf("expected TransactionAbortedError but got %T: %s", pErr, pErr)
				}
			case 2:
				// Equal deadline: commit succeeds.
				if pErr != nil {
					t.Error(pErr)
				}
			case 3:
				// Future deadline: commit succeeds.
				if pErr != nil {
					t.Error(pErr)
				}
			}
		}
	}
}
// TestEndTransactionDeadline_1PC verifies that a transaction that
// exceeded its deadline will be aborted even when one phase commit is
// applicable.
func TestEndTransactionDeadline_1PC(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	key := roachpb.Key("a")
	txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	beginTxn, _ := beginTxnArgs(key, txn)
	putReq := putArgs(key, []byte("value"))
	endTxn, endH := endTxnArgs(txn, true)
	// Choose a deadline that already precedes the txn's own timestamp.
	deadline := txn.Timestamp.Prev()
	endTxn.Deadline = &deadline

	// Send begin/put/commit in one batch so the one-phase-commit path is
	// applicable; the expired deadline must still abort the txn.
	var ba roachpb.BatchRequest
	ba.Header = endH
	ba.Add(&beginTxn, &putReq, &endTxn)
	_, pErr := tc.Sender().Send(context.Background(), ba)
	if _, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); !ok {
		t.Errorf("expected TransactionAbortedError but got %T: %s", pErr, pErr)
	}
}
// TestEndTransactionWithMalformedSplitTrigger verifies an
// EndTransaction call with a malformed commit trigger fails.
func TestEndTransactionWithMalformedSplitTrigger(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	key := roachpb.Key("foo")
	txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	pArgs := putArgs(key, []byte("only here to make this a rw transaction"))
	txn.Sequence++
	if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), roachpb.Header{
		Txn: txn,
	}, &pArgs); pErr != nil {
		t.Fatal(pErr)
	}
	txn.Writing = true
	args, h := endTxnArgs(txn, true /* commit */)
	// Make an EndTransaction request which would fail if not
	// stripped. In this case, we set the start key to "bar" for a
	// split of the default range; start key must be "" in this case.
	args.InternalCommitTrigger = &roachpb.InternalCommitTrigger{
		SplitTrigger: &roachpb.SplitTrigger{
			LeftDesc: roachpb.RangeDescriptor{StartKey: roachpb.RKey("bar")},
		},
	}
	txn.Sequence++
	// The mismatched split descriptor must be rejected at commit time.
	if _, pErr := tc.SendWrappedWith(h, &args); !testutils.IsPError(pErr, "range does not match splits") {
		t.Errorf("expected range does not match splits error; got %s", pErr)
	}
}
// TestEndTransactionBeforeHeartbeat verifies that a transaction
// can be committed/aborted before being heartbeat.
func TestEndTransactionBeforeHeartbeat(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Don't automatically GC the Txn record: We want to heartbeat the
	// committed Transaction and compare it against our expectations.
	// When it's removed, the heartbeat would recreate it.
	defer setTxnAutoGC(false)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	key := []byte("a")
	// Run once for commit and once for abort.
	for _, commit := range []bool{true, false} {
		txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
		_, btH := beginTxnArgs(key, txn)
		put := putArgs(key, key)
		if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
			t.Fatal(pErr)
		}
		txn.Sequence++
		txn.Writing = true
		args, h := endTxnArgs(txn, commit)
		resp, pErr := tc.SendWrappedWith(h, &args)
		if pErr != nil {
			t.Error(pErr)
		}
		reply := resp.(*roachpb.EndTransactionResponse)
		expStatus := roachpb.COMMITTED
		if !commit {
			expStatus = roachpb.ABORTED
		}
		if reply.Txn.Status != expStatus {
			t.Errorf("expected transaction status to be %s; got %s", expStatus, reply.Txn.Status)
		}
		// Try a heartbeat to the already-committed transaction; should get
		// committed txn back, but without last heartbeat timestamp set.
		txn.Epoch++ // need to fake a higher epoch to sneak past sequence cache
		txn.Sequence++
		hBA, h := heartbeatArgs(txn)
		resp, pErr = tc.SendWrappedWith(h, &hBA)
		if pErr != nil {
			t.Error(pErr)
		}
		hBR := resp.(*roachpb.HeartbeatTxnResponse)
		if hBR.Txn.Status != expStatus || hBR.Txn.LastHeartbeat != nil {
			t.Errorf("unexpected heartbeat reply contents: %+v", hBR)
		}
		// Use a fresh key for the next iteration.
		key = roachpb.Key(key).Next()
	}
}
// TestEndTransactionAfterHeartbeat verifies that a transaction
// can be committed/aborted after being heartbeat.
func TestEndTransactionAfterHeartbeat(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	key := roachpb.Key("a")
	// Run once for commit and once for abort.
	for _, commit := range []bool{true, false} {
		txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
		_, btH := beginTxnArgs(key, txn)
		put := putArgs(key, key)
		if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
			t.Fatal(pErr)
		}
		// Start out with a heartbeat to the transaction.
		hBA, h := heartbeatArgs(txn)
		txn.Sequence++
		resp, pErr := tc.SendWrappedWith(h, &hBA)
		if pErr != nil {
			t.Fatal(pErr)
		}
		hBR := resp.(*roachpb.HeartbeatTxnResponse)
		if hBR.Txn.Status != roachpb.PENDING || hBR.Txn.LastHeartbeat == nil {
			t.Errorf("unexpected heartbeat reply contents: %+v", hBR)
		}
		args, h := endTxnArgs(txn, commit)
		txn.Sequence++
		resp, pErr = tc.SendWrappedWith(h, &args)
		if pErr != nil {
			t.Error(pErr)
		}
		reply := resp.(*roachpb.EndTransactionResponse)
		expStatus := roachpb.COMMITTED
		if !commit {
			expStatus = roachpb.ABORTED
		}
		if reply.Txn.Status != expStatus {
			t.Errorf("expected transaction status to be %s; got %s", expStatus, reply.Txn.Status)
		}
		// Ending the txn must not disturb the recorded heartbeat timestamp.
		if reply.Txn.LastHeartbeat == nil || !reply.Txn.LastHeartbeat.Equal(*hBR.Txn.LastHeartbeat) {
			t.Errorf("expected heartbeats to remain equal: %+v != %+v",
				reply.Txn.LastHeartbeat, hBR.Txn.LastHeartbeat)
		}
		// Use a fresh key for the next iteration.
		key = key.Next()
	}
}
// TestEndTransactionWithPushedTimestamp verifies that txn can be
// ended (both commit or abort) correctly when the commit timestamp is
// greater than the transaction timestamp, depending on the isolation
// level.
func TestEndTransactionWithPushedTimestamp(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Per the expErr column: only a SERIALIZABLE commit fails after a
	// timestamp push; SNAPSHOT commits and all aborts go through.
	testCases := []struct {
		commit    bool
		isolation enginepb.IsolationType
		expErr    bool
	}{
		{true, enginepb.SERIALIZABLE, true},
		{true, enginepb.SNAPSHOT, false},
		{false, enginepb.SERIALIZABLE, false},
		{false, enginepb.SNAPSHOT, false},
	}
	key := roachpb.Key("a")
	for i, test := range testCases {
		pushee := newTransaction("pushee", key, 1, test.isolation, tc.clock)
		pusher := newTransaction("pusher", key, 1, enginepb.SERIALIZABLE, tc.clock)
		pushee.Priority = 1
		pusher.Priority = 2 // pusher will win
		_, btH := beginTxnArgs(key, pushee)
		put := putArgs(key, []byte("value"))
		if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
			t.Fatal(pErr)
		}
		// Push pushee txn, moving its timestamp forward.
		pushTxn := pushTxnArgs(pusher, pushee, roachpb.PUSH_TIMESTAMP)
		pushTxn.Key = pusher.Key
		if _, pErr := tc.SendWrapped(&pushTxn); pErr != nil {
			t.Error(pErr)
		}
		// End the transaction with args timestamp moved forward in time.
		endTxn, h := endTxnArgs(pushee, test.commit)
		pushee.Sequence++
		resp, pErr := tc.SendWrappedWith(h, &endTxn)
		if test.expErr {
			if _, ok := pErr.GetDetail().(*roachpb.TransactionRetryError); !ok {
				t.Errorf("%d: expected retry error; got %s", i, pErr)
			}
		} else {
			if pErr != nil {
				t.Errorf("%d: unexpected error: %s", i, pErr)
			}
			expStatus := roachpb.COMMITTED
			if !test.commit {
				expStatus = roachpb.ABORTED
			}
			reply := resp.(*roachpb.EndTransactionResponse)
			if reply.Txn.Status != expStatus {
				t.Errorf("%d: expected transaction status to be %s; got %s", i, expStatus, reply.Txn.Status)
			}
		}
		// Use a fresh key for the next iteration.
		key = key.Next()
	}
}
// TestEndTransactionWithIncrementedEpoch verifies that txn ended with
// a higher epoch (and priority) correctly assumes the higher epoch.
func TestEndTransactionWithIncrementedEpoch(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	key := []byte("a")
	txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	_, btH := beginTxnArgs(key, txn)
	put := putArgs(key, key)
	if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
		t.Fatal(pErr)
	}
	txn.Writing = true
	// Start out with a heartbeat to the transaction.
	hBA, h := heartbeatArgs(txn)
	txn.Sequence++
	_, pErr := tc.SendWrappedWith(h, &hBA)
	if pErr != nil {
		t.Error(pErr)
	}
	// Now end the txn with increased epoch and priority.
	args, h := endTxnArgs(txn, true)
	h.Txn.Epoch = txn.Epoch + 1
	h.Txn.Priority = txn.Priority + 1
	txn.Sequence++
	resp, pErr := tc.SendWrappedWith(h, &args)
	if pErr != nil {
		t.Error(pErr)
	}
	reply := resp.(*roachpb.EndTransactionResponse)
	if reply.Txn.Status != roachpb.COMMITTED {
		t.Errorf("expected transaction status to be COMMITTED; got %s", reply.Txn.Status)
	}
	// NB: txn.Epoch/Priority were bumped via the header above, so these
	// compare the reply against the incremented header values.
	if reply.Txn.Epoch != txn.Epoch {
		t.Errorf("expected epoch to equal %d; got %d", txn.Epoch, reply.Txn.Epoch)
	}
	if reply.Txn.Priority != txn.Priority {
		t.Errorf("expected priority to equal %d; got %d", txn.Priority, reply.Txn.Priority)
	}
}
// TestEndTransactionWithErrors verifies various error conditions
// are checked such as transaction already being committed or
// aborted, or timestamp or epoch regression.
func TestEndTransactionWithErrors(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Capture a timestamp from before the clock reset so that the last
	// case below constitutes a timestamp regression.
	regressTS := tc.clock.Now()
	tc.manualClock.Set(1)
	txn := newTransaction("test", roachpb.Key(""), 1, enginepb.SERIALIZABLE, tc.clock)
	// Sentinel status meaning "don't write a txn record at all".
	doesNotExist := roachpb.TransactionStatus(-1)
	testCases := []struct {
		key          roachpb.Key
		existStatus  roachpb.TransactionStatus
		existEpoch   uint32
		existTS      hlc.Timestamp
		expErrRegexp string
	}{
		{roachpb.Key("a"), doesNotExist, txn.Epoch, txn.Timestamp, "does not exist"},
		{roachpb.Key("a"), roachpb.COMMITTED, txn.Epoch, txn.Timestamp, "txn \"test\" id=.*: already committed"},
		{roachpb.Key("b"), roachpb.ABORTED, txn.Epoch, txn.Timestamp, "txn aborted \"test\" id=.*"},
		{roachpb.Key("c"), roachpb.PENDING, txn.Epoch + 1, txn.Timestamp, "txn \"test\" id=.*: epoch regression: 0"},
		{roachpb.Key("d"), roachpb.PENDING, txn.Epoch, regressTS, `txn "test" id=.*: timestamp regression: 0.000000001,\d+`},
	}
	for i, test := range testCases {
		// Establish existing txn state by writing directly to range engine.
		existTxn := txn.Clone()
		existTxn.Key = test.key
		existTxn.Status = test.existStatus
		existTxn.Epoch = test.existEpoch
		existTxn.Timestamp = test.existTS
		txnKey := keys.TransactionKey(test.key, txn.ID)
		if test.existStatus != doesNotExist {
			if err := engine.MVCCPutProto(context.Background(), tc.rng.store.Engine(), nil, txnKey, hlc.ZeroTimestamp,
				nil, &existTxn); err != nil {
				t.Fatal(err)
			}
		}
		// End the transaction, verify expected error.
		txn.Key = test.key
		args, h := endTxnArgs(txn, true)
		txn.Sequence++
		if _, pErr := tc.SendWrappedWith(h, &args); !testutils.IsPError(pErr, test.expErrRegexp) {
			t.Errorf("%d: expected error:\n%s\not match:\n%s", i, pErr, test.expErrRegexp)
		} else if txn := pErr.GetTxn(); txn != nil && txn.ID == nil {
			// Prevent regression of #5591.
			t.Fatalf("%d: received empty Transaction proto in error", i)
		}
	}
}
// TestEndTransactionRollbackAbortedTransaction verifies that no error
// is returned when a transaction that has already been aborted is
// rolled back by an EndTransactionRequest.
func TestEndTransactionRollbackAbortedTransaction(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Keep txn records around after EndTransaction so the final status can
	// be inspected below.
	defer setTxnAutoGC(false)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	// Begin the txn and lay down an intent at key.
	key := []byte("a")
	txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	_, btH := beginTxnArgs(key, txn)
	put := putArgs(key, key)
	if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
		t.Fatal(pErr)
	}

	// Abort the transaction by pushing it with a higher priority.
	pusher := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	pusher.Priority = txn.Priority + 1 // will push successfully
	pushArgs := pushTxnArgs(pusher, btH.Txn, roachpb.PUSH_ABORT)
	if _, pErr := tc.SendWrapped(&pushArgs); pErr != nil {
		t.Fatal(pErr)
	}

	// Check if the intent has not yet been resolved.
	var ba roachpb.BatchRequest
	gArgs := getArgs(key)
	ba.Add(&gArgs)
	if err := ba.SetActiveTimestamp(tc.clock.Now); err != nil {
		t.Fatal(err)
	}
	_, pErr := tc.Sender().Send(context.Background(), ba)
	if _, ok := pErr.GetDetail().(*roachpb.WriteIntentError); !ok {
		t.Errorf("expected write intent error, but got %s", pErr)
	}

	// Abort the transaction again. No error is returned.
	args, h := endTxnArgs(txn, false)
	args.IntentSpans = []roachpb.Span{{Key: key}}
	resp, pErr := tc.SendWrappedWith(h, &args)
	if pErr != nil {
		t.Error(pErr)
	}
	reply := resp.(*roachpb.EndTransactionResponse)
	if reply.Txn.Status != roachpb.ABORTED {
		t.Errorf("expected transaction status to be ABORTED; got %s", reply.Txn.Status)
	}

	// Verify that the intent has been resolved: the same read that hit a
	// WriteIntentError above must now succeed.
	if _, pErr := tc.Sender().Send(context.Background(), ba); pErr != nil {
		t.Errorf("expected resolved intent, but got %s", pErr)
	}
}
// TestRaftReplayProtection verifies that non-transactional batches
// enjoy some protection from raft replays, but highlights an example
// where they won't.
func TestRaftReplayProtection(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer setTxnAutoGC(true)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	key := roachpb.Key("a")
	incs := []int64{1, 3, 7}
	// The first increment is applied twice below (the initial write plus the
	// earlier-timestamp retry that succeeds at a bumped timestamp), hence it
	// counts double in the expected sum.
	sum := 2 * incs[0]
	for _, n := range incs[1:] {
		sum += n
	}

	{
		// Start with an increment for key.
		incArgs := incrementArgs(key, incs[0])
		_, respH, pErr := SendWrapped(tc.Sender(), context.Background(), roachpb.Header{}, &incArgs)
		if pErr != nil {
			t.Fatal(pErr)
		}

		// Do an increment with timestamp to an earlier timestamp, but same key.
		// This will bump up to a higher timestamp than the original increment
		// and not surface a WriteTooOldError.
		h := roachpb.Header{Timestamp: respH.Timestamp.Prev()}
		_, respH, pErr = SendWrapped(tc.Sender(), context.Background(), h, &incArgs)
		if pErr != nil {
			t.Fatalf("unexpected error: %s", respH)
		}
		if expTS := h.Timestamp.Next().Next(); !respH.Timestamp.Equal(expTS) {
			t.Fatalf("expected too-old increment to advance two logical ticks to %s; got %s", expTS, respH.Timestamp)
		}

		// Do an increment with exact timestamp; should propagate write too
		// old error. This is assumed to be a replay because the timestamp
		// encountered is an exact duplicate and nothing came before the
		// increment in the batch.
		h.Timestamp = respH.Timestamp
		_, _, pErr = SendWrapped(tc.Sender(), context.Background(), h, &incArgs)
		if _, ok := pErr.GetDetail().(*roachpb.WriteTooOldError); !ok {
			t.Fatalf("expected WriteTooOldError; got %s", pErr)
		}
	}

	// Send a double increment in a batch. This should increment twice,
	// as the same key is being incremented in the same batch.
	var ba roachpb.BatchRequest
	for _, inc := range incs[1:] {
		incArgs := incrementArgs(key, inc)
		ba.Add(&incArgs)
	}
	br, pErr := tc.Sender().Send(context.Background(), ba)
	if pErr != nil {
		t.Fatalf("unexpected error: %s", pErr)
	}
	if latest := br.Responses[len(br.Responses)-1].GetInner().(*roachpb.IncrementResponse).NewValue; latest != sum {
		t.Fatalf("expected %d, got %d", sum, latest)
	}

	// Now resend the batch with the same timestamp; this should look
	// like the replay it is and surface a WriteTooOldError.
	ba.Timestamp = br.Timestamp
	_, pErr = tc.Sender().Send(context.Background(), ba)
	if _, ok := pErr.GetDetail().(*roachpb.WriteTooOldError); !ok {
		t.Fatalf("expected WriteTooOldError; got %s", pErr)
	}

	// Send a DeleteRange & increment.
	incArgs := incrementArgs(key, 1)
	ba = roachpb.BatchRequest{}
	ba.Add(roachpb.NewDeleteRange(key, key.Next(), false))
	ba.Add(&incArgs)
	br, pErr = tc.Sender().Send(context.Background(), ba)
	if pErr != nil {
		t.Fatalf("unexpected error: %s", pErr)
	}

	// Send exact same batch; the DeleteRange should trip up and
	// we'll get a replay error.
	ba.Timestamp = br.Timestamp
	_, pErr = tc.Sender().Send(context.Background(), ba)
	if _, ok := pErr.GetDetail().(*roachpb.WriteTooOldError); !ok {
		t.Fatalf("expected WriteTooOldError; got %s", pErr)
	}

	// Send just a DeleteRange batch.
	ba = roachpb.BatchRequest{}
	ba.Add(roachpb.NewDeleteRange(key, key.Next(), false))
	br, pErr = tc.Sender().Send(context.Background(), ba)
	if pErr != nil {
		t.Fatalf("unexpected error: %s", pErr)
	}

	// Now send it again; will not look like a replay because the
	// previous DeleteRange didn't leave any tombstones at this
	// timestamp for the replay to "trip" over. This is the example
	// of non-transactional replays NOT being caught.
	ba.Timestamp = br.Timestamp
	_, pErr = tc.Sender().Send(context.Background(), ba)
	if pErr != nil {
		t.Fatalf("unexpected error: %s", pErr)
	}
}
// TestRaftReplayProtectionInTxn verifies that transactional batches
// enjoy protection from raft replays.
func TestRaftReplayProtectionInTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer setTxnAutoGC(true)()
	ctx := TestStoreContext()
	tc := testContext{}
	tc.StartWithStoreContext(t, ctx)
	defer tc.Stop()

	key := roachpb.Key("a")
	txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)

	// Send a batch with begin txn, put & end txn.
	var ba roachpb.BatchRequest
	bt, btH := beginTxnArgs(key, txn)
	put := putArgs(key, []byte("value"))
	et, _ := endTxnArgs(txn, true)
	et.IntentSpans = []roachpb.Span{{Key: key, EndKey: nil}}
	ba.Header = btH
	ba.Add(&bt)
	ba.Add(&put)
	ba.Add(&et)
	_, pErr := tc.Sender().Send(context.Background(), ba)
	if pErr != nil {
		t.Fatalf("unexpected error: %s", pErr)
	}

	// Replay the same batch twice; each replay must be rejected.
	for i := 0; i < 2; i++ {
		// Reach in and manually send to raft (to simulate Raft replay) and
		// also avoid updating the timestamp cache; verify WriteTooOldError.
		ba.Timestamp = txn.OrigTimestamp
		ch, _, err := tc.rng.proposeRaftCommand(context.Background(), ba)
		if err != nil {
			t.Fatalf("%d: unexpected error: %s", i, err)
		}
		respWithErr := <-ch
		if _, ok := respWithErr.Err.GetDetail().(*roachpb.WriteTooOldError); !ok {
			t.Fatalf("%d: expected WriteTooOldError; got %s", i, respWithErr.Err)
		}
	}
}
// TestReplicaLaziness verifies that Raft Groups are brought up lazily.
func TestReplicaLaziness(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Each factory builds one request; serving any request against an
	// uninitialized replica must bring its Raft group up.
	requestFactories := []func() roachpb.Request{
		func() roachpb.Request {
			put := putArgs(roachpb.Key("a"), []byte("value"))
			return &put
		},
		func() roachpb.Request {
			get := getArgs(roachpb.Key("a"))
			return &get
		},
		func() roachpb.Request {
			scan := scanArgs(roachpb.KeyMin, roachpb.KeyMax)
			return &scan
		},
	}
	for _, makeRequest := range requestFactories {
		func() {
			tc := testContext{bootstrapMode: bootstrapRangeOnly}
			tc.Start(t)
			defer tc.Stop()
			// Before any request is served, the Raft group must not exist.
			if status := tc.rng.RaftStatus(); status != nil {
				t.Fatalf("expected raft group to not be initialized, got RaftStatus() of %v", status)
			}
			var batch roachpb.BatchRequest
			batch.Add(makeRequest())
			if _, pErr := tc.Sender().Send(context.Background(), batch); pErr != nil {
				t.Fatalf("unexpected error: %s", pErr)
			}
			// Serving the request must have initialized the Raft group.
			if tc.rng.RaftStatus() == nil {
				t.Fatalf("expected raft group to be initialized")
			}
		}()
	}
}
// TestReplayProtection verifies that transactional replays cannot
// commit intents. The replay consists of an initial BeginTxn/Write
// batch and ends with an EndTxn batch.
func TestReplayProtection(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer setTxnAutoGC(true)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	// Run the full scenario once per isolation level.
	for i, iso := range []enginepb.IsolationType{enginepb.SERIALIZABLE, enginepb.SNAPSHOT} {
		key := roachpb.Key(fmt.Sprintf("a-%d", i))
		keyB := roachpb.Key(fmt.Sprintf("b-%d", i))
		txn := newTransaction("test", key, 1, iso, tc.clock)

		// Send a batch with put to key.
		var ba roachpb.BatchRequest
		bt, btH := beginTxnArgs(key, txn)
		put := putArgs(key, []byte("value"))
		ba.Header = btH
		ba.Add(&bt)
		ba.Add(&put)
		if err := ba.SetActiveTimestamp(tc.clock.Now); err != nil {
			t.Fatal(err)
		}
		br, pErr := tc.Sender().Send(context.Background(), ba)
		if pErr != nil {
			t.Fatalf("%d: unexpected error: %s", i, pErr)
		}

		// Send a put for keyB.
		putB := putArgs(keyB, []byte("value"))
		putTxn := br.Txn.Clone()
		putTxn.Sequence++
		_, respH, pErr := SendWrapped(tc.Sender(), context.Background(), roachpb.Header{Txn: &putTxn}, &putB)
		if pErr != nil {
			t.Fatal(pErr)
		}

		// EndTransaction (commit), resolving both intents.
		etTxn := respH.Txn.Clone()
		etTxn.Sequence++
		et, etH := endTxnArgs(&etTxn, true)
		et.IntentSpans = []roachpb.Span{{Key: key, EndKey: nil}, {Key: keyB, EndKey: nil}}
		if _, pErr := tc.SendWrappedWith(etH, &et); pErr != nil {
			t.Fatalf("%d: unexpected error: %s", i, pErr)
		}

		// Verify txn record is cleaned.
		var readTxn roachpb.Transaction
		txnKey := keys.TransactionKey(txn.Key, txn.ID)
		ok, err := engine.MVCCGetProto(context.Background(), tc.rng.store.Engine(), txnKey, hlc.ZeroTimestamp, true /* consistent */, nil /* txn */, &readTxn)
		if err != nil || ok {
			t.Errorf("%d: expected transaction record to be cleared (%t): %s", i, ok, err)
		}

		// Now replay begin & put. BeginTransaction should fail with a replay error.
		_, pErr = tc.Sender().Send(context.Background(), ba)
		if _, ok := pErr.GetDetail().(*roachpb.TransactionReplayError); !ok {
			t.Errorf("%d: expected transaction replay for iso=%s; got %s", i, iso, pErr)
		}

		// Intent should not have been created.
		gArgs := getArgs(key)
		if _, pErr = tc.SendWrapped(&gArgs); pErr != nil {
			t.Errorf("%d: unexpected error reading key: %s", i, pErr)
		}

		// Send a put for keyB; should fail with a WriteTooOldError as this
		// will look like an obvious replay.
		_, _, pErr = SendWrapped(tc.Sender(), context.Background(), roachpb.Header{Txn: &putTxn}, &putB)
		if _, ok := pErr.GetDetail().(*roachpb.WriteTooOldError); !ok {
			t.Errorf("%d: expected write too old error for iso=%s; got %s", i, iso, pErr)
		}

		// EndTransaction should also fail, but with a status error (does not exist).
		_, pErr = tc.SendWrappedWith(etH, &et)
		if _, ok := pErr.GetDetail().(*roachpb.TransactionStatusError); !ok {
			t.Errorf("%d: expected transaction aborted for iso=%s; got %s", i, iso, pErr)
		}

		// Expect that keyB intent did not get written!
		gArgs = getArgs(keyB)
		if _, pErr = tc.SendWrapped(&gArgs); pErr != nil {
			t.Errorf("%d: unexpected error reading keyB: %s", i, pErr)
		}
	}
}
// TestEndTransactionLocalGC verifies that a transaction record is immediately
// garbage-collected upon EndTransaction iff all of the supplied intents are
// local relative to the transaction record's location.
func TestEndTransactionLocalGC(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer setTxnAutoGC(true)()
	tc := testContext{}
	tsc := TestStoreContext()
	tsc.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			// Make sure the direct GC path doesn't interfere with this test.
			if filterArgs.Req.Method() == roachpb.GC {
				return roachpb.NewErrorWithTxn(errors.Errorf("boom"), filterArgs.Hdr.Txn)
			}
			return nil
		}
	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()

	// Split so that intents at or beyond splitKey are non-local to the
	// range holding the transaction record.
	splitKey := roachpb.RKey("c")
	splitTestRange(tc.store, splitKey, splitKey, t)
	key := roachpb.Key("a")
	putKey := key
	for i, test := range []struct {
		intents []roachpb.Span
		expGC   bool
	}{
		// Range inside.
		{[]roachpb.Span{{Key: roachpb.Key("a"), EndKey: roachpb.Key("b")}}, true},
		// Two intents inside.
		{[]roachpb.Span{{Key: roachpb.Key("a")}, {Key: roachpb.Key("b")}}, true},
		// Intent range spilling over right endpoint.
		{[]roachpb.Span{{Key: roachpb.Key("a"), EndKey: splitKey.Next().AsRawKey()}}, false},
		// Intent range completely outside.
		{[]roachpb.Span{{Key: splitKey.AsRawKey(), EndKey: roachpb.Key("q")}}, false},
		// Intent inside and outside.
		{[]roachpb.Span{{Key: roachpb.Key("a")}, {Key: splitKey.AsRawKey()}}, false},
	} {
		txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
		_, btH := beginTxnArgs(key, txn)
		put := putArgs(putKey, key)
		if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
			t.Fatal(pErr)
		}
		putKey = putKey.Next() // for the next iteration
		args, h := endTxnArgs(txn, true)
		args.IntentSpans = test.intents
		txn.Sequence++
		if _, pErr := tc.SendWrappedWith(h, &args); pErr != nil {
			t.Fatal(pErr)
		}
		// The txn record must be gone iff all intents were local (expGC).
		var readTxn roachpb.Transaction
		txnKey := keys.TransactionKey(txn.Key, txn.ID)
		ok, err := engine.MVCCGetProto(context.Background(), tc.rng.store.Engine(), txnKey, hlc.ZeroTimestamp,
			true /* consistent */, nil /* txn */, &readTxn)
		if err != nil {
			t.Fatal(err)
		}
		if !ok != test.expGC {
			t.Errorf("%d: unexpected gc'ed: %t", i, !ok)
		}
	}
}
// setupResolutionTest splits the range at splitKey and writes intents
// through a new transaction at both key (left-hand range) and splitKey
// (right-hand range), then ends the transaction (committing iff commit)
// with an intent-span covering both writes. It returns the right-hand
// replica and the transaction used.
func setupResolutionTest(t *testing.T, tc testContext, key roachpb.Key,
	splitKey roachpb.RKey, commit bool) (*Replica, *roachpb.Transaction) {
	// Split the range and create an intent at splitKey and key.
	newRng := splitTestRange(tc.store, splitKey, splitKey, t)

	txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	// These increments are not required, but testing feels safer when zero
	// values are unexpected.
	txn.Sequence++
	txn.Epoch++
	pArgs := putArgs(key, []byte("value"))
	h := roachpb.Header{Txn: txn}
	if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), h, &pArgs); pErr != nil {
		t.Fatal(pErr)
	}

	{
		// Write the second intent on the right-hand range.
		var ba roachpb.BatchRequest
		ba.Header = h
		if err := ba.SetActiveTimestamp(newRng.store.Clock().Now); err != nil {
			t.Fatal(err)
		}
		pArgs := putArgs(splitKey.AsRawKey(), []byte("value"))
		ba.Add(&pArgs)
		txn.Sequence++
		if _, pErr := newRng.Send(context.Background(), ba); pErr != nil {
			t.Fatal(pErr)
		}
	}

	// End the transaction and resolve the intents.
	args, h := endTxnArgs(txn, commit)
	args.IntentSpans = []roachpb.Span{{Key: key, EndKey: splitKey.Next().AsRawKey()}}
	txn.Sequence++
	if _, pErr := tc.SendWrappedWith(h, &args); pErr != nil {
		t.Fatal(pErr)
	}
	return newRng, txn
}
// TestEndTransactionResolveOnlyLocalIntents verifies that an end transaction
// request resolves only local intents within the same batch.
func TestEndTransactionResolveOnlyLocalIntents(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tsc := TestStoreContext()
	key := roachpb.Key("a")
	splitKey := roachpb.RKey(key).Next()
	// Fail resolution of the non-local intent so we can observe that it was
	// not resolved as part of the EndTransaction batch.
	tsc.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			if filterArgs.Req.Method() == roachpb.ResolveIntentRange &&
				filterArgs.Req.Header().Key.Equal(splitKey.AsRawKey()) {
				return roachpb.NewErrorWithTxn(errors.Errorf("boom"), filterArgs.Hdr.Txn)
			}
			return nil
		}

	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()

	newRng, txn := setupResolutionTest(t, tc, key, splitKey, true /* commit */)

	// Check if the intent in the other range has not yet been resolved.
	{
		var ba roachpb.BatchRequest
		gArgs := getArgs(splitKey)
		ba.Add(&gArgs)
		if err := ba.SetActiveTimestamp(tc.clock.Now); err != nil {
			t.Fatal(err)
		}
		_, pErr := newRng.Send(context.Background(), ba)
		if _, ok := pErr.GetDetail().(*roachpb.WriteIntentError); !ok {
			t.Errorf("expected write intent error, but got %s", pErr)
		}
	}

	// Heartbeat the txn and verify that the unresolved (non-local) intent
	// is still tracked on the transaction record.
	txn.Sequence++
	hbArgs, h := heartbeatArgs(txn)
	reply, pErr := tc.SendWrappedWith(h, &hbArgs)
	if pErr != nil {
		t.Fatal(pErr)
	}
	hbResp := reply.(*roachpb.HeartbeatTxnResponse)
	expIntents := []roachpb.Span{{Key: splitKey.AsRawKey(), EndKey: splitKey.AsRawKey().Next()}}
	if !reflect.DeepEqual(hbResp.Txn.Intents, expIntents) {
		t.Fatalf("expected persisted intents %v, got %v",
			expIntents, hbResp.Txn.Intents)
	}
}
// TestEndTransactionDirectGC verifies that after successfully resolving the
// external intents of a transaction after EndTransaction, the transaction and
// abort cache records are purged on both the local range and non-local range.
func TestEndTransactionDirectGC(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	key := roachpb.Key("a")
	splitKey := roachpb.RKey(key).Next()
	tc.Start(t)
	defer tc.Stop()

	// Abort the txn (commit=false) so that an abort cache entry is generated.
	rightRng, txn := setupResolutionTest(t, tc, key, splitKey, false /* commit; abort generates an abort cache entry */)

	// Direct GC is asynchronous, so poll until all records are gone.
	util.SucceedsSoon(t, func() error {
		if gr, _, err := tc.rng.Get(context.Background(), tc.engine, roachpb.Header{}, roachpb.GetRequest{Span: roachpb.Span{Key: keys.TransactionKey(txn.Key, txn.ID)}}); err != nil {
			return err
		} else if gr.Value != nil {
			return errors.Errorf("txn entry still there: %+v", gr)
		}

		var entry roachpb.AbortCacheEntry
		if aborted, err := tc.rng.abortCache.Get(context.Background(), tc.engine, txn.ID, &entry); err != nil {
			t.Fatal(err)
		} else if aborted {
			return errors.Errorf("abort cache still populated: %v", entry)
		}
		if aborted, err := rightRng.abortCache.Get(context.Background(), tc.engine, txn.ID, &entry); err != nil {
			t.Fatal(err)
		} else if aborted {
			t.Fatalf("right-hand side abort cache still populated: %v", entry)
		}
		return nil
	})
}
// TestEndTransactionDirectGCFailure verifies that no immediate GC takes place
// if external intents can't be resolved (see also TestEndTransactionDirectGC).
func TestEndTransactionDirectGCFailure(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	key := roachpb.Key("a")
	splitKey := roachpb.RKey(key).Next()
	var count int64
	tsc := TestStoreContext()
	// Fail external intent resolution (counting attempts atomically) and
	// fatal on any GCRequest, which must never be issued in this scenario.
	tsc.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			if filterArgs.Req.Method() == roachpb.ResolveIntentRange &&
				filterArgs.Req.Header().Key.Equal(splitKey.AsRawKey()) {
				atomic.AddInt64(&count, 1)
				return roachpb.NewErrorWithTxn(errors.Errorf("boom"), filterArgs.Hdr.Txn)
			} else if filterArgs.Req.Method() == roachpb.GC {
				t.Fatalf("unexpected GCRequest: %+v", filterArgs.Req)
			}
			return nil
		}
	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()

	setupResolutionTest(t, tc, key, splitKey, true /* commit */)

	// Now test that no GCRequest is issued. We can't test that directly (since
	// it's completely asynchronous), so we first make sure ResolveIntent
	// happened and subsequently issue a bogus Put which is likely to make it
	// into Raft only after a rogue GCRequest (at least sporadically), which
	// would trigger a Fatal from the command filter.
	util.SucceedsSoon(t, func() error {
		if atomic.LoadInt64(&count) == 0 {
			return errors.Errorf("intent resolution not attempted yet")
		} else if err := tc.store.DB().Put("panama", "banana"); err != nil {
			return err
		}
		return nil
	})
}
// TestEndTransactionDirectGC_1PC runs a test similar to TestEndTransactionDirectGC
// for the case of a transaction which is contained in a single batch.
func TestEndTransactionDirectGC_1PC(t *testing.T) {
	defer leaktest.AfterTest(t)()
	for _, commit := range []bool{true, false} {
		func() {
			tc := testContext{}
			tc.Start(t)
			defer tc.Stop()

			key := roachpb.Key("a")
			txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
			// BeginTxn, Put and EndTxn in a single batch to exercise the
			// one phase commit path.
			bt, _ := beginTxnArgs(key, txn)
			put := putArgs(key, []byte("value"))
			et, etH := endTxnArgs(txn, commit)
			et.IntentSpans = []roachpb.Span{{Key: key}}

			var ba roachpb.BatchRequest
			ba.Header = etH
			ba.Add(&bt, &put, &et)
			br, err := tc.Sender().Send(context.Background(), ba)
			if err != nil {
				t.Fatalf("commit=%t: %s", commit, err)
			}
			etArgs, ok := br.Responses[len(br.Responses)-1].GetInner().(*roachpb.EndTransactionResponse)
			if !ok || !etArgs.OnePhaseCommit {
				t.Errorf("commit=%t: expected one phase commit", commit)
			}

			// No abort cache entry may remain for either outcome.
			var entry roachpb.AbortCacheEntry
			if aborted, err := tc.rng.abortCache.Get(context.Background(), tc.engine, txn.ID, &entry); err != nil {
				t.Fatal(err)
			} else if aborted {
				t.Fatalf("commit=%t: abort cache still populated: %v", commit, entry)
			}
		}()
	}
}
// TestReplicaResolveIntentNoWait verifies that resolving a committed intent
// with wait=false returns without error and that the ResolveIntent request
// is nonetheless eventually processed (observed via the command filter).
func TestReplicaResolveIntentNoWait(t *testing.T) {
	defer leaktest.AfterTest(t)()
	var seen int32
	key := roachpb.Key("zresolveme")
	tsc := TestStoreContext()
	// Record asynchronous resolution of our key via the command filter.
	tsc.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			if filterArgs.Req.Method() == roachpb.ResolveIntent &&
				filterArgs.Req.Header().Key.Equal(key) {
				atomic.StoreInt32(&seen, 1)
			}
			return nil
		}

	tc := testContext{}
	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()
	splitKey := roachpb.RKey("aa")
	setupResolutionTest(t, tc, roachpb.Key("a") /* irrelevant */, splitKey, true /* commit */)
	txn := newTransaction("name", key, 1, enginepb.SERIALIZABLE, tc.clock)
	txn.Status = roachpb.COMMITTED
	if pErr := tc.store.intentResolver.resolveIntents(context.Background(),
		[]roachpb.Intent{{
			Span:   roachpb.Span{Key: key},
			Txn:    txn.TxnMeta,
			Status: txn.Status,
		}}, false /* !wait */, false /* !poison; irrelevant */); pErr != nil {
		t.Fatal(pErr)
	}
	// The call returned immediately; poll until the resolution is seen.
	util.SucceedsSoon(t, func() error {
		if atomic.LoadInt32(&seen) > 0 {
			return nil
		}
		return fmt.Errorf("no intent resolution on %q so far", key)
	})
}
// TestSequenceCachePoisonOnResolve verifies that when an intent is
// aborted, the abort cache on the respective Range is poisoned and
// the pushee is presented with a txn abort on its next contact with
// the Range in the same epoch.
func TestSequenceCachePoisonOnResolve(t *testing.T) {
	defer leaktest.AfterTest(t)()
	key := roachpb.Key("a")

	// Isolation of the pushee and whether we're going to abort it.
	// Run the actual meat of the test, which pushes the pushee and
	// checks whether we get the correct behaviour as it touches the
	// Range again.
	run := func(abort bool, iso enginepb.IsolationType) {
		tc := testContext{}
		tc.Start(t)
		defer tc.Stop()

		pushee := newTransaction("test", key, 1, iso, tc.clock)
		pusher := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
		pusher.Priority = 2
		pushee.Priority = 1 // pusher will win

		// inc issues an increment on behalf of actor, beginning actor's txn
		// on first use and bumping its sequence on success.
		inc := func(actor *roachpb.Transaction, k roachpb.Key) (*roachpb.IncrementResponse, *roachpb.Error) {
			reply, pErr := maybeWrapWithBeginTransaction(tc.store, nil, roachpb.Header{
				Txn:     actor,
				RangeID: 1,
			}, &roachpb.IncrementRequest{Span: roachpb.Span{Key: k}, Increment: 123})
			if pErr != nil {
				return nil, pErr
			}
			actor.Writing = true
			actor.Sequence++
			return reply.(*roachpb.IncrementResponse), nil
		}
		// get issues a read on behalf of actor.
		get := func(actor *roachpb.Transaction, k roachpb.Key) *roachpb.Error {
			actor.Sequence++
			_, pErr := client.SendWrappedWith(tc.store, nil, roachpb.Header{
				Txn:     actor,
				RangeID: 1,
			}, &roachpb.GetRequest{Span: roachpb.Span{Key: k}})
			return pErr
		}

		// Write an intent (this also begins the pushee's transaction).
		if _, pErr := inc(pushee, key); pErr != nil {
			t.Fatal(pErr)
		}

		// Have the pusher run into the intent. That pushes our pushee and
		// resolves the intent, which in turn should poison the abort cache.
		var assert func(*roachpb.Error)
		if abort {
			// Write/Write conflict will abort pushee.
			if _, pErr := inc(pusher, key); pErr != nil {
				t.Fatal(pErr)
			}
			assert = func(pErr *roachpb.Error) {
				if _, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); !ok {
					t.Fatalf("abort=%t, iso=%s: expected txn abort, got %s", abort, iso, pErr)
				}
			}
		} else {
			// Verify we're not poisoned.
			assert = func(pErr *roachpb.Error) {
				if pErr != nil {
					t.Fatalf("abort=%t, iso=%s: unexpected: %s", abort, iso, pErr)
				}
			}
		}

		// Our assert should be true for any reads or writes.
		pErr := get(pushee, key)
		assert(pErr)
		_, pErr = inc(pushee, key)
		assert(pErr)
		// Still poisoned (on any key on the Range).
		pErr = get(pushee, key.Next())
		assert(pErr)
		_, pErr = inc(pushee, key.Next())
		assert(pErr)
		// Pretend we're coming back. Increasing the epoch on an abort should
		// still fail obviously, while on no abort will succeed.
		pushee.Epoch++
		_, pErr = inc(pushee, roachpb.Key("b"))
		assert(pErr)
	}

	// Exercise every combination of abort decision and isolation level.
	for _, abort := range []bool{false, true} {
		run(abort, enginepb.SERIALIZABLE)
		run(abort, enginepb.SNAPSHOT)
	}
}
// TestAbortCacheError verifies that roachpb.Errors returned by checkIfTxnAborted
// have txns that are identical to txns stored in Transaction{Retry,Aborted}Error.
func TestAbortCacheError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	txn := roachpb.Transaction{}
	txn.ID = uuid.NewV4()
	txn.Priority = 1
	txn.Sequence = 1
	txn.Timestamp = hlc.Timestamp{WallTime: 1}

	// Seed the abort cache with an entry for txn carrying a higher
	// timestamp and priority than the txn itself.
	key := roachpb.Key("k")
	ts := txn.Timestamp.Next()
	priority := int32(10)
	entry := roachpb.AbortCacheEntry{
		Key:       key,
		Timestamp: ts,
		Priority:  priority,
	}
	if err := tc.rng.abortCache.Put(context.Background(), tc.engine, nil, txn.ID, &entry); err != nil {
		t.Fatal(err)
	}

	pErr := tc.rng.checkIfTxnAborted(context.Background(), tc.engine, txn)
	if _, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); ok {
		expected := txn.Clone()
		// NOTE(review): this assignment is a no-op since expected is a clone
		// of txn; possibly `ts` (the cache entry's timestamp) was intended —
		// confirm against checkIfTxnAborted's contract.
		expected.Timestamp = txn.Timestamp
		expected.Priority = priority
		if pErr.GetTxn() == nil || !reflect.DeepEqual(pErr.GetTxn(), &expected) {
			t.Errorf("txn does not match: %s vs. %s", pErr.GetTxn(), expected)
		}
	} else {
		t.Errorf("unexpected error: %s", pErr)
	}
}
// TestPushTxnBadKey verifies that args.Key equals args.PusheeTxn.ID.
func TestPushTxnBadKey(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	pusherTxn := newTransaction("test", roachpb.Key("a"), 1, enginepb.SERIALIZABLE, tc.clock)
	pusheeTxn := newTransaction("test", roachpb.Key("b"), 1, enginepb.SERIALIZABLE, tc.clock)

	// Deliberately point the request key at the pusher instead of the
	// pushee; the server must reject the mismatch.
	pushReq := pushTxnArgs(pusherTxn, pusheeTxn, roachpb.PUSH_ABORT)
	pushReq.Key = pusherTxn.Key

	_, pErr := tc.SendWrapped(&pushReq)
	if !testutils.IsPError(pErr, ".*should match pushee.*") {
		t.Errorf("unexpected error %s", pErr)
	}
}
// TestPushTxnAlreadyCommittedOrAborted verifies success
// (noop) in event that pushee is already committed or aborted.
func TestPushTxnAlreadyCommittedOrAborted(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// This test simulates running into an open intent and resolving it using
	// the transaction record. If we auto-gc'ed entries here, the entry would
	// be deleted and the intents resolved instantaneously on successful commit
	// (since they're on the same Range). Could split the range and have
	// non-local intents if we ever wanted to get rid of this.
	defer setTxnAutoGC(false)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	for i, status := range []roachpb.TransactionStatus{roachpb.COMMITTED, roachpb.ABORTED} {
		key := roachpb.Key(fmt.Sprintf("key-%d", i))
		pusher := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
		pushee := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
		pusher.Priority = 1
		pushee.Priority = 2 // pusher will lose, meaning we shouldn't push unless pushee is already ended.

		// Begin the pushee's transaction.
		_, btH := beginTxnArgs(key, pushee)
		put := putArgs(key, key)
		if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
			t.Fatal(pErr)
		}
		// End the pushee's transaction (commit iff status is COMMITTED).
		etArgs, h := endTxnArgs(pushee, status == roachpb.COMMITTED)
		pushee.Sequence++
		if _, pErr := tc.SendWrappedWith(h, &etArgs); pErr != nil {
			t.Fatal(pErr)
		}

		// Now try to push what's already committed or aborted.
		args := pushTxnArgs(pusher, pushee, roachpb.PUSH_ABORT)
		resp, pErr := tc.SendWrapped(&args)
		if pErr != nil {
			t.Fatal(pErr)
		}
		reply := resp.(*roachpb.PushTxnResponse)
		// The push must succeed, reporting the pushee's final status.
		if reply.PusheeTxn.Status != status {
			t.Errorf("expected push txn to return with status == %s; got %+v", status, reply.PusheeTxn)
		}
	}
}
// TestPushTxnUpgradeExistingTxn verifies that pushing
// a transaction record with a new epoch upgrades the pushee's
// epoch and timestamp if greater. In all test cases, the
// priorities are set such that the push will succeed.
func TestPushTxnUpgradeExistingTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	ts1 := hlc.Timestamp{WallTime: 1}
	ts2 := hlc.Timestamp{WallTime: 2}
	testCases := []struct {
		// startTS is the pushee's persisted timestamp, ts the timestamp
		// carried by the push, and expTS the timestamp expected on the
		// pushed txn afterwards.
		startTS, ts, expTS hlc.Timestamp
	}{
		// Noop.
		{ts1, ts1, ts1},
		// Move timestamp forward.
		{ts1, ts2, ts2},
		// Move timestamp backwards (has no effect).
		{ts2, ts1, ts2},
	}

	for i, test := range testCases {
		key := roachpb.Key(fmt.Sprintf("key-%d", i))
		pusher := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
		pushee := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
		pushee.Priority = 1
		pushee.Epoch = 12345
		pusher.Priority = 2   // Pusher will win
		pusher.Writing = true // expected when a txn is heartbeat

		// First, establish "start" of existing pushee's txn via BeginTransaction.
		pushee.Timestamp = test.startTS
		pushee.LastHeartbeat = &test.startTS
		_, btH := beginTxnArgs(key, pushee)
		put := putArgs(key, key)
		if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
			t.Fatal(pErr)
		}

		// Now, attempt to push the transaction using updated timestamp.
		pushee.Timestamp = test.ts
		args := pushTxnArgs(pusher, pushee, roachpb.PUSH_ABORT)
		resp, pErr := tc.SendWrapped(&args)
		if pErr != nil {
			t.Fatal(pErr)
		}
		reply := resp.(*roachpb.PushTxnResponse)
		// The pushed txn must be aborted with the expected timestamp and
		// all other fields unchanged.
		expTxn := pushee.Clone()
		expTxn.Epoch = pushee.Epoch // no change
		expTxn.Timestamp = test.expTS
		expTxn.Status = roachpb.ABORTED
		expTxn.LastHeartbeat = &test.startTS
		expTxn.Writing = true

		if !reflect.DeepEqual(expTxn, reply.PusheeTxn) {
			t.Fatalf("unexpected push txn in trial %d; expected:\n%+v\ngot:\n%+v", i, expTxn, reply.PusheeTxn)
		}
	}
}
// TestPushTxnHeartbeatTimeout verifies that a txn which
// hasn't been heartbeat within 2x the heartbeat interval can be
// pushed/aborted.
func TestPushTxnHeartbeatTimeout(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	ts := hlc.Timestamp{WallTime: 1}
	ns := base.DefaultHeartbeatInterval.Nanoseconds()
	testCases := []struct {
		heartbeat   hlc.Timestamp // zero value indicates no heartbeat
		currentTime int64         // nanoseconds
		pushType    roachpb.PushTxnType
		expSuccess  bool
	}{
		// Avoid using 0 as currentTime since our manualClock is at 0 and we
		// don't want to have outcomes depend on random logical ticks.
		{hlc.ZeroTimestamp, 1, roachpb.PUSH_TIMESTAMP, false},
		{hlc.ZeroTimestamp, 1, roachpb.PUSH_ABORT, false},
		{hlc.ZeroTimestamp, 1, roachpb.PUSH_TOUCH, false},
		{hlc.ZeroTimestamp, 1, roachpb.PUSH_QUERY, true},
		{hlc.ZeroTimestamp, ns, roachpb.PUSH_TIMESTAMP, false},
		{hlc.ZeroTimestamp, ns, roachpb.PUSH_ABORT, false},
		{hlc.ZeroTimestamp, ns, roachpb.PUSH_TOUCH, false},
		{hlc.ZeroTimestamp, ns, roachpb.PUSH_QUERY, true},
		{hlc.ZeroTimestamp, ns*2 - 1, roachpb.PUSH_TIMESTAMP, false},
		{hlc.ZeroTimestamp, ns*2 - 1, roachpb.PUSH_ABORT, false},
		{hlc.ZeroTimestamp, ns*2 - 1, roachpb.PUSH_TOUCH, false},
		{hlc.ZeroTimestamp, ns*2 - 1, roachpb.PUSH_QUERY, true},
		{hlc.ZeroTimestamp, ns * 2, roachpb.PUSH_TIMESTAMP, false},
		{hlc.ZeroTimestamp, ns * 2, roachpb.PUSH_ABORT, false},
		{hlc.ZeroTimestamp, ns * 2, roachpb.PUSH_TOUCH, false},
		{hlc.ZeroTimestamp, ns * 2, roachpb.PUSH_QUERY, true},
		{ts, ns*2 + 1, roachpb.PUSH_TIMESTAMP, false},
		{ts, ns*2 + 1, roachpb.PUSH_ABORT, false},
		{ts, ns*2 + 1, roachpb.PUSH_TOUCH, false},
		{ts, ns*2 + 1, roachpb.PUSH_QUERY, true},
		{ts, ns*2 + 2, roachpb.PUSH_TIMESTAMP, true},
		{ts, ns*2 + 2, roachpb.PUSH_ABORT, true},
		{ts, ns*2 + 2, roachpb.PUSH_TOUCH, true},
		{ts, ns*2 + 2, roachpb.PUSH_QUERY, true},
	}

	for i, test := range testCases {
		key := roachpb.Key(fmt.Sprintf("key-%d", i))
		pushee := newTransaction(fmt.Sprintf("test-%d", i), key, 1, enginepb.SERIALIZABLE, tc.clock)
		pusher := newTransaction("pusher", key, 1, enginepb.SERIALIZABLE, tc.clock)
		pushee.Priority = 2
		pusher.Priority = 1 // Pusher won't win based on priority.

		// First, establish "start" of existing pushee's txn via BeginTransaction.
		if !test.heartbeat.Equal(hlc.ZeroTimestamp) {
			pushee.LastHeartbeat = &test.heartbeat
		}
		_, btH := beginTxnArgs(key, pushee)
		btH.Timestamp = tc.rng.store.Clock().Now()
		put := putArgs(key, key)
		if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
			t.Fatalf("%d: %s", i, pErr)
		}

		// Now, attempt to push the transaction with Now set to our current time.
		args := pushTxnArgs(pusher, pushee, test.pushType)
		args.Now = hlc.Timestamp{WallTime: test.currentTime}
		args.PushTo = args.Now
		reply, pErr := tc.SendWrapped(&args)

		if test.expSuccess != (pErr == nil) {
			t.Fatalf("%d: expSuccess=%t; got pErr %s, reply %+v", i,
				test.expSuccess, pErr, reply)
		}
		if pErr != nil {
			if _, ok := pErr.GetDetail().(*roachpb.TransactionPushError); !ok {
				t.Errorf("%d: expected txn push error: %s", i, pErr)
			}
		} else if test.pushType != roachpb.PUSH_QUERY {
			// Any successful non-query push must have aborted the pushee.
			if txn := reply.(*roachpb.PushTxnResponse).PusheeTxn; txn.Status != roachpb.ABORTED {
				t.Errorf("%d: expected aborted transaction, got %s", i, txn)
			}
		}
	}
}
// TestResolveIntentPushTxnReplyTxn makes sure that no Txn is returned from
// PushTxn and that it and ResolveIntent{,Range} can not be carried out in a
// transaction.
func TestResolveIntentPushTxnReplyTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	b := tc.engine.NewBatch()
	defer b.Close()
	// The pushee is a clone of the pusher with strictly lower priority, so a
	// PUSH_ABORT would ordinarily be able to succeed.
	txn := newTransaction("test", roachpb.Key("test"), 1, enginepb.SERIALIZABLE, tc.clock)
	txnPushee := txn.Clone()
	txnPushee.Priority--
	pa := pushTxnArgs(txn, &txnPushee, roachpb.PUSH_ABORT)
	var ms enginepb.MVCCStats
	var ra roachpb.ResolveIntentRequest
	var rra roachpb.ResolveIntentRangeRequest
	ctx := context.Background()
	// Should not be able to push or resolve in a transaction: all three
	// commands must reject a request whose header carries a Txn.
	if _, err := tc.rng.PushTxn(ctx, b, &ms, roachpb.Header{Txn: txn}, pa); !testutils.IsError(err, errTransactionUnsupported.Error()) {
		t.Fatalf("transactional PushTxn returned unexpected error: %v", err)
	}
	if _, err := tc.rng.ResolveIntent(ctx, b, &ms, roachpb.Header{Txn: txn}, ra); !testutils.IsError(err, errTransactionUnsupported.Error()) {
		t.Fatalf("transactional ResolveIntent returned unexpected error: %v", err)
	}
	if _, err := tc.rng.ResolveIntentRange(ctx, b, &ms, roachpb.Header{Txn: txn}, rra); !testutils.IsError(err, errTransactionUnsupported.Error()) {
		t.Fatalf("transactional ResolveIntentRange returned unexpected error: %v", err)
	}
	// Should not get a transaction back from PushTxn. It used to erroneously
	// return args.PusherTxn.
	if reply, err := tc.rng.PushTxn(ctx, b, &ms, roachpb.Header{}, pa); err != nil {
		t.Fatal(err)
	} else if reply.Txn != nil {
		t.Fatalf("expected nil response txn, but got %s", reply.Txn)
	}
}
// TestPushTxnPriorities verifies that txns with lower
// priority are pushed; if priorities are equal, then the txns
// are ordered by txn timestamp, with the more recent timestamp
// being pushable.
// TODO(tschottdorf): we should have a randomized version of this test which
// also simulates the client proto and persisted record diverging. For example,
// clients may be using a higher timestamp for their push or the persisted
// record (which they are not using) might have a higher timestamp, and even
// in the presence of such skewed information, conflicts between two (or more)
// conflicting transactions must not deadlock (see #5685 for an example of this
// happening with older code).
func TestPushTxnPriorities(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	ts1 := hlc.Timestamp{WallTime: 1}
	ts2 := hlc.Timestamp{WallTime: 2}
	testCases := []struct {
		pusherPriority, pusheePriority int32
		pusherTS, pusheeTS             hlc.Timestamp
		pushType                       roachpb.PushTxnType
		expSuccess                     bool
	}{
		// Pusher with higher priority succeeds.
		{2, 1, ts1, ts1, roachpb.PUSH_TIMESTAMP, true},
		{2, 1, ts1, ts1, roachpb.PUSH_ABORT, true},
		// Pusher with lower priority fails.
		{1, 2, ts1, ts1, roachpb.PUSH_ABORT, false},
		{1, 2, ts1, ts1, roachpb.PUSH_TIMESTAMP, false},
		// Pusher with lower priority fails, even with older txn timestamp.
		{1, 2, ts1, ts2, roachpb.PUSH_ABORT, false},
		// Pusher has lower priority, but older txn timestamp allows success if
		// !abort since there's nothing to do.
		{1, 2, ts1, ts2, roachpb.PUSH_TIMESTAMP, true},
		// With same priorities, larger Txn ID wins. Timestamp does not matter
		// (unless it implies that nothing needs to be pushed in the first
		// place; see above).
		// Note: in this test, the pusher has the larger ID.
		{1, 1, ts1, ts1, roachpb.PUSH_ABORT, true},
		{1, 1, ts1, ts1, roachpb.PUSH_TIMESTAMP, true},
		{1, 1, ts2, ts1, roachpb.PUSH_ABORT, true},
		{1, 1, ts2, ts1, roachpb.PUSH_TIMESTAMP, true},
		// When touching, priority never wins.
		{2, 1, ts1, ts1, roachpb.PUSH_TOUCH, false},
		{1, 2, ts1, ts1, roachpb.PUSH_TOUCH, false},
		// When updating, priority always succeeds.
		{2, 1, ts1, ts1, roachpb.PUSH_QUERY, true},
		{1, 2, ts1, ts1, roachpb.PUSH_QUERY, true},
	}
	for i, test := range testCases {
		// Use a distinct key per case so earlier pushes don't interfere.
		key := roachpb.Key(fmt.Sprintf("key-%d", i))
		pusher := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
		pushee := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
		pusher.Priority = test.pusherPriority
		pushee.Priority = test.pusheePriority
		pusher.Timestamp = test.pusherTS
		pushee.Timestamp = test.pusheeTS
		// Make sure pusher ID is greater; if priorities and timestamps are the same,
		// the greater ID succeeds with push.
		if bytes.Compare(pusher.ID.GetBytes(), pushee.ID.GetBytes()) < 0 {
			pusher.ID, pushee.ID = pushee.ID, pusher.ID
		}
		// Write the pushee's txn record by starting its transaction with a put.
		_, btH := beginTxnArgs(key, pushee)
		put := putArgs(key, key)
		if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
			t.Fatal(pErr)
		}
		// Now, attempt to push the transaction with intent epoch set appropriately.
		args := pushTxnArgs(pusher, pushee, test.pushType)
		_, pErr := tc.SendWrapped(&args)
		if test.expSuccess != (pErr == nil) {
			t.Errorf("expected success on trial %d? %t; got err %s", i, test.expSuccess, pErr)
		}
		// A failed push must surface specifically as a TransactionPushError.
		if pErr != nil {
			if _, ok := pErr.GetDetail().(*roachpb.TransactionPushError); !ok {
				t.Errorf("expected txn push error: %s", pErr)
			}
		}
	}
}
// TestPushTxnPushTimestamp verifies that with a PUSH_TIMESTAMP push
// (i.e. for read/write conflict), the pushed txn keeps status
// PENDING, but has its txn Timestamp moved forward to the pusher's
// txn Timestamp + 1 (logical tick).
func TestPushTxnPushTimestamp(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	pusher := newTransaction("test", roachpb.Key("a"), 1, enginepb.SERIALIZABLE, tc.clock)
	pushee := newTransaction("test", roachpb.Key("b"), 1, enginepb.SERIALIZABLE, tc.clock)
	pusher.Priority = 2
	pushee.Priority = 1 // pusher will win
	// Pusher is well ahead of the pushee, so the push has work to do.
	pusher.Timestamp = hlc.Timestamp{WallTime: 50, Logical: 25}
	pushee.Timestamp = hlc.Timestamp{WallTime: 5, Logical: 1}
	// Write the pushee's txn record by starting its transaction with a put.
	key := roachpb.Key("a")
	_, btH := beginTxnArgs(key, pushee)
	put := putArgs(key, key)
	if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
		t.Fatal(pErr)
	}
	pushee.Writing = true
	// Now, push the pushee's timestamp forward with PUSH_TIMESTAMP.
	args := pushTxnArgs(pusher, pushee, roachpb.PUSH_TIMESTAMP)
	resp, pErr := tc.SendWrapped(&args)
	if pErr != nil {
		t.Errorf("unexpected error on push: %s", pErr)
	}
	// Expect the pushee to land one logical tick past the pusher's timestamp.
	expTS := pusher.Timestamp
	expTS.Logical++
	reply := resp.(*roachpb.PushTxnResponse)
	if !reply.PusheeTxn.Timestamp.Equal(expTS) {
		t.Errorf("expected timestamp to be pushed to %+v; got %+v", expTS, reply.PusheeTxn.Timestamp)
	}
	if reply.PusheeTxn.Status != roachpb.PENDING {
		t.Errorf("expected pushed txn to have status PENDING; got %s", reply.PusheeTxn.Status)
	}
}
// TestPushTxnPushTimestampAlreadyPushed verifies that pushing
// a timestamp forward which is already far enough forward is a simple
// noop. We do this by ensuring that priorities would otherwise make
// pushing impossible.
func TestPushTxnPushTimestampAlreadyPushed(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	pusher := newTransaction("test", roachpb.Key("a"), 1, enginepb.SERIALIZABLE, tc.clock)
	pushee := newTransaction("test", roachpb.Key("b"), 1, enginepb.SERIALIZABLE, tc.clock)
	pusher.Priority = 1
	pushee.Priority = 2 // pusher will lose
	// The pushee's timestamp is already ahead of the pusher's, so a
	// PUSH_TIMESTAMP has nothing to do and should succeed despite priorities.
	pusher.Timestamp = hlc.Timestamp{WallTime: 50, Logical: 0}
	pushee.Timestamp = hlc.Timestamp{WallTime: 50, Logical: 1}
	// Write the pushee's txn record by starting its transaction with a put.
	key := roachpb.Key("a")
	_, btH := beginTxnArgs(key, pushee)
	put := putArgs(key, key)
	if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
		t.Fatal(pErr)
	}
	// Now, push the pushee's timestamp with PUSH_TIMESTAMP; expect a noop.
	args := pushTxnArgs(pusher, pushee, roachpb.PUSH_TIMESTAMP)
	resp, pErr := tc.SendWrapped(&args)
	if pErr != nil {
		t.Errorf("unexpected pError on push: %s", pErr)
	}
	reply := resp.(*roachpb.PushTxnResponse)
	// Timestamp must be unchanged and the txn still PENDING.
	if !reply.PusheeTxn.Timestamp.Equal(pushee.Timestamp) {
		t.Errorf("expected timestamp to be equal to original %+v; got %+v", pushee.Timestamp, reply.PusheeTxn.Timestamp)
	}
	if reply.PusheeTxn.Status != roachpb.PENDING {
		t.Errorf("expected pushed txn to have status PENDING; got %s", reply.PusheeTxn.Status)
	}
}
// TestPushTxnSerializableRestart simulates a transaction which is
// started at t=0, fails serializable commit due to a read at a key
// being written at t=1, is then restarted at the updated timestamp,
// but before the txn can be retried, it's pushed to t=2, an even
// higher timestamp. The test verifies that the serializable commit
// fails yet again, preventing regression of a bug in which we blindly
// overwrote the transaction record on BeginTransaction..
func TestPushTxnSerializableRestart(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	key := roachpb.Key("a")
	pushee := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	pusher := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	pushee.Priority = 1
	pusher.Priority = 2 // pusher will win
	// Read from the key to increment the timestamp cache.
	gArgs := getArgs(key)
	if _, pErr := tc.SendWrapped(&gArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Begin the pushee's transaction & write to key.
	btArgs, btH := beginTxnArgs(key, pushee)
	put := putArgs(key, []byte("foo"))
	resp, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put)
	if pErr != nil {
		t.Fatal(pErr)
	}
	pushee.Update(resp.Header().Txn)
	// Try to end the pushee's transaction; should get a retry failure
	// because the timestamp cache read above forces a timestamp bump.
	etArgs, h := endTxnArgs(pushee, true /* commit */)
	pushee.Sequence++
	_, pErr = tc.SendWrappedWith(h, &etArgs)
	if _, ok := pErr.GetDetail().(*roachpb.TransactionRetryError); !ok {
		t.Fatalf("expected retry error; got %s", pErr)
	}
	// Keep a pre-restart copy so the push below targets the persisted record.
	pusheeCopy := *pushee
	pushee.Restart(1, 1, pusher.Timestamp)
	// Next push pushee to advance timestamp of txn record.
	pusher.Timestamp = tc.rng.store.Clock().Now()
	args := pushTxnArgs(pusher, &pusheeCopy, roachpb.PUSH_TIMESTAMP)
	if _, pErr := tc.SendWrapped(&args); pErr != nil {
		t.Fatal(pErr)
	}
	// Try to end pushed transaction at restart timestamp, which is
	// earlier than its now-pushed timestamp. Should fail.
	var ba roachpb.BatchRequest
	pushee.Sequence++
	ba.Header.Txn = pushee
	ba.Add(&btArgs)
	ba.Add(&put)
	ba.Add(&etArgs)
	_, pErr = tc.Sender().Send(context.Background(), ba)
	if _, ok := pErr.GetDetail().(*roachpb.TransactionRetryError); !ok {
		t.Fatalf("expected retry error; got %s", pErr)
	}
	// Verify that the returned transaction has timestamp equal to the
	// pushed timestamp. This verifies that the BeginTransaction found
	// the pushed record and propagated it.
	if txn := pErr.GetTxn(); !txn.Timestamp.Equal(pusher.Timestamp.Next()) {
		t.Errorf("expected retry error txn timestamp %s; got %s", pusher.Timestamp, txn.Timestamp)
	}
}
// TestReplicaResolveIntentRange verifies resolving a range of intents.
func TestReplicaResolveIntentRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	keys := []roachpb.Key{roachpb.Key("a"), roachpb.Key("b")}
	txn := newTransaction("test", keys[0], 1, enginepb.SERIALIZABLE, tc.clock)
	// Put two values transactionally, leaving an intent on each key.
	for _, key := range keys {
		pArgs := putArgs(key, []byte("value1"))
		txn.Sequence++
		if _, pErr := tc.SendWrappedWith(roachpb.Header{Txn: txn}, &pArgs); pErr != nil {
			t.Fatal(pErr)
		}
	}
	// Resolve the intents with a single ranged request covering both keys.
	rArgs := &roachpb.ResolveIntentRangeRequest{
		Span: roachpb.Span{
			Key:    roachpb.Key("a"),
			EndKey: roachpb.Key("c"),
		},
		IntentTxn: txn.TxnMeta,
		Status:    roachpb.COMMITTED,
	}
	if _, pErr := tc.SendWrapped(rArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Do a consistent scan to verify intents have been cleared: a consistent
	// scan would fail with a WriteIntentError if any intent remained.
	sArgs := scanArgs(roachpb.Key("a"), roachpb.Key("c"))
	reply, pErr := tc.SendWrapped(&sArgs)
	if pErr != nil {
		t.Fatalf("unexpected error on scan: %s", pErr)
	}
	sReply := reply.(*roachpb.ScanResponse)
	if len(sReply.Rows) != 2 {
		t.Errorf("expected 2 rows; got %v", sReply.Rows)
	}
}
// verifyRangeStats loads the persisted MVCC stats for rangeID from eng and
// fails the test if they do not match expMS exactly. Mismatches are reported
// with the immediate caller's file and line for easier attribution.
func verifyRangeStats(eng engine.Engine, rangeID roachpb.RangeID, expMS enginepb.MVCCStats, t *testing.T) {
	var actual enginepb.MVCCStats
	err := engine.MVCCGetRangeStats(context.Background(), eng, rangeID, &actual)
	if err != nil {
		t.Fatal(err)
	}
	if reflect.DeepEqual(expMS, actual) {
		return
	}
	f, l, _ := caller.Lookup(1)
	t.Errorf("%s:%d: expected stats \n %+v;\ngot \n %+v", f, l, expMS, actual)
}
// TestReplicaStatsComputation verifies that commands executed against a
// range update the range stat counters. The stat values are
// empirically derived; we're really just testing that they increment
// in the right ways, not the exact amounts. If the encodings change,
// will need to update this test.
func TestReplicaStatsComputation(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{
		bootstrapMode: bootstrapRangeOnly,
	}
	tc.Start(t)
	defer tc.Stop()
	baseStats := initialStats()
	// Add in the contribution for the range lease request.
	baseStats.Add(enginepb.MVCCStats{
		SysCount: 1,
		SysBytes: 62,
	})
	// Put a value.
	pArgs := putArgs([]byte("a"), []byte("value1"))
	if _, pErr := tc.SendWrapped(&pArgs); pErr != nil {
		t.Fatal(pErr)
	}
	expMS := baseStats
	expMS.Add(enginepb.MVCCStats{
		LiveBytes: 25,
		KeyBytes:  14,
		ValBytes:  11,
		LiveCount: 1,
		KeyCount:  1,
		ValCount:  1,
	})
	verifyRangeStats(tc.engine, tc.rng.RangeID, expMS, t)
	// Put a 2nd value transactionally.
	pArgs = putArgs([]byte("b"), []byte("value2"))
	// Consistent UUID needed for a deterministic SysBytes value. This is because
	// a random UUID could have a 0x00 byte that would be escaped by the encoding,
	// increasing the encoded size and throwing off statistics verification.
	//
	// NOTE: the local is named txnID (not uuid) to avoid shadowing the
	// imported uuid package within this function.
	txnID, err := uuid.FromString("ea5b9590-a157-421b-8b93-a4caa2c41137")
	if err != nil {
		t.Fatal(err)
	}
	txn := newTransaction("test", pArgs.Key, 1, enginepb.SERIALIZABLE, tc.clock)
	txn.Priority = 123 // So we don't have random values messing with the byte counts on encoding
	txn.ID = txnID
	if _, pErr := tc.SendWrappedWith(roachpb.Header{Txn: txn}, &pArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// The transactional put leaves an intent behind, reflected in the
	// Intent{Bytes,Count} contributions below.
	expMS = baseStats
	expMS.Add(enginepb.MVCCStats{
		LiveBytes:   101,
		KeyBytes:    28,
		ValBytes:    73,
		IntentBytes: 23,
		LiveCount:   2,
		KeyCount:    2,
		ValCount:    2,
		IntentCount: 1,
	})
	verifyRangeStats(tc.engine, tc.rng.RangeID, expMS, t)
	// Resolve the 2nd value; the intent contributions should disappear.
	rArgs := &roachpb.ResolveIntentRequest{
		Span: roachpb.Span{
			Key: pArgs.Key,
		},
		IntentTxn: txn.TxnMeta,
		Status:    roachpb.COMMITTED,
	}
	if _, pErr := tc.SendWrapped(rArgs); pErr != nil {
		t.Fatal(pErr)
	}
	expMS = baseStats
	expMS.Add(enginepb.MVCCStats{
		LiveBytes: 50,
		KeyBytes:  28,
		ValBytes:  22,
		LiveCount: 2,
		KeyCount:  2,
		ValCount:  2,
	})
	verifyRangeStats(tc.engine, tc.rng.RangeID, expMS, t)
	// Delete the 1st value. The key remains (with a tombstone), so KeyCount
	// stays at 2 while LiveCount drops to 1.
	dArgs := deleteArgs([]byte("a"))
	if _, pErr := tc.SendWrapped(&dArgs); pErr != nil {
		t.Fatal(pErr)
	}
	expMS = baseStats
	expMS.Add(enginepb.MVCCStats{
		LiveBytes: 25,
		KeyBytes:  40,
		ValBytes:  22,
		LiveCount: 1,
		KeyCount:  2,
		ValCount:  3,
	})
	verifyRangeStats(tc.engine, tc.rng.RangeID, expMS, t)
}
// TestMerge verifies that the Merge command is behaving as expected. Time
// series data is used, as it is the only data type currently fully supported
// by the merge command. Three single-sample payloads are merged in sequence
// and the result is read back and compared with their union.
func TestMerge(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	key := []byte("mergedkey")
	args := make([]roachpb.InternalTimeSeriesData, 3)
	expected := roachpb.InternalTimeSeriesData{
		StartTimestampNanos: 0,
		SampleDurationNanos: 1000,
		Samples:             make([]roachpb.InternalTimeSeriesSample, len(args)),
	}
	// Build one single-sample payload per merge, and accumulate the union of
	// all samples as the expected final value.
	for i := range args {
		sample := roachpb.InternalTimeSeriesSample{
			Offset: int32(i),
			Count:  1,
			Sum:    float64(i),
		}
		args[i] = roachpb.InternalTimeSeriesData{
			StartTimestampNanos: expected.StartTimestampNanos,
			SampleDurationNanos: expected.SampleDurationNanos,
			Samples:             []roachpb.InternalTimeSeriesSample{sample},
		}
		expected.Samples[i] = sample
	}
	// Issue one Merge command per payload, in order.
	for i := range args {
		var v roachpb.Value
		if err := v.SetProto(&args[i]); err != nil {
			t.Fatal(err)
		}
		mergeArgs := internalMergeArgs(key, v)
		if _, pErr := tc.SendWrapped(&mergeArgs); pErr != nil {
			t.Fatalf("unexpected error from Merge: %s", pErr)
		}
	}
	// Read the merged value back and compare it to the expected union.
	gArgs := getArgs(key)
	reply, pErr := tc.SendWrapped(&gArgs)
	if pErr != nil {
		t.Fatalf("unexpected error from Get: %s", pErr)
	}
	resp := reply.(*roachpb.GetResponse)
	if resp.Value == nil {
		t.Fatal("GetResponse had nil value")
	}
	var actual roachpb.InternalTimeSeriesData
	if err := resp.Value.GetProto(&actual); err != nil {
		t.Fatal(err)
	}
	if !proto.Equal(&actual, &expected) {
		t.Errorf("Get did not return expected value: %v != %v", actual, expected)
	}
}
// TestTruncateLog verifies that the TruncateLog command removes a
// prefix of the raft logs (modifying FirstIndex() and making them
// inaccessible via Entries()).
func TestTruncateLog(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Disable the raft log queue so truncation happens only via our
	// explicit TruncateLog requests below.
	tc.rng.store.SetRaftLogQueueActive(false)
	// Populate the log with 10 entries. Save the LastIndex after each write.
	var indexes []uint64
	for i := 0; i < 10; i++ {
		args := incrementArgs([]byte("a"), int64(i))
		if _, pErr := tc.SendWrapped(&args); pErr != nil {
			t.Fatal(pErr)
		}
		idx, err := tc.rng.GetLastIndex()
		if err != nil {
			t.Fatal(err)
		}
		indexes = append(indexes, idx)
	}
	rangeID := tc.rng.RangeID
	// Discard the first half of the log.
	truncateArgs := truncateLogArgs(indexes[5], rangeID)
	if _, pErr := tc.SendWrapped(&truncateArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// FirstIndex has changed.
	firstIndex, err := tc.rng.GetFirstIndex()
	if err != nil {
		t.Fatal(err)
	}
	if firstIndex != indexes[5] {
		t.Errorf("expected firstIndex == %d, got %d", indexes[5], firstIndex)
	}
	// We can still get what remains of the log. Entries/Term are accessed
	// under rng.mu throughout this test.
	tc.rng.mu.Lock()
	entries, err := tc.rng.Entries(indexes[5], indexes[9], math.MaxUint64)
	tc.rng.mu.Unlock()
	if err != nil {
		t.Fatal(err)
	}
	if len(entries) != int(indexes[9]-indexes[5]) {
		t.Errorf("expected %d entries, got %d", indexes[9]-indexes[5], len(entries))
	}
	// But any range that includes the truncated entries returns an error.
	tc.rng.mu.Lock()
	_, err = tc.rng.Entries(indexes[4], indexes[9], math.MaxUint64)
	tc.rng.mu.Unlock()
	if err != raft.ErrCompacted {
		t.Errorf("expected ErrCompacted, got %s", err)
	}
	// The term of the last truncated entry is still available.
	tc.rng.mu.Lock()
	term, err := tc.rng.Term(indexes[4])
	tc.rng.mu.Unlock()
	if err != nil {
		t.Fatal(err)
	}
	if term == 0 {
		t.Errorf("invalid term 0 for truncated entry")
	}
	// The terms of older entries are gone.
	tc.rng.mu.Lock()
	_, err = tc.rng.Term(indexes[3])
	tc.rng.mu.Unlock()
	if err != raft.ErrCompacted {
		t.Errorf("expected ErrCompacted, got %s", err)
	}
	// Truncating logs that have already been truncated should not return an
	// error.
	truncateArgs = truncateLogArgs(indexes[3], rangeID)
	if _, pErr := tc.SendWrapped(&truncateArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Truncating logs that have the wrong rangeID included should not return
	// an error but should not truncate any logs.
	truncateArgs = truncateLogArgs(indexes[9], rangeID+1)
	if _, pErr := tc.SendWrapped(&truncateArgs); pErr != nil {
		t.Fatal(pErr)
	}
	tc.rng.mu.Lock()
	// The term of the last truncated entry is still available.
	term, err = tc.rng.Term(indexes[4])
	tc.rng.mu.Unlock()
	if err != nil {
		t.Fatal(err)
	}
	if term == 0 {
		t.Errorf("invalid term 0 for truncated entry")
	}
}
// TestConditionFailedError tests that a ConditionFailedError correctly
// bubbles up from MVCC to Range.
func TestConditionFailedError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Seed the key with "quack" so the conditional put below can mismatch.
	key := []byte("k")
	value := []byte("quack")
	pArgs := putArgs(key, value)
	if _, pErr := tc.SendWrapped(&pArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Conditional put expecting "moo" must fail, since the stored value is
	// "quack"; the error should carry the actual value back to the caller.
	val := roachpb.MakeValueFromString("moo")
	args := roachpb.ConditionalPutRequest{
		Span: roachpb.Span{
			Key: key,
		},
		Value:    roachpb.MakeValueFromBytes(value),
		ExpValue: &val,
	}
	_, pErr := tc.SendWrappedWith(roachpb.Header{Timestamp: hlc.MinTimestamp}, &args)
	if cErr, ok := pErr.GetDetail().(*roachpb.ConditionFailedError); pErr == nil || !ok {
		t.Fatalf("expected ConditionFailedError, got %T with content %+v",
			pErr, pErr)
	} else if valueBytes, err := cErr.ActualValue.GetBytes(); err != nil {
		t.Fatal(err)
	} else if cErr.ActualValue == nil || !bytes.Equal(valueBytes, value) {
		t.Errorf("ConditionFailedError with bytes %q expected, but got %+v",
			value, cErr.ActualValue)
	}
}
// TestReplicaSetsEqual tests to ensure that replicaSetsEqual
// returns the correct responses: replica sets are compared as multisets
// of StoreIDs, independent of order.
func TestReplicaSetsEqual(t *testing.T) {
	defer leaktest.AfterTest(t)()
	cases := []struct {
		expected bool
		a        []roachpb.ReplicaDescriptor
		b        []roachpb.ReplicaDescriptor
	}{
		{true, []roachpb.ReplicaDescriptor{}, []roachpb.ReplicaDescriptor{}},
		{true, createReplicaSets([]roachpb.StoreID{1}), createReplicaSets([]roachpb.StoreID{1})},
		{true, createReplicaSets([]roachpb.StoreID{1, 2}), createReplicaSets([]roachpb.StoreID{1, 2})},
		{true, createReplicaSets([]roachpb.StoreID{1, 2}), createReplicaSets([]roachpb.StoreID{2, 1})},
		{false, createReplicaSets([]roachpb.StoreID{1}), createReplicaSets([]roachpb.StoreID{2})},
		{false, createReplicaSets([]roachpb.StoreID{1, 2}), createReplicaSets([]roachpb.StoreID{2})},
		{false, createReplicaSets([]roachpb.StoreID{1, 2}), createReplicaSets([]roachpb.StoreID{1})},
		{false, createReplicaSets([]roachpb.StoreID{}), createReplicaSets([]roachpb.StoreID{1})},
		{true, createReplicaSets([]roachpb.StoreID{1, 2, 3}), createReplicaSets([]roachpb.StoreID{2, 3, 1})},
		{true, createReplicaSets([]roachpb.StoreID{1, 1}), createReplicaSets([]roachpb.StoreID{1, 1})},
		{false, createReplicaSets([]roachpb.StoreID{1, 1}), createReplicaSets([]roachpb.StoreID{1, 1, 1})},
		{true, createReplicaSets([]roachpb.StoreID{1, 2, 3, 1, 2, 3}), createReplicaSets([]roachpb.StoreID{1, 1, 2, 2, 3, 3})},
	}
	for _, c := range cases {
		if got := replicaSetsEqual(c.a, c.b); got != c.expected {
			t.Fatalf("unexpected replica intersection: %+v", c)
		}
	}
}
// TestAppliedIndex verifies that the Raft applied index strictly advances
// after each successful write command, and that the increments themselves
// produce the expected running sum.
func TestAppliedIndex(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	var lastAppliedIndex uint64
	var expectedSum int64
	for i := int64(1); i <= 10; i++ {
		incArgs := incrementArgs([]byte("a"), i)
		resp, pErr := tc.SendWrapped(&incArgs)
		if pErr != nil {
			t.Fatal(pErr)
		}
		expectedSum += i
		if reply := resp.(*roachpb.IncrementResponse); reply.NewValue != expectedSum {
			t.Errorf("expected %d, got %d", expectedSum, reply.NewValue)
		}
		// Read the applied index under the replica mutex.
		tc.rng.mu.Lock()
		idx := tc.rng.mu.state.RaftAppliedIndex
		tc.rng.mu.Unlock()
		if idx <= lastAppliedIndex {
			t.Errorf("appliedIndex did not advance. Was %d, now %d", lastAppliedIndex, idx)
		}
		lastAppliedIndex = idx
	}
}
// TestReplicaCorruption verifies that a replicaCorruptionError correctly marks
// the range as corrupt.
func TestReplicaCorruption(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Install a command filter that injects a replica corruption error for
	// any request addressed to the key "boom".
	tsc := TestStoreContext()
	tsc.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			if filterArgs.Req.Header().Key.Equal(roachpb.Key("boom")) {
				return roachpb.NewError(NewReplicaCorruptionError(errors.New("boom")))
			}
			return nil
		}
	tc := testContext{}
	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()
	// First send a regular command.
	args := putArgs(roachpb.Key("test1"), []byte("value"))
	if _, pErr := tc.SendWrapped(&args); pErr != nil {
		t.Fatal(pErr)
	}
	key := roachpb.Key("boom")
	// maybeSetCorrupt should have been called.
	args = putArgs(key, []byte("value"))
	_, pErr := tc.SendWrapped(&args)
	if !testutils.IsPError(pErr, "replica corruption \\(processed=true\\)") {
		t.Fatalf("unexpected error: %s", pErr)
	}
	// Verify replica destroyed was set.
	rkey, err := keys.Addr(key)
	if err != nil {
		t.Fatal(err)
	}
	r := tc.store.LookupReplica(rkey, rkey)
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.mu.destroyed.Error() != pErr.GetDetail().Error() {
		t.Fatalf("expected r.mu.destroyed == pErr.GetDetail(), instead %q != %q", r.mu.destroyed, pErr.GetDetail())
	}
	// Verify destroyed error was persisted.
	pErr, err = loadReplicaDestroyedError(context.Background(), r.store.Engine(), r.RangeID)
	if err != nil {
		t.Fatal(err)
	}
	if r.mu.destroyed.Error() != pErr.GetDetail().Error() {
		t.Fatalf("expected r.mu.destroyed == pErr.GetDetail(), instead %q != %q", r.mu.destroyed, pErr.GetDetail())
	}
	// TODO(bdarnell): when maybeSetCorrupt is finished verify that future commands fail too.
}
// TestChangeReplicasDuplicateError tests that a replica change that would
// use a NodeID twice in the replica configuration fails.
func TestChangeReplicasDuplicateError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Attempt to add a second replica on the node that already hosts one.
	dupDesc := roachpb.ReplicaDescriptor{
		NodeID:  tc.store.Ident.NodeID,
		StoreID: 9999,
	}
	err := tc.rng.ChangeReplicas(context.Background(), roachpb.ADD_REPLICA, dupDesc, tc.rng.Desc())
	if err == nil || !strings.Contains(err.Error(), "already present") {
		t.Fatalf("must not be able to add second replica to same node (err=%s)", err)
	}
}
// TestReplicaDanglingMetaIntent creates a dangling intent on a meta2
// record and verifies that RangeLookup requests behave
// appropriately. Normally, the old value and a write intent error
// should be returned. If IgnoreIntents is specified, then a random
// choice of old or new is returned with no error.
// TODO(tschottdorf): add a test in which there is a dangling intent on a
// descriptor we would've otherwise discarded in a reverse scan; verify that
// we don't erroneously return that descriptor (recently fixed bug).
func TestReplicaDanglingMetaIntent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Exercise RangeLookup via both Scan (false) and ReverseScan (true).
	for _, isReverse := range []bool{false, true} {
		testRangeDanglingMetaIntent(t, isReverse)
	}
}
// testRangeDanglingMetaIntent is the shared body for
// TestReplicaDanglingMetaIntent: it writes a meta2 descriptor as a dangling
// intent and checks RangeLookup behavior for consistent, inconsistent, and
// ConsiderIntents lookups. isReverse selects ReverseScan-based lookups.
func testRangeDanglingMetaIntent(t *testing.T, isReverse bool) {
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	key := roachpb.Key("a")
	// Get original meta2 descriptor.
	rlArgs := &roachpb.RangeLookupRequest{
		Span: roachpb.Span{
			Key: keys.RangeMetaKey(roachpb.RKey(key)),
		},
		MaxRanges: 1,
		Reverse:   isReverse,
	}
	var rlReply *roachpb.RangeLookupResponse
	reply, pErr := tc.SendWrappedWith(roachpb.Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}, rlArgs)
	if pErr != nil {
		t.Fatal(pErr)
	}
	rlReply = reply.(*roachpb.RangeLookupResponse)
	origDesc := rlReply.Ranges[0]
	// newDesc differs from origDesc only in its EndKey.
	newDesc := origDesc
	var err error
	newDesc.EndKey, err = keys.Addr(key)
	if err != nil {
		t.Fatal(err)
	}
	// Write the new descriptor as an intent.
	data, err := protoutil.Marshal(&newDesc)
	if err != nil {
		t.Fatal(err)
	}
	txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.clock)
	// Officially begin the transaction. If not for this, the intent resolution
	// machinery would simply remove the intent we write below, see #3020.
	// We send directly to Replica throughout this test, so there's no danger
	// of the Store aborting this transaction (i.e. we don't have to set a high
	// priority).
	pArgs := putArgs(keys.RangeMetaKey(roachpb.RKey(key)), data)
	txn.Sequence++
	if _, pErr = maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), roachpb.Header{Txn: txn}, &pArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Now lookup the range; should get the value. Since the lookup is
	// inconsistent, there's no WriteIntentError.
	// Note that 'A' < 'a'.
	rlArgs.Key = keys.RangeMetaKey(roachpb.RKey{'A'})
	reply, pErr = tc.SendWrappedWith(roachpb.Header{
		Timestamp:       hlc.MinTimestamp,
		ReadConsistency: roachpb.INCONSISTENT,
	}, rlArgs)
	if pErr != nil {
		t.Errorf("unexpected lookup error: %s", pErr)
	}
	rlReply = reply.(*roachpb.RangeLookupResponse)
	if !reflect.DeepEqual(rlReply.Ranges[0], origDesc) {
		t.Errorf("expected original descriptor %s; got %s", &origDesc, &rlReply.Ranges[0])
	}
	// Switch to consistent lookups, which should run into the intent.
	_, pErr = tc.SendWrappedWith(roachpb.Header{
		ReadConsistency: roachpb.CONSISTENT,
	}, rlArgs)
	if _, ok := pErr.GetDetail().(*roachpb.WriteIntentError); !ok {
		t.Fatalf("expected WriteIntentError, not %s", pErr)
	}
	// Try a single lookup with ConsiderIntents. Expect to see both descriptors.
	// First, try this consistently, which should not be allowed.
	rlArgs.ConsiderIntents = true
	_, pErr = tc.SendWrapped(rlArgs)
	if !testutils.IsPError(pErr, "can not read consistently and special-case intents") {
		t.Fatalf("wanted specific error, not %s", pErr)
	}
	// After changing back to inconsistent lookups, should be good to go.
	var origSeen, newSeen bool
	clonedRLArgs := *rlArgs
	reply, pErr = tc.SendWrappedWith(roachpb.Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}, &clonedRLArgs)
	if pErr != nil {
		t.Fatal(pErr)
	}
	rlReply = reply.(*roachpb.RangeLookupResponse)
	// Both the original and the intent descriptor must be returned.
	for _, seen := range rlReply.Ranges {
		if reflect.DeepEqual(seen, origDesc) {
			origSeen = true
		} else if reflect.DeepEqual(seen, newDesc) {
			newSeen = true
		} else {
			t.Errorf("expected orig/new descriptor %s/%s; got %s", &origDesc, &newDesc, &seen)
		}
	}
	if !origSeen || !newSeen {
		t.Errorf("expected to see both original and new descriptor; saw original = %t, saw new = %t", origSeen, newSeen)
	}
}
// TestReplicaLookupUseReverseScan verifies the correctness of the results which are retrieved
// from RangeLookup by using ReverseScan.
func TestReplicaLookupUseReverseScan(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Test ranges: ["a","c"), ["c","f"), ["f","h") and ["h","y").
	testRanges := []roachpb.RangeDescriptor{
		{RangeID: 2, StartKey: roachpb.RKey("a"), EndKey: roachpb.RKey("c")},
		{RangeID: 3, StartKey: roachpb.RKey("c"), EndKey: roachpb.RKey("f")},
		{RangeID: 4, StartKey: roachpb.RKey("f"), EndKey: roachpb.RKey("h")},
		{RangeID: 5, StartKey: roachpb.RKey("h"), EndKey: roachpb.RKey("y")},
	}
	// The range ["f","h") has dangling intent in meta2.
	withIntentRangeIndex := 2
	testCases := []struct {
		key      string
		expected roachpb.RangeDescriptor
	}{
		// For testRanges[0|1|3] there is no intent. A key in the middle
		// and the end key should both give us the range itself.
		{key: "b", expected: testRanges[0]},
		{key: "c", expected: testRanges[0]},
		{key: "d", expected: testRanges[1]},
		{key: "f", expected: testRanges[1]},
		{key: "j", expected: testRanges[3]},
		// testRanges[2] has an intent, so the inconsistent scan will read
		// an old value (nil). Since we're in reverse mode, testRanges[1]
		// is the result.
		{key: "g", expected: testRanges[1]},
		{key: "h", expected: testRanges[1]},
	}
	// Write all descriptors except the "intent" range as committed meta2
	// entries via a single transaction, then resolve its intents below.
	txn := newTransaction("test", roachpb.Key{}, 1, enginepb.SERIALIZABLE, tc.clock)
	for i, r := range testRanges {
		if i != withIntentRangeIndex {
			// Write the new descriptor as an intent.
			data, err := protoutil.Marshal(&r)
			if err != nil {
				t.Fatal(err)
			}
			pArgs := putArgs(keys.RangeMetaKey(roachpb.RKey(r.EndKey)), data)
			txn.Sequence++
			if _, pErr := tc.SendWrappedWith(roachpb.Header{Txn: txn}, &pArgs); pErr != nil {
				t.Fatal(pErr)
			}
		}
	}
	// Resolve the intents.
	rArgs := &roachpb.ResolveIntentRangeRequest{
		Span: roachpb.Span{
			Key:    keys.RangeMetaKey(roachpb.RKey("a")),
			EndKey: keys.RangeMetaKey(roachpb.RKey("z")),
		},
		IntentTxn: txn.TxnMeta,
		Status:    roachpb.COMMITTED,
	}
	if _, pErr := tc.SendWrapped(rArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Get original meta2 descriptor.
	rlArgs := &roachpb.RangeLookupRequest{
		MaxRanges: 1,
		Reverse:   true,
	}
	var rlReply *roachpb.RangeLookupResponse
	// Test ReverseScan without intents.
	for _, c := range testCases {
		clonedRLArgs := *rlArgs
		clonedRLArgs.Key = keys.RangeMetaKey(roachpb.RKey(c.key))
		reply, pErr := tc.SendWrappedWith(roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, &clonedRLArgs)
		if pErr != nil {
			t.Fatal(pErr)
		}
		rlReply = reply.(*roachpb.RangeLookupResponse)
		seen := rlReply.Ranges[0]
		if !(seen.StartKey.Equal(c.expected.StartKey) && seen.EndKey.Equal(c.expected.EndKey)) {
			t.Errorf("expected descriptor %s; got %s", &c.expected, &seen)
		}
	}
	// Write the new descriptor as an intent (left unresolved by txn2), so the
	// second pass below exercises lookups in the presence of an intent.
	intentRange := testRanges[withIntentRangeIndex]
	data, err := protoutil.Marshal(&intentRange)
	if err != nil {
		t.Fatal(err)
	}
	pArgs := putArgs(keys.RangeMetaKey(roachpb.RKey(intentRange.EndKey)), data)
	txn2 := newTransaction("test", roachpb.Key{}, 1, enginepb.SERIALIZABLE, tc.clock)
	if _, pErr := tc.SendWrappedWith(roachpb.Header{Txn: txn2}, &pArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Test ReverseScan with intents. The expected results are identical: the
	// inconsistent lookup skips the intent and reads the old (nil) value.
	for _, c := range testCases {
		clonedRLArgs := *rlArgs
		clonedRLArgs.Key = keys.RangeMetaKey(roachpb.RKey(c.key))
		reply, pErr := tc.SendWrappedWith(roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, &clonedRLArgs)
		if pErr != nil {
			t.Fatal(pErr)
		}
		rlReply = reply.(*roachpb.RangeLookupResponse)
		seen := rlReply.Ranges[0]
		if !(seen.StartKey.Equal(c.expected.StartKey) && seen.EndKey.Equal(c.expected.EndKey)) {
			t.Errorf("expected descriptor %s; got %s", &c.expected, &seen)
		}
	}
}
// TestReplicaLookup verifies RangeLookup behavior for the first range and
// for the edge-case keys at the end of the meta1 and meta2 prefixes, in
// both forward and reverse mode.
func TestReplicaLookup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	expected := []roachpb.RangeDescriptor{*tc.rng.Desc()}
	testCases := []struct {
		key      roachpb.RKey
		reverse  bool
		expected []roachpb.RangeDescriptor
	}{
		// Test with the first range (StartKey==KeyMin). Normally we look
		// up this range in gossip instead of executing the RPC, but
		// RangeLookup is still used when up-to-date information is
		// required.
		{key: roachpb.RKeyMin, reverse: false, expected: expected},
		// Test with the last key in a meta prefix. This is an edge case in the
		// implementation.
		{key: keys.MustAddr(keys.Meta1KeyMax), reverse: false, expected: expected},
		{key: keys.MustAddr(keys.Meta2KeyMax), reverse: false, expected: nil},
		{key: keys.MustAddr(keys.Meta1KeyMax), reverse: true, expected: expected},
		{key: keys.MustAddr(keys.Meta2KeyMax), reverse: true, expected: expected},
	}
	for _, test := range testCases {
		lookup := roachpb.RangeLookupRequest{
			Span: roachpb.Span{
				Key: test.key.AsRawKey(),
			},
			MaxRanges: 1,
			Reverse:   test.reverse,
		}
		resp, pErr := tc.SendWrapped(&lookup)
		switch {
		case pErr != nil && test.expected != nil:
			// An error is only acceptable when no descriptor was expected.
			t.Fatal(pErr)
		case pErr == nil:
			got := resp.(*roachpb.RangeLookupResponse).Ranges
			if !reflect.DeepEqual(got, test.expected) {
				t.Fatalf("expected %+v, got %+v", test.expected, got)
			}
		}
	}
}
// TestRequestLeaderEncounterGroupDeleteError verifies that a lease request which fails with
// RaftGroupDeletedError is converted to a RangeNotFoundError in the Store.
func TestRequestLeaderEncounterGroupDeleteError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Mock proposeRaftCommand to return a roachpb.RaftGroupDeletedError.
	proposeRaftCommandFn := func(*pendingCmd) error {
		return &roachpb.RaftGroupDeletedError{}
	}
	rng := tc.rng
	rng.mu.Lock()
	rng.mu.proposeRaftCommandFn = proposeRaftCommandFn
	rng.mu.Unlock()
	gArgs := getArgs(roachpb.Key("a"))
	// Advance the clock past the lease expiration to force the read command
	// to request a new lease, which hits the mocked proposal above.
	clock := tc.clock
	ts := clock.Update(clock.Now().Add(leaseExpiry(tc.rng), 0))
	_, pErr := client.SendWrappedWith(tc.store, nil, roachpb.Header{
		Timestamp: ts,
		RangeID:   1,
	}, &gArgs)
	// The RaftGroupDeletedError must surface to the client as RangeNotFoundError.
	if _, ok := pErr.GetDetail().(*roachpb.RangeNotFoundError); !ok {
		t.Fatalf("expected a RangeNotFoundError, got %s", pErr)
	}
}
// TestIntentIntersect verifies intersectSpan, which splits an intent span
// (point, ranged, or range-local) against a range descriptor into the
// portion contained in the range ("in") and the leftover portions ("out").
func TestIntentIntersect(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// A point intent (no EndKey).
	iPt := roachpb.Span{
		Key:    roachpb.Key("asd"),
		EndKey: nil,
	}
	// A ranged intent ["c","x").
	iRn := roachpb.Span{
		Key:    roachpb.Key("c"),
		EndKey: roachpb.Key("x"),
	}
	suffix := roachpb.RKey("abcd")
	// A range-local intent span built from range keys for "c" and "x".
	iLc := roachpb.Span{
		Key:    keys.MakeRangeKey(roachpb.RKey("c"), suffix, nil),
		EndKey: keys.MakeRangeKey(roachpb.RKey("x"), suffix, nil),
	}
	kl1 := string(iLc.Key)
	kl2 := string(iLc.EndKey)
	for i, tc := range []struct {
		intent   roachpb.Span
		from, to string
		// exp flattens the expected result as
		// [in.Key, in.EndKey, out[0].Key, out[0].EndKey, ...];
		// a leading pair of empty strings means no "in" portion.
		exp []string
	}{
		{intent: iPt, from: "", to: "z", exp: []string{"", "", "asd", ""}},

		{intent: iRn, from: "", to: "a", exp: []string{"", "", "c", "x"}},
		{intent: iRn, from: "", to: "c", exp: []string{"", "", "c", "x"}},
		{intent: iRn, from: "a", to: "z", exp: []string{"c", "x"}},
		{intent: iRn, from: "c", to: "d", exp: []string{"c", "d", "d", "x"}},
		{intent: iRn, from: "c", to: "x", exp: []string{"c", "x"}},
		{intent: iRn, from: "d", to: "x", exp: []string{"d", "x", "c", "d"}},
		{intent: iRn, from: "d", to: "w", exp: []string{"d", "w", "c", "d", "w", "x"}},
		{intent: iRn, from: "c", to: "w", exp: []string{"c", "w", "w", "x"}},
		{intent: iRn, from: "w", to: "x", exp: []string{"w", "x", "c", "w"}},
		{intent: iRn, from: "x", to: "z", exp: []string{"", "", "c", "x"}},
		{intent: iRn, from: "y", to: "z", exp: []string{"", "", "c", "x"}},

		// A local intent range always comes back in one piece, either inside
		// or outside of the Range.
		{intent: iLc, from: "a", to: "b", exp: []string{"", "", kl1, kl2}},
		{intent: iLc, from: "d", to: "z", exp: []string{"", "", kl1, kl2}},
		{intent: iLc, from: "f", to: "g", exp: []string{"", "", kl1, kl2}},
		{intent: iLc, from: "c", to: "x", exp: []string{kl1, kl2}},
		{intent: iLc, from: "a", to: "z", exp: []string{kl1, kl2}},
	} {
		var all []string
		in, out := intersectSpan(tc.intent, roachpb.RangeDescriptor{
			StartKey: roachpb.RKey(tc.from),
			EndKey:   roachpb.RKey(tc.to),
		})
		// Flatten in/out into the same shape as exp for comparison.
		if in != nil {
			all = append(all, string(in.Key), string(in.EndKey))
		} else {
			all = append(all, "", "")
		}
		for _, o := range out {
			all = append(all, string(o.Key), string(o.EndKey))
		}
		if !reflect.DeepEqual(all, tc.exp) {
			t.Errorf("%d: wanted %v, got %v", i, tc.exp, all)
		}
	}
}
// TestBatchErrorWithIndex tests that when an individual entry in a
// batch results in an error with an index, the index of this command
// is stored into the error.
func TestBatchErrorWithIndex(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	var ba roachpb.BatchRequest
	// The initial Put succeeds.
	ba.Add(&roachpb.PutRequest{
		Span:  roachpb.Span{Key: roachpb.Key("k")},
		Value: roachpb.MakeValueFromString("not nil"),
	})
	// The ConditionalPut fails with a ConditionalPutError (the value is no
	// longer nil after the Put above), which populates the returned
	// error's index.
	ba.Add(&roachpb.ConditionalPutRequest{
		Span:     roachpb.Span{Key: roachpb.Key("k")},
		Value:    roachpb.MakeValueFromString("irrelevant"),
		ExpValue: nil, // not true after above Put
	})
	// The trailing Get is never executed.
	ba.Add(&roachpb.GetRequest{
		Span: roachpb.Span{Key: roachpb.Key("k")},
	})

	_, pErr := tc.Sender().Send(context.Background(), ba)
	if pErr == nil {
		t.Fatal("expected an error")
	}
	// The failing ConditionalPut sits at batch position 1.
	if pErr.Index == nil || pErr.Index.Index != 1 || !testutils.IsPError(pErr, "unexpected value") {
		t.Fatalf("invalid index or error type: %s", pErr)
	}
}
// TestReplicaLoadSystemConfigSpanIntent verifies that intents on the SystemConfigSpan
// cause an error, but trigger asynchronous cleanup.
func TestReplicaLoadSystemConfigSpanIntent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// NOTE: renamed from the misspelled scStartSddr.
	scStartAddr, err := keys.Addr(keys.SystemConfigSpan.Key)
	if err != nil {
		t.Fatal(err)
	}
	rng := tc.store.LookupReplica(scStartAddr, nil)
	if rng == nil {
		t.Fatalf("no replica contains the SystemConfig span")
	}
	// Create a transaction and write an intent to the system
	// config span.
	key := keys.SystemConfigSpan.Key
	_, btH := beginTxnArgs(key, newTransaction("test", key, 1, enginepb.SERIALIZABLE, rng.store.Clock()))
	btH.Txn.Priority = 1 // low so it can be pushed
	put := putArgs(key, []byte("foo"))
	if _, pErr := maybeWrapWithBeginTransaction(tc.Sender(), context.Background(), btH, &put); pErr != nil {
		t.Fatal(pErr)
	}
	// Abort the transaction so that the async intent resolution caused
	// by loading the system config span doesn't waste any time in
	// clearing the intent.
	pusher := newTransaction("test", key, 1, enginepb.SERIALIZABLE, rng.store.Clock())
	pusher.Priority = 2 // will push successfully
	pushArgs := pushTxnArgs(pusher, btH.Txn, roachpb.PUSH_ABORT)
	if _, pErr := tc.SendWrapped(&pushArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Verify that the intent trips up loading the SystemConfig data.
	if _, _, err := rng.loadSystemConfigSpan(); err != errSystemConfigIntent {
		t.Fatal(err)
	}
	// In the loop, wait until the intent is aborted. Then write a "real" value
	// there and verify that we can now load the data as expected.
	v := roachpb.MakeValueFromString("foo")
	util.SucceedsSoon(t, func() error {
		if err := engine.MVCCPut(context.Background(), rng.store.Engine(), &enginepb.MVCCStats{},
			keys.SystemConfigSpan.Key, rng.store.Clock().Now(), v, nil); err != nil {
			return err
		}

		kvs, _, err := rng.loadSystemConfigSpan()
		if err != nil {
			return err
		}

		if len(kvs) != 1 || !bytes.Equal(kvs[0].Key, keys.SystemConfigSpan.Key) {
			return errors.Errorf("expected only key %s in SystemConfigSpan map: %+v", keys.SystemConfigSpan.Key, kvs)
		}
		return nil
	})
}
// TestReplicaDestroy checks that Destroy rejects a stale descriptor (one
// whose replica ID has since changed) and succeeds with a current one.
func TestReplicaDestroy(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	repl, err := tc.store.GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}

	// First try and fail with a stale descriptor: bump this store's
	// replica ID in a cloned descriptor and install it on the replica.
	origDesc := repl.Desc()
	newDesc := protoutil.Clone(origDesc).(*roachpb.RangeDescriptor)
	for i := range newDesc.Replicas {
		if newDesc.Replicas[i].StoreID != tc.store.StoreID() {
			continue
		}
		newDesc.Replicas[i].ReplicaID++
		newDesc.NextReplicaID++
		break
	}
	if err := repl.setDesc(newDesc); err != nil {
		t.Fatal(err)
	}
	if err := repl.Destroy(*origDesc); !testutils.IsError(err, "replica ID has changed") {
		t.Fatalf("expected error 'replica ID has changed' but got %s", err)
	}

	// Now try a fresh descriptor and succeed.
	if err := repl.Destroy(*repl.Desc()); err != nil {
		t.Fatal(err)
	}
}
// TestEntries exercises Replica.Entries (the raft.Storage implementation)
// against a log that has been half-truncated and, later, a log with a
// manually created gap.
func TestEntries(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Disable the raft log queue so this test controls truncation itself.
	tc.rng.store.SetRaftLogQueueActive(false)
	// Populate the log with 10 entries. Save the LastIndex after each write.
	var indexes []uint64
	for i := 0; i < 10; i++ {
		args := incrementArgs([]byte("a"), int64(i))

		if _, pErr := tc.SendWrapped(&args); pErr != nil {
			t.Fatal(pErr)
		}
		idx, err := tc.rng.GetLastIndex()
		if err != nil {
			t.Fatal(err)
		}
		indexes = append(indexes, idx)
	}

	rng := tc.rng
	rangeID := rng.RangeID

	// Discard the first half of the log.
	truncateArgs := truncateLogArgs(indexes[5], rangeID)
	if _, pErr := tc.SendWrapped(&truncateArgs); pErr != nil {
		t.Fatal(pErr)
	}

	// NOTE: the loop variable tc below shadows the outer testContext.
	for i, tc := range []struct {
		lo             uint64
		hi             uint64
		maxBytes       uint64
		expResultCount int
		expError       error
	}{
		// Case 0: Just most of the entries.
		{lo: indexes[5], hi: indexes[9], expResultCount: 4},
		// Case 1: Get a single entry.
		{lo: indexes[5], hi: indexes[6], expResultCount: 1},
		// Case 2: Use MaxUint64 instead of 0 for maxBytes.
		{lo: indexes[5], hi: indexes[9], maxBytes: math.MaxUint64, expResultCount: 4},
		// Case 3: maxBytes is set low so only a single value should be
		// returned.
		{lo: indexes[5], hi: indexes[9], maxBytes: 1, expResultCount: 1},
		// Case 4: hi value is past the last index, should return all available
		// entries
		{lo: indexes[5], hi: indexes[9] + 1, expResultCount: 5},
		// Case 5: all values have been truncated.
		{lo: indexes[1], hi: indexes[2], expError: raft.ErrCompacted},
		// Case 6: hi has just been truncated.
		{lo: indexes[1], hi: indexes[4], expError: raft.ErrCompacted},
		// Case 7: another case where hi has just been truncated.
		{lo: indexes[3], hi: indexes[4], expError: raft.ErrCompacted},
		// Case 8: lo has been truncated and hi is the truncation point.
		{lo: indexes[4], hi: indexes[5], expError: raft.ErrCompacted},
		// Case 9: lo has been truncated but hi is available.
		{lo: indexes[4], hi: indexes[9], expError: raft.ErrCompacted},
		// Case 10: lo has been truncated and hi is not available.
		{lo: indexes[4], hi: indexes[9] + 100, expError: raft.ErrCompacted},
		// Case 11: lo has been truncated but hi is available, and maxBytes is
		// set low.
		{lo: indexes[4], hi: indexes[9], maxBytes: 1, expError: raft.ErrCompacted},
		// Case 12: lo is available but hi isn't.
		{lo: indexes[5], hi: indexes[9] + 100, expError: raft.ErrUnavailable},
		// Case 13: both lo and hi are not available.
		{lo: indexes[9] + 100, hi: indexes[9] + 1000, expError: raft.ErrUnavailable},
		// Case 14: lo is available, hi is not, but it was cut off by maxBytes.
		{lo: indexes[5], hi: indexes[9] + 1000, maxBytes: 1, expResultCount: 1},
	} {
		// Entries requires holding the replica mutex.
		rng.mu.Lock()
		ents, err := rng.Entries(tc.lo, tc.hi, tc.maxBytes)
		rng.mu.Unlock()
		if tc.expError == nil && err != nil {
			t.Errorf("%d: expected no error, got %s", i, err)
			continue
		} else if err != tc.expError {
			t.Errorf("%d: expected error %s, got %s", i, tc.expError, err)
			continue
		}
		if len(ents) != tc.expResultCount {
			t.Errorf("%d: expected %d entries, got %d", i, tc.expResultCount, len(ents))
		}
	}

	// Case 15: Lo must be less than or equal to hi.
	rng.mu.Lock()
	if _, err := rng.Entries(indexes[9], indexes[5], 0); err == nil {
		t.Errorf("15: error expected, got none")
	}
	rng.mu.Unlock()

	// Case 16: add a gap to the indexes by deleting a raft log entry
	// directly from the engine. (tc here is the outer testContext again.)
	if err := engine.MVCCDelete(context.Background(), tc.store.Engine(), nil, keys.RaftLogKey(rangeID, indexes[6]), hlc.ZeroTimestamp,
		nil); err != nil {
		t.Fatal(err)
	}
	rng.mu.Lock()
	defer rng.mu.Unlock()
	if _, err := rng.Entries(indexes[5], indexes[9], 0); err == nil {
		t.Errorf("16: error expected, got none")
	}

	// Case 17: don't hit the gap due to maxBytes.
	ents, err := rng.Entries(indexes[5], indexes[9], 1)
	if err != nil {
		t.Errorf("17: expected no error, got %s", err)
	}
	if len(ents) != 1 {
		t.Errorf("17: expected 1 entry, got %d", len(ents))
	}

	// Case 18: don't hit the gap due to truncation.
	if _, err := rng.Entries(indexes[4], indexes[9], 0); err != raft.ErrCompacted {
		t.Errorf("18: expected error %s , got %s", raft.ErrCompacted, err)
	}
}
// TestTerm exercises Replica.Term (the raft.Storage implementation):
// truncated indexes return ErrCompacted, firstIndex-1 shares firstIndex's
// term, the last index succeeds, and indexes past the end return
// ErrUnavailable.
func TestTerm(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	// Disable the raft log queue so this test controls truncation itself.
	tc.rng.store.SetRaftLogQueueActive(false)

	rng := tc.rng
	rangeID := rng.RangeID

	// Populate the log with 10 entries. Save the LastIndex after each write.
	var indexes []uint64
	for i := 0; i < 10; i++ {
		args := incrementArgs([]byte("a"), int64(i))

		if _, pErr := tc.SendWrapped(&args); pErr != nil {
			t.Fatal(pErr)
		}
		idx, err := tc.rng.GetLastIndex()
		if err != nil {
			t.Fatal(err)
		}
		indexes = append(indexes, idx)
	}

	// Discard the first half of the log.
	truncateArgs := truncateLogArgs(indexes[5], rangeID)
	if _, pErr := tc.SendWrapped(&truncateArgs); pErr != nil {
		t.Fatal(pErr)
	}

	rng.mu.Lock()
	defer rng.mu.Unlock()

	firstIndex, err := rng.FirstIndex()
	if err != nil {
		t.Fatal(err)
	}
	if firstIndex != indexes[5] {
		// BUGFIX: the message previously printed indexes[4] even though the
		// condition compares against indexes[5].
		t.Fatalf("expected firstIndex %d to be %d", firstIndex, indexes[5])
	}

	// Truncated logs should return an ErrCompacted error.
	if _, err := tc.rng.Term(indexes[1]); err != raft.ErrCompacted {
		t.Errorf("expected ErrCompacted, got %s", err)
	}
	if _, err := tc.rng.Term(indexes[3]); err != raft.ErrCompacted {
		t.Errorf("expected ErrCompacted, got %s", err)
	}

	// FirstIndex-1 should return the term of firstIndex.
	firstIndexTerm, err := tc.rng.Term(firstIndex)
	if err != nil {
		t.Errorf("expect no error, got %s", err)
	}

	term, err := tc.rng.Term(indexes[4])
	if err != nil {
		t.Errorf("expect no error, got %s", err)
	}
	if term != firstIndexTerm {
		t.Errorf("expected firstIndex-1's term:%d to equal that of firstIndex:%d", term, firstIndexTerm)
	}

	lastIndex, err := rng.LastIndex()
	if err != nil {
		t.Fatal(err)
	}

	// Last index should return correctly.
	if _, err := tc.rng.Term(lastIndex); err != nil {
		t.Errorf("expected no error, got %s", err)
	}

	// Terms for after the last index should return ErrUnavailable.
	if _, err := tc.rng.Term(lastIndex + 1); err != raft.ErrUnavailable {
		t.Errorf("expected ErrUnavailable, got %s", err)
	}
	if _, err := tc.rng.Term(indexes[9] + 1000); err != raft.ErrUnavailable {
		t.Errorf("expected ErrUnavailable, got %s", err)
	}
}
// TestGCIncorrectRange verifies that a GC request sent to a range that
// does not contain the targeted key is silently dropped (the key survives),
// while the same GC request sent to the correct range removes the value.
func TestGCIncorrectRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	// Split range into two ranges.
	splitKey := roachpb.RKey("c")
	rng1 := tc.rng
	rng2 := splitTestRange(tc.store, splitKey, splitKey, t)

	// Write a key to range 2 at two different timestamps so we can
	// GC the earlier timestamp without needing to delete it.
	key := splitKey.PrefixEnd().AsRawKey()
	val := []byte("value")
	putReq := putArgs(key, val)
	ts1 := makeTS(1, 0)
	ts2 := makeTS(2, 0)
	ts1Header := roachpb.Header{Timestamp: ts1}
	ts2Header := roachpb.Header{Timestamp: ts2}
	if _, pErr := client.SendWrappedWith(rng2, context.Background(), ts1Header, &putReq); pErr != nil {
		t.Errorf("unexpected pError on put key request: %s", pErr)
	}
	if _, pErr := client.SendWrappedWith(rng2, context.Background(), ts2Header, &putReq); pErr != nil {
		t.Errorf("unexpected pError on put key request: %s", pErr)
	}

	// Send GC request to range 1 for the key on range 2, which
	// should succeed even though it doesn't contain the key, because
	// the request for the incorrect key will be silently dropped.
	gKey := gcKey(key, ts1)
	gcReq := gcArgs(rng1.Desc().StartKey, rng1.Desc().EndKey, gKey)
	if _, pErr := client.SendWrappedWith(rng1, context.Background(), roachpb.Header{Timestamp: tc.clock.Now()}, &gcReq); pErr != nil {
		t.Errorf("unexpected pError on garbage collection request to incorrect range: %s", pErr)
	}

	// Make sure the key still exists on range 2.
	getReq := getArgs(key)
	if res, pErr := client.SendWrappedWith(rng2, context.Background(), ts1Header, &getReq); pErr != nil {
		t.Errorf("unexpected pError on get request to correct range: %s", pErr)
	} else if resVal := res.(*roachpb.GetResponse).Value; resVal == nil {
		t.Errorf("expected value %s to exists after GC to incorrect range but before GC to correct range, found %v", val, resVal)
	}

	// Send GC request to range 2 for the same key.
	gcReq = gcArgs(rng2.Desc().StartKey, rng2.Desc().EndKey, gKey)
	if _, pErr := client.SendWrappedWith(rng2, context.Background(), roachpb.Header{Timestamp: tc.clock.Now()}, &gcReq); pErr != nil {
		t.Errorf("unexpected pError on garbage collection request to correct range: %s", pErr)
	}

	// Make sure the key no longer exists on range 2: the ts1 version was
	// GC'ed, so a read at ts1 finds nothing.
	if res, pErr := client.SendWrappedWith(rng2, context.Background(), ts1Header, &getReq); pErr != nil {
		t.Errorf("unexpected pError on get request to correct range: %s", pErr)
	} else if resVal := res.(*roachpb.GetResponse).Value; resVal != nil {
		t.Errorf("expected value at key %s to no longer exist after GC to correct range, found value %v", key, resVal)
	}
}
// TestReplicaCancelRaft checks that it is possible to safely abandon Raft
// commands via a cancelable context.Context.
func TestReplicaCancelRaft(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Run both variants: cancel before the command is proposed, and cancel
	// while the command is already being processed.
	for _, cancelEarly := range []bool{true, false} {
		func() {
			// Pick a key unlikely to be used by background processes.
			key := []byte("acdfg")
			ctx, cancel := context.WithCancel(context.Background())
			tsc := TestStoreContext()
			if !cancelEarly {
				// Cancel from inside the command filter, i.e. once the
				// command for our key is already in flight.
				tsc.TestingKnobs.TestingCommandFilter =
					func(filterArgs storagebase.FilterArgs) *roachpb.Error {
						if !filterArgs.Req.Header().Key.Equal(key) {
							return nil
						}
						cancel()
						return nil
					}

			}
			tc := testContext{}
			tc.StartWithStoreContext(t, tsc)
			defer tc.Stop()
			if cancelEarly {
				// Cancel up front and swallow raft proposals so the command
				// can never complete.
				cancel()
				tc.rng.mu.Lock()

				tc.rng.mu.proposeRaftCommandFn = func(*pendingCmd) error {
					return nil
				}
				tc.rng.mu.Unlock()
			}
			var ba roachpb.BatchRequest
			ba.Add(&roachpb.GetRequest{
				Span: roachpb.Span{Key: key},
			})
			if err := ba.SetActiveTimestamp(tc.clock.Now); err != nil {
				t.Fatal(err)
			}
			br, pErr := tc.rng.addWriteCmd(ctx, ba)
			if pErr == nil {
				if !cancelEarly {
					// We cancelled the context while the command was already
					// being processed, so the client had to wait for successful
					// execution.
					return
				}
				t.Fatalf("expected an error, but got successful response %+v", br)
			}
			// If we cancelled the context early enough, we expect to receive a
			// corresponding error and not wait for the command.
			if !testutils.IsPError(pErr, context.Canceled.Error()) {
				t.Fatalf("unexpected error: %s", pErr)
			}
		}()
	}
}
// verifyChecksum computes the checksum for the given replica's range,
// verifies it via a VerifyChecksum trigger, and returns the checksum.
// It fails the test if the checksum cannot be found or computed.
func verifyChecksum(t *testing.T, rng *Replica) []byte {
	ctx := context.Background()
	id := uuid.MakeV4()

	computeReq := roachpb.ComputeChecksumRequest{
		ChecksumID: id,
		Version:    replicaChecksumVersion,
	}
	rng.computeChecksumTrigger(ctx, computeReq)

	c, ok := rng.getChecksum(ctx, id)
	if !ok {
		t.Fatalf("checksum for id = %v not found", id)
	}
	if c.checksum == nil {
		t.Fatal("couldn't compute checksum")
	}

	verifyReq := roachpb.VerifyChecksumRequest{
		ChecksumID: id,
		Version:    replicaChecksumVersion,
		Checksum:   c.checksum,
	}
	rng.verifyChecksumTrigger(ctx, verifyReq)
	return c.checksum
}
// TODO(tschottdorf): this test is really frail and unidiomatic. Consider
// some better high-level check of this functionality.
func TestComputeVerifyChecksum(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	rng := tc.rng

	// Seed the range with a value so the checksum is non-trivial.
	key := roachpb.Key("a")
	{
		incArgs := incrementArgs(key, 23)
		if _, err := tc.SendWrapped(&incArgs); err != nil {
			t.Fatal(err)
		}
	}

	// We use this helper below to gauge whether another Raft command possibly
	// snuck in (in which case we recompute). We can't use the in-memory state
	// because it's not updated atomically with the batch and because this
	// test doesn't respect Raft ordering.
	getAppliedIndex := func() uint64 {
		rng.mu.Lock()
		defer rng.mu.Unlock()
		appliedIndex, _, err := loadAppliedIndex(context.Background(), rng.store.Engine(), rng.RangeID)
		if err != nil {
			t.Fatal(err)
		}
		return appliedIndex
	}

	// The following part of the test is inherently racy if other Raft commands
	// get processed (which could happen due to reproposals). The loop makes
	// sure that we catch this.
	util.SucceedsSoon(t, func() error {
		oldAppliedIndex := getAppliedIndex()
		initialChecksum := verifyChecksum(t, rng)

		// Getting a value will not affect the snapshot checksum.
		gArgs := getArgs(roachpb.Key("a"))
		if _, err := tc.SendWrapped(&gArgs); err != nil {
			t.Fatal(err)
		}
		checksum := verifyChecksum(t, rng)

		// Retry if any Raft command snuck in between the two checksums.
		appliedIndex := getAppliedIndex()
		if appliedIndex != oldAppliedIndex {
			return errors.Errorf("applied index changed from %d to %d",
				oldAppliedIndex, appliedIndex)
		}

		if !bytes.Equal(initialChecksum, checksum) {
			t.Fatalf("changed checksum: e = %v, c = %v", initialChecksum, checksum)
		}
		return nil
	})

	util.SucceedsSoon(t, func() error {
		oldAppliedIndex := getAppliedIndex()
		initialChecksum := verifyChecksum(t, rng)

		// Modifying the range will change the checksum.
		incArgs := incrementArgs(key, 5)
		if _, err := tc.SendWrapped(&incArgs); err != nil {
			t.Fatal(err)
		}
		checksum := verifyChecksum(t, rng)

		// Exactly the increment above must have applied; otherwise retry.
		appliedIndex := getAppliedIndex()
		if diff := appliedIndex - oldAppliedIndex; diff != 1 {
			return errors.Errorf("applied index changed by %d, from %d to %d",
				diff, oldAppliedIndex, appliedIndex)
		}

		if bytes.Equal(initialChecksum, checksum) {
			t.Fatalf("same checksum: e = %v, c = %v", initialChecksum, checksum)
		}
		return nil
	})

	// Verify that a bad version/checksum sent will result in an error.
	id1 := uuid.MakeV4()
	rng.computeChecksumTrigger(
		context.Background(),
		roachpb.ComputeChecksumRequest{
			ChecksumID: id1,
			Version:    replicaChecksumVersion,
		})

	// Set a callback for checksum mismatch panics.
	badChecksumChan := make(chan []ReplicaSnapshotDiff, 1)
	rng.store.ctx.TestingKnobs.BadChecksumPanic = func(diff []ReplicaSnapshotDiff) {
		badChecksumChan <- diff
	}

	// First test that sending a Verification request with a bad version and
	// bad checksum will return without panicking because of a bad checksum.
	rng.verifyChecksumTrigger(
		context.Background(),
		roachpb.VerifyChecksumRequest{
			ChecksumID: id1,
			Version:    10000001,
			Checksum:   []byte("bad checksum"),
		})
	select {
	case badChecksum := <-badChecksumChan:
		t.Fatalf("bad checksum: %v", badChecksum)
	default:
	}
	// Setting the correct version will verify the checksum see a
	// checksum mismatch and trigger a rerun of the consistency check,
	// but the second consistency check will succeed because the checksum
	// provided in the second consistency check is the correct one.
	rng.verifyChecksumTrigger(
		context.Background(),
		roachpb.VerifyChecksumRequest{
			ChecksumID: id1,
			Version:    replicaChecksumVersion,
			Checksum:   []byte("bad checksum"),
		})
	select {
	case badChecksum := <-badChecksumChan:
		t.Fatalf("bad checksum: %v", badChecksum)
	default:
	}

	// Repeat the same but provide a snapshot this time. This will
	// result in the checksum failure not running the second consistency
	// check; it will panic.
	rng.verifyChecksumTrigger(
		context.Background(),
		roachpb.VerifyChecksumRequest{
			ChecksumID: id1,
			Version:    replicaChecksumVersion,
			Checksum:   []byte("bad checksum"),
			Snapshot:   &roachpb.RaftSnapshotData{},
		})
	select {
	case <-badChecksumChan:
	default:
		t.Fatal("expected bad checksum, but did not get one")
	}

	id2 := uuid.MakeV4()
	// Sending a ComputeChecksum with a bad version doesn't result in a
	// computed checksum.
	if _, _, err := rng.ComputeChecksum(
		context.Background(),
		nil,
		nil,
		roachpb.Header{},
		roachpb.ComputeChecksumRequest{
			ChecksumID: id2,
			Version:    23343434,
		},
	); err != nil {
		t.Fatal(err)
	}
	// Sending a VerifyChecksum with a bad checksum is a noop.
	// (id2 has no computed checksum, so no mismatch can be detected.)
	rng.verifyChecksumTrigger(
		context.Background(),
		roachpb.VerifyChecksumRequest{
			ChecksumID: id2,
			Version:    replicaChecksumVersion,
			Checksum:   []byte("bad checksum"),
			Snapshot:   &roachpb.RaftSnapshotData{},
		})
	select {
	case badChecksum := <-badChecksumChan:
		t.Fatalf("bad checksum: %v", badChecksum)
	default:
	}
}
// TestNewReplicaCorruptionError checks the string rendering of
// ReplicaCorruptionError for empty, plain, and wrapped underlying errors.
func TestNewReplicaCorruptionError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	testCases := []struct {
		errStruct *roachpb.ReplicaCorruptionError
		expErr    string
	}{
		{NewReplicaCorruptionError(errors.New("")), "replica corruption (processed=false)"},
		{NewReplicaCorruptionError(errors.New("foo")), "replica corruption (processed=false): foo"},
		{NewReplicaCorruptionError(errors.Wrap(errors.New("bar"), "foo")), "replica corruption (processed=false): foo: bar"},
	}
	for i, c := range testCases {
		// This uses fmt.Sprint because that ends up calling Error() and is the
		// intended use. A previous version of this test called String() directly
		// which called the wrong (reflection-based) implementation.
		errStr := fmt.Sprint(c.errStruct)
		if errStr != c.expErr {
			t.Errorf("%d: expected '%s' but got '%s'", i, c.expErr, errStr)
		}
	}
}
// TestDiffRange verifies diffRange: nil inputs and identical snapshots
// yield no diff, and two differing snapshots produce the expected
// per-key leaseholder/replica diff entries.
func TestDiffRange(t *testing.T) {
	defer leaktest.AfterTest(t)()

	if diff := diffRange(nil, nil); diff != nil {
		t.Fatalf("diff of nils = %v", diff)
	}

	timestamp := hlc.Timestamp{WallTime: 1729, Logical: 1}
	value := []byte("foo")

	// Construct the two snapshots.
	leaderSnapshot := &roachpb.RaftSnapshotData{
		KV: []roachpb.RaftSnapshotData_KeyValue{
			{Key: []byte("a"), Timestamp: timestamp, Value: value},
			{Key: []byte("abc"), Timestamp: timestamp, Value: value},
			{Key: []byte("abcd"), Timestamp: timestamp, Value: value},
			{Key: []byte("abcde"), Timestamp: timestamp, Value: value},
			{Key: []byte("abcdefg"), Timestamp: timestamp, Value: value},
			{Key: []byte("abcdefg"), Timestamp: timestamp.Add(0, -1), Value: value},
			{Key: []byte("abcdefgh"), Timestamp: timestamp, Value: value},
			{Key: []byte("x"), Timestamp: timestamp, Value: value},
			{Key: []byte("y"), Timestamp: timestamp, Value: value},
		},
	}

	// No diff works.
	if diff := diffRange(leaderSnapshot, leaderSnapshot); diff != nil {
		t.Fatalf("diff of similar snapshots = %v", diff)
	}

	// The replica snapshot differs from the leader's in keys present only on
	// one side, in timestamps, and in one value ("x").
	replicaSnapshot := &roachpb.RaftSnapshotData{
		KV: []roachpb.RaftSnapshotData_KeyValue{
			{Key: []byte("ab"), Timestamp: timestamp, Value: value},
			{Key: []byte("abc"), Timestamp: timestamp, Value: value},
			{Key: []byte("abcde"), Timestamp: timestamp, Value: value},
			{Key: []byte("abcdef"), Timestamp: timestamp, Value: value},
			{Key: []byte("abcdefg"), Timestamp: timestamp.Add(0, 1), Value: value},
			{Key: []byte("abcdefg"), Timestamp: timestamp, Value: value},
			{Key: []byte("abcdefgh"), Timestamp: timestamp, Value: value},
			{Key: []byte("x"), Timestamp: timestamp, Value: []byte("bar")},
			{Key: []byte("z"), Timestamp: timestamp, Value: value},
		},
	}

	// The expected diff. LeaseHolder marks which side the entry came from.
	eDiff := []ReplicaSnapshotDiff{
		{LeaseHolder: true, Key: []byte("a"), Timestamp: timestamp, Value: value},
		{LeaseHolder: false, Key: []byte("ab"), Timestamp: timestamp, Value: value},
		{LeaseHolder: true, Key: []byte("abcd"), Timestamp: timestamp, Value: value},
		{LeaseHolder: false, Key: []byte("abcdef"), Timestamp: timestamp, Value: value},
		{LeaseHolder: false, Key: []byte("abcdefg"), Timestamp: timestamp.Add(0, 1), Value: value},
		{LeaseHolder: true, Key: []byte("abcdefg"), Timestamp: timestamp.Add(0, -1), Value: value},
		{LeaseHolder: true, Key: []byte("x"), Timestamp: timestamp, Value: value},
		{LeaseHolder: false, Key: []byte("x"), Timestamp: timestamp, Value: []byte("bar")},
		{LeaseHolder: true, Key: []byte("y"), Timestamp: timestamp, Value: value},
		{LeaseHolder: false, Key: []byte("z"), Timestamp: timestamp, Value: value},
	}

	diff := diffRange(leaderSnapshot, replicaSnapshot)

	if diff == nil {
		t.Fatalf("differing snapshots didn't reveal diff %v", diff)
	}
	if len(eDiff) != len(diff) {
		t.Fatalf("expected diff length different from diff (%d vs %d) , %v vs %v", len(eDiff), len(diff), eDiff, diff)
	}

	// Compare entry by entry to pinpoint the first mismatching row.
	for i, e := range eDiff {
		v := diff[i]
		if e.LeaseHolder != v.LeaseHolder || !bytes.Equal(e.Key, v.Key) || !e.Timestamp.Equal(v.Timestamp) || !bytes.Equal(e.Value, v.Value) {
			t.Fatalf("diff varies at row %d, %v vs %v", i, e, v)
		}
	}
}
// TestAsyncSnapshot verifies that with BlockingSnapshotDuration set to
// zero, the first Snapshot() call fails with
// ErrSnapshotTemporarilyUnavailable while a later call eventually
// returns a non-empty snapshot.
func TestAsyncSnapshot(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tsc := TestStoreContext()
	// Zero blocking duration means the first call always goes async.
	tsc.BlockingSnapshotDuration = 0
	tc := testContext{}
	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()

	// Lock the replica manually instead of going through GetSnapshot()
	// because we want to test the underlying async functionality.
	tc.rng.mu.Lock()
	_, err := tc.rng.Snapshot()
	tc.rng.mu.Unlock()

	// In async operation, the first call never succeeds.
	if err != raft.ErrSnapshotTemporarilyUnavailable {
		t.Fatalf("expected ErrSnapshotTemporarilyUnavailable, got %s", err)
	}

	// It will eventually succeed.
	util.SucceedsSoon(t, func() error {
		tc.rng.mu.Lock()
		snap, err := tc.rng.Snapshot()
		tc.rng.mu.Unlock()
		if err != nil {
			return err
		}
		if len(snap.Data) == 0 {
			return errors.Errorf("snapshot is empty")
		}
		return nil
	})
}
// TestAsyncSnapshotMaxAge verifies that an async snapshot which is not
// collected within AsyncSnapshotMaxAge is abandoned: its channel is
// closed without ever delivering a result.
func TestAsyncSnapshotMaxAge(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tsc := TestStoreContext()
	// Zero blocking duration means the first call always goes async; a tiny
	// max age makes the generated snapshot expire almost immediately.
	tsc.BlockingSnapshotDuration = 0
	tsc.AsyncSnapshotMaxAge = time.Millisecond
	tc := testContext{}
	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()

	// Lock the replica manually instead of going through GetSnapshot()
	// because we want to test the underlying async functionality.
	tc.rng.mu.Lock()
	_, err := tc.rng.Snapshot()
	tc.rng.mu.Unlock()

	// In async operation, the first call never succeeds.
	if err != raft.ErrSnapshotTemporarilyUnavailable {
		t.Fatalf("expected ErrSnapshotTemporarilyUnavailable, got %s", err)
	}

	// Wait for the snapshot to be generated and abandoned.
	time.Sleep(100 * time.Millisecond)
	tc.rng.mu.Lock()
	defer tc.rng.mu.Unlock()
	// The channel was closed without producing a result.
	snap, ok := <-tc.rng.mu.snapshotChan
	if ok {
		t.Fatalf("expected channel to be closed but got result: %v", snap)
	}
}
// TestSyncSnapshot verifies that with a generous BlockingSnapshotDuration,
// the very first Snapshot() call returns a non-empty snapshot.
func TestSyncSnapshot(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tsc := TestStoreContext()
	tsc.BlockingSnapshotDuration = time.Second
	tc := testContext{}
	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()

	// With enough time in BlockingSnapshotDuration, we succeed on the
	// first try.
	tc.rng.mu.Lock()
	snap, err := tc.rng.Snapshot()
	tc.rng.mu.Unlock()

	if err != nil {
		t.Fatal(err)
	}
	if len(snap.Data) == 0 {
		t.Fatal("snapshot is empty")
	}
}
// TestReplicaIDChangePending verifies that on a replica ID change, pending
// commands are re-proposed on the new raft group.
func TestReplicaIDChangePending(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()
	rng := tc.rng

	// Stop the command from being proposed to the raft group and being removed.
	rng.mu.Lock()
	rng.mu.proposeRaftCommandFn = func(p *pendingCmd) error { return nil }
	rng.mu.Unlock()

	// Add a command to the pending list, stamped with a recognizable
	// timestamp so we can identify it when it is re-proposed.
	magicTS := tc.clock.Now()
	ba := roachpb.BatchRequest{}
	ba.Timestamp = magicTS
	ba.Add(&roachpb.GetRequest{
		Span: roachpb.Span{
			Key: roachpb.Key("a"),
		},
	})
	_, _, err := rng.proposeRaftCommand(context.Background(), ba)
	if err != nil {
		t.Fatal(err)
	}

	// Set the raft command handler so we can tell if the command has been
	// re-proposed.
	commandProposed := make(chan struct{}, 1)
	rng.mu.Lock()
	defer rng.mu.Unlock()
	rng.mu.proposeRaftCommandFn = func(p *pendingCmd) error {
		if p.raftCmd.Cmd.Timestamp.Equal(magicTS) {
			commandProposed <- struct{}{}
		}
		return nil
	}

	// Set the ReplicaID on the replica; this triggers the re-proposal.
	if err := rng.setReplicaIDLocked(2); err != nil {
		t.Fatal(err)
	}

	// The re-proposal happens synchronously above, so a non-blocking
	// receive suffices.
	select {
	case <-commandProposed:
	default:
		t.Fatal("command was not re-proposed")
	}
}
// runWrongIndexTest runs a reproposal or refurbishment test, optionally
// simulating an error during the renewal of the command. If repropose is
// false, refurbishes instead.
// If withErr is true, injects an error when the reproposal or refurbishment
// takes place.
// expProposals is the number of times the specially marked command is
// expected to reach the proposal hook.
func runWrongIndexTest(t *testing.T, repropose bool, withErr bool, expProposals int32) {
	var tc testContext
	tc.Start(t)
	defer tc.Stop()

	prefix := fmt.Sprintf("repropose=%t withErr=%t: ", repropose, withErr)
	fatalf := func(msg string, args ...interface{}) {
		t.Fatal(errors.Errorf(prefix+msg, args...))
	}
	// magicKey marks the context of the command under test so the proposal
	// hook below can single it out from unrelated commands.
	type magicKey struct{}

	var c int32 // updated atomically
	tc.rng.mu.Lock()
	tc.rng.mu.proposeRaftCommandFn = func(cmd *pendingCmd) error {
		if v := cmd.ctx.Value(magicKey{}); v != nil {
			curAttempt := atomic.AddInt32(&c, 1)
			// When reproposing, fail the first attempt; otherwise fail the
			// second attempt (the refurbishment).
			if (repropose || curAttempt == 2) && withErr {
				return errors.New("boom")
			}
		}
		return defaultProposeRaftCommandLocked(tc.rng, cmd)
	}
	tc.rng.mu.Unlock()

	if pErr := tc.rng.redirectOnOrAcquireLease(context.Background()); pErr != nil {
		fatalf("%s", pErr)
	}
	// Commit a batch so the lease applied index advances past zero; we need a
	// "previous" index to use as the deliberately stale one below.
	pArg := putArgs(roachpb.Key("a"), []byte("asd"))
	{
		var ba roachpb.BatchRequest
		ba.Add(&pArg)
		ba.Timestamp = tc.clock.Now()
		if _, pErr := tc.Sender().Send(context.Background(), ba); pErr != nil {
			t.Fatal(pErr)
		}
	}
	tc.rng.mu.Lock()
	ai := tc.rng.mu.state.LeaseAppliedIndex
	tc.rng.mu.Unlock()
	if ai < 1 {
		t.Fatal("committed a batch, but still at lease index zero")
	}
	wrongIndex := ai - 1 // will chose this as MaxLeaseIndex
	log.Infof(context.Background(), "test begins")

	var ba roachpb.BatchRequest
	ba.Timestamp = tc.clock.Now()
	ba.Add(&pArg)
	repDesc, err := tc.rng.GetReplicaDescriptor()
	if err != nil {
		t.Fatal(err)
	}
	ch := func() chan roachpb.ResponseWithError {
		tc.rng.mu.Lock()
		defer tc.rng.mu.Unlock()
		// Make a new command, but pretend it didn't increment the assignment
		// counter. This leaks some implementation, but not too much.
		preAssigned := tc.rng.mu.lastAssignedLeaseIndex
		cmd := tc.rng.prepareRaftCommandLocked(
			context.WithValue(context.Background(), magicKey{}, "foo"),
			makeIDKey(), repDesc, ba)
		cmd.raftCmd.MaxLeaseIndex = preAssigned
		tc.rng.mu.lastAssignedLeaseIndex = preAssigned
		if err != nil {
			fatalf("%s", err)
		}
		// Deliberately assign a stale MaxLeaseIndex so that renewal
		// (reproposal or refurbishment) is triggered.
		cmd.raftCmd.MaxLeaseIndex = wrongIndex
		tc.rng.insertRaftCommandLocked(cmd)
		if repropose {
			if err := tc.rng.refreshPendingCmdsLocked(noReason, 0); err != nil {
				fatalf("%s", err)
			}
		} else if err := tc.rng.proposePendingCmdLocked(cmd); err != nil {
			fatalf("%s", err)
		}
		return cmd.done
	}()

	var errStr string
	if repropose {
		errStr = "boom"
	} else {
		errStr = "observed at lease index"
	}
	// The response must carry an error exactly when withErr is set, and any
	// error must match the expected message for the exercised path.
	if rwe := <-ch; rwe.Err != nil != withErr ||
		(withErr && !testutils.IsPError(rwe.Err, errStr)) {
		fatalf("%s", rwe.Err)
	}
	if n := atomic.LoadInt32(&c); n != expProposals {
		fatalf("expected %d proposals, got %d", expProposals, n)
	}
}
// Making the test more fun for human eyes. These constants name the two
// boolean arguments of runWrongIndexTest at its call sites.
const (
	propose   = false // propose normally, letting application refurbish
	repropose = true  // exercise the explicit reproposal path instead
	noErr     = false // no injected renewal error
	withErr   = true  // inject an error during reproposal/refurbishment
)
// TestReplicaRefurbishOnWrongIndex_ReproposeNoError exercises the
// repropose-without-error combination; exactly one proposal is expected.
func TestReplicaRefurbishOnWrongIndex_ReproposeNoError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Set up a command at wrong index, but don't propose it but
	// immediately call the repropose logic, which should refurbish it.
	runWrongIndexTest(t, repropose, noErr, 1)
}
// TestReplicaRefurbishOnWrongIndex_ReproposeError exercises reproposal with
// an injected error; exactly one proposal is expected.
func TestReplicaRefurbishOnWrongIndex_ReproposeError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Like its NoError variant, but the reproposal errors out and is
	// received by the client.
	runWrongIndexTest(t, repropose, withErr, 1)
}
// TestReplicaRefurbishOnWrongIndex_ProposeNoError exercises successful
// refurbishment after a normal proposal; two proposals are expected
// (the original plus the refurbished one).
func TestReplicaRefurbishOnWrongIndex_ProposeNoError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Propose a command at a past index and let the application of the command
	// refurbish it successfully.
	runWrongIndexTest(t, propose, noErr, 2)
}
// TestReplicaRefurbishOnWrongIndex_ProposeError exercises failed
// refurbishment after a normal proposal; two proposals are expected.
func TestReplicaRefurbishOnWrongIndex_ProposeError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Propose a command at a past index and let the application of the command
	// refurbish it. Refurbishing fails; asserts that the client receives
	// the error.
	runWrongIndexTest(t, propose, withErr, 2)
}
// TestReplicaCancelRaftCommandProgress creates a number of Raft commands and
// immediately abandons some of them, while proposing the remaining ones. It
// then verifies that all the non-abandoned commands get applied (which would
// not be the case if gaps in the applied index posed an issue).
func TestReplicaCancelRaftCommandProgress(t *testing.T) {
	defer leaktest.AfterTest(t)()
	var tc testContext
	tc.Start(t)
	defer tc.Stop()
	rng := tc.rng

	repDesc, err := rng.GetReplicaDescriptor()
	if err != nil {
		t.Fatal(err)
	}
	const num = 10
	// Done-channels of the commands that were actually proposed.
	var chs []chan roachpb.ResponseWithError
	func() {
		rng.mu.Lock()
		defer rng.mu.Unlock()
		for i := 0; i < num; i++ {
			var ba roachpb.BatchRequest
			ba.Timestamp = tc.clock.Now()
			ba.Add(&roachpb.PutRequest{Span: roachpb.Span{
				Key: roachpb.Key(fmt.Sprintf("k%d", i))}})
			cmd := rng.prepareRaftCommandLocked(context.Background(), makeIDKey(), repDesc, ba)
			rng.insertRaftCommandLocked(cmd)
			// We actually propose the command only if we don't
			// cancel it to simulate the case in which Raft loses
			// the command and it isn't reproposed due to the
			// client abandoning it.
			if rand.Intn(2) == 0 {
				log.Infof(context.Background(), "abandoning command %d", i)
				delete(rng.mu.pendingCmds, cmd.idKey)
			} else if err := rng.proposePendingCmdLocked(cmd); err != nil {
				t.Fatal(err)
			} else {
				chs = append(chs, cmd.done)
			}
		}
	}()
	// Every proposed (non-abandoned) command must complete without error.
	for _, ch := range chs {
		if rwe := <-ch; rwe.Err != nil {
			t.Fatal(rwe.Err)
		}
	}
}
// TestReplicaBurstPendingCommandsAndRepropose verifies that a burst of
// proposed commands assigns a correct sequence of required indexes,
// and then goes and checks that a reproposal (without prior proposal) results
// in these commands applying at the computed indexes.
func TestReplicaBurstPendingCommandsAndRepropose(t *testing.T) {
	defer leaktest.AfterTest(t)()
	t.Skip("TODO(bdarnell): https://github.com/cockroachdb/cockroach/issues/8422")
	var tc testContext
	tc.Start(t)
	defer tc.Stop()

	const num = 10
	repDesc, err := tc.rng.GetReplicaDescriptor()
	if err != nil {
		t.Fatal(err)
	}
	// magicKey marks the contexts of the commands created by this test.
	type magicKey struct{}
	// MaxLeaseIndexes at which the marked commands were seen by the hook.
	var seenCmds []int
	tc.rng.mu.Lock()
	tc.rng.mu.proposeRaftCommandFn = func(cmd *pendingCmd) error {
		if v := cmd.ctx.Value(magicKey{}); v != nil {
			seenCmds = append(seenCmds, int(cmd.raftCmd.MaxLeaseIndex))
		}
		return defaultProposeRaftCommandLocked(tc.rng, cmd)
	}
	tc.rng.mu.Unlock()
	if pErr := tc.rng.redirectOnOrAcquireLease(context.Background()); pErr != nil {
		t.Fatal(pErr)
	}

	expIndexes := make([]int, 0, num)
	chs := func() []chan roachpb.ResponseWithError {
		tc.rng.mu.Lock()
		defer tc.rng.mu.Unlock()
		chs := make([]chan roachpb.ResponseWithError, 0, num)

		origIndexes := make([]int, 0, num)
		for i := 0; i < num; i++ {
			expIndexes = append(expIndexes, i+1)
			ctx := context.WithValue(context.Background(), magicKey{}, "foo")
			var ba roachpb.BatchRequest
			ba.Timestamp = tc.clock.Now()
			ba.Add(&roachpb.PutRequest{Span: roachpb.Span{
				Key: roachpb.Key(fmt.Sprintf("k%d", i))}})
			cmd := tc.rng.prepareRaftCommandLocked(ctx, makeIDKey(), repDesc, ba)
			tc.rng.insertRaftCommandLocked(cmd)
			chs = append(chs, cmd.done)
		}

		// The assigned lease indexes, gathered from the (unordered) pending
		// map and sorted, must be exactly 1..num.
		for _, p := range tc.rng.mu.pendingCmds {
			if v := p.ctx.Value(magicKey{}); v != nil {
				origIndexes = append(origIndexes, int(p.raftCmd.MaxLeaseIndex))
			}
		}
		sort.Ints(origIndexes)
		if !reflect.DeepEqual(expIndexes, origIndexes) {
			t.Fatalf("wanted required indexes %v, got %v", expIndexes, origIndexes)
		}
		// Repropose everything; none of the commands were proposed before.
		if err := tc.rng.refreshPendingCmdsLocked(noReason, 0); err != nil {
			t.Fatal(err)
		}
		return chs
	}()
	for _, ch := range chs {
		if pErr := (<-ch).Err; pErr != nil {
			t.Fatal(pErr)
		}
	}
	if !reflect.DeepEqual(seenCmds, expIndexes) {
		t.Fatalf("expected indexes %v, got %v", expIndexes, seenCmds)
	}

	// Eventually the pending map drains and the assigned index catches up
	// with the applied index.
	util.SucceedsSoon(t, func() error {
		tc.rng.mu.Lock()
		defer tc.rng.mu.Unlock()
		nonePending := len(tc.rng.mu.pendingCmds) == 0
		c := int(tc.rng.mu.lastAssignedLeaseIndex) - int(tc.rng.mu.state.LeaseAppliedIndex)
		if nonePending && c > 0 {
			return fmt.Errorf("no pending cmds, but have required index offset %d", c)
		}
		if nonePending {
			return nil
		}
		return errors.New("still pending commands")
	})
}
// TestReplicaRefreshPendingCommandsTicks verifies that pending commands are
// reproposed only on ticks that are a multiple of the Raft election timeout,
// by ticking the replica manually and counting reproposals per tick.
func TestReplicaRefreshPendingCommandsTicks(t *testing.T) {
	defer leaktest.AfterTest(t)()
	var tc testContext
	tc.Start(t)
	defer tc.Stop()

	// Grab processRaftMu in order to block normal raft replica processing. This
	// test is ticking the replicas manually and doesn't want the store to be
	// doing so concurrently.
	tc.store.processRaftMu.Lock()
	defer tc.store.processRaftMu.Unlock()

	r := tc.rng
	repDesc, err := r.GetReplicaDescriptor()
	if err != nil {
		t.Fatal(err)
	}

	electionTicks := tc.store.ctx.RaftElectionTimeoutTicks
	{
		// The verifications of the reproposal counts below rely on r.mu.ticks
		// starting with a value of 0 (modulo electionTicks). Move the replica into
		// that state in case the replica was ticked before we grabbed
		// processRaftMu.
		r.mu.Lock()
		ticks := r.mu.ticks
		r.mu.Unlock()
		for ; (ticks % electionTicks) != 0; ticks++ {
			if err := r.tick(); err != nil {
				t.Fatal(err)
			}
		}
	}

	// We tick the replica 2*RaftElectionTimeoutTicks. RaftElectionTimeoutTicks
	// is special in that it controls how often pending commands are reproposed
	// or refurbished.
	for i := 0; i < 2*electionTicks; i++ {
		// Add another pending command on each iteration.
		r.mu.Lock()
		id := fmt.Sprintf("%08d", i)
		var ba roachpb.BatchRequest
		ba.Timestamp = tc.clock.Now()
		ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: roachpb.Key(id)}})
		cmd := r.prepareRaftCommandLocked(context.Background(),
			storagebase.CmdIDKey(id), repDesc, ba)
		r.insertRaftCommandLocked(cmd)
		if err := r.proposePendingCmdLocked(cmd); err != nil {
			t.Fatal(err)
		}
		// Build a map from command key to proposed-at-ticks.
		m := map[storagebase.CmdIDKey]int{}
		for id, p := range r.mu.pendingCmds {
			m[id] = p.proposedAtTicks
		}
		r.mu.Unlock()

		// Tick raft.
		if err := r.tick(); err != nil {
			t.Fatal(err)
		}

		// Gather up the reproprosed commands: a command whose proposedAtTicks
		// changed across the tick was reproposed.
		r.mu.Lock()
		var reproposed []*pendingCmd
		for id, p := range r.mu.pendingCmds {
			if m[id] != p.proposedAtTicks {
				reproposed = append(reproposed, p)
			}
		}
		ticks := r.mu.ticks
		r.mu.Unlock()

		// Reproposals are only performed every electionTicks. We'll need to fix
		// this test if that changes.
		if (ticks % electionTicks) == 0 {
			if len(reproposed) != i-1 {
				t.Fatalf("%d: expected %d reproprosed commands, but found %+v", i, i-1, reproposed)
			}
		} else {
			if len(reproposed) != 0 {
				t.Fatalf("%d: expected no reproprosed commands, but found %+v", i, reproposed)
			}
		}
	}
}
// TestReplicaDoubleRefurbish exercises a code path in which a command is seen
// fit for refurbishment, but has already been refurbished earlier (with that
// command being in-flight). See #7185.
func TestReplicaDoubleRefurbish(t *testing.T) {
	defer leaktest.AfterTest(t)()
	var tc testContext
	tc.Start(t)
	defer tc.Stop()

	var ba roachpb.BatchRequest
	ba.Timestamp = tc.clock.Now()
	ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: roachpb.Key("r")}})
	repDesc, err := tc.rng.GetReplicaDescriptor()
	if err != nil {
		t.Fatal(err)
	}

	// Make a Raft command; we'll set things up so that it will be considered
	// for refurbishment multiple times.
	tc.rng.mu.Lock()
	cmd := tc.rng.prepareRaftCommandLocked(context.Background(), makeIDKey(), repDesc, ba)
	ch := cmd.done // must not use cmd outside of mutex
	tc.rng.mu.Unlock()

	{
		// Send some random request to advance the lease applied counter to
		// make `cmd` refurbish when we put it into Raft.
		var ba roachpb.BatchRequest
		ba.Timestamp = tc.clock.Now()
		pArgs := putArgs(roachpb.Key("foo"), []byte("bar"))
		ba.Add(&pArgs)
		if _, pErr := tc.rng.Send(context.Background(), ba); pErr != nil {
			t.Fatal(pErr)
		}
	}

	const num = 10
	func() {
		tc.rng.mu.Lock()
		defer tc.rng.mu.Unlock()
		// Insert the command and propose it ten times. Before the commit
		// which introduced this test, the first application would repropose,
		// and the second would decide to not repropose, but accidentally send
		// the error to the client, so that the successful refurbishment would
		// be the second result received by the client.
		tc.rng.insertRaftCommandLocked(cmd)
		for i := 0; i < num; i++ {
			if err := tc.rng.proposePendingCmdLocked(cmd); err != nil {
				t.Fatal(err)
			}
		}
	}()

	// Exactly one response may arrive on the done channel before it is
	// closed; any second response is the bug this test guards against.
	var i int
	for resp := range ch {
		i++
		if i != 1 {
			t.Fatalf("received more than one response on the done channel: %+v", resp)
		}
	}
}
// TestCommandTimeThreshold verifies that commands with timestamps at or below
// the replica GC threshold fail, while commands with later timestamps
// continue to work.
func TestCommandTimeThreshold(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	ts := makeTS(1, 0)
	ts2 := makeTS(2, 0)
	ts3 := makeTS(3, 0)

	key := roachpb.Key("a")
	keycp := roachpb.Key("c")

	va := []byte("a")
	vb := []byte("b")

	// Verify a Get works.
	gArgs := getArgs(key)
	if _, err := tc.SendWrappedWith(roachpb.Header{
		Timestamp: ts,
	}, &gArgs); err != nil {
		t.Fatalf("could not get data: %s", err)
	}
	// Verify a later Get works.
	if _, err := tc.SendWrappedWith(roachpb.Header{
		Timestamp: ts3,
	}, &gArgs); err != nil {
		t.Fatalf("could not get data: %s", err)
	}

	// Put some data for use with CP later on.
	pArgs := putArgs(keycp, va)
	if _, err := tc.SendWrappedWith(roachpb.Header{
		Timestamp: ts,
	}, &pArgs); err != nil {
		t.Fatalf("could not put data: %s", err)
	}

	// Do a GC, moving the replica GC threshold up to ts2.
	gcr := roachpb.GCRequest{
		Threshold: ts2,
	}
	if _, err := tc.SendWrapped(&gcr); err != nil {
		t.Fatal(err)
	}

	// Do the same Get, which should now fail.
	if _, err := tc.SendWrappedWith(roachpb.Header{
		Timestamp: ts,
	}, &gArgs); err == nil {
		t.Fatal("expected failure")
	} else if err.String() != "batch timestamp 0.000000001,0 must be after replica GC threshold 0.000000002,0" {
		t.Fatalf("unexpected error: %s", err)
	}

	// Verify a later Get works.
	if _, err := tc.SendWrappedWith(roachpb.Header{
		Timestamp: ts3,
	}, &gArgs); err != nil {
		t.Fatalf("could not get data: %s", err)
	}

	// Verify an early CPut fails (the threshold itself is excluded).
	cpArgs := cPutArgs(keycp, vb, va)
	if _, err := tc.SendWrappedWith(roachpb.Header{
		Timestamp: ts2,
	}, &cpArgs); err == nil {
		t.Fatal("expected failure")
	} else if err.String() != "batch timestamp 0.000000002,0 must be after replica GC threshold 0.000000002,0" {
		t.Fatalf("unexpected error: %s", err)
	}
	// Verify a later CPut works.
	if _, err := tc.SendWrappedWith(roachpb.Header{
		Timestamp: ts3,
	}, &cpArgs); err != nil {
		t.Fatalf("could not cput data: %s", err)
	}
}
// TestReserveAndApplySnapshot checks to see if a snapshot is correctly applied
// and that its reservation is removed.
func TestReserveAndApplySnapshot(t *testing.T) {
	defer leaktest.AfterTest(t)()

	tsc := TestStoreContext()
	tc := testContext{}
	tc.StartWithStoreContext(t, tsc)
	defer tc.Stop()

	// checkReservations asserts the number of outstanding reservations held
	// by the store's bookie.
	checkReservations := func(t *testing.T, expected int) {
		tc.store.bookie.mu.Lock()
		defer tc.store.bookie.mu.Unlock()
		if e, a := expected, len(tc.store.bookie.mu.reservationsByRangeID); e != a {
			t.Fatalf("wrong number of reservations - expected:%d, actual:%d", e, a)
		}
	}

	key := roachpb.RKey("a")
	firstRng := tc.store.LookupReplica(key, nil)
	snap, err := firstRng.GetSnapshot(context.Background())
	if err != nil {
		t.Fatal(err)
	}

	// Report ample available bytes so the reservation below is accepted.
	tc.store.metrics.Available.Update(tc.store.bookie.maxReservedBytes)

	// Note that this is an artificial scenario in which we're adding a
	// reservation for a replica that is already on the range. This test is
	// designed to test the filling of the reservation specifically and in
	// normal operation there should not be a reservation for an existing
	// replica.
	req := ReservationRequest{
		StoreRequestHeader: StoreRequestHeader{
			StoreID: tc.store.StoreID(),
			NodeID:  tc.store.nodeDesc.NodeID,
		},
		RangeID:   firstRng.RangeID,
		RangeSize: 10,
	}

	if !tc.store.Reserve(context.Background(), req).Reserved {
		t.Fatalf("Can't reserve the replica")
	}
	checkReservations(t, 1)

	// Apply a snapshot and check the reservation was filled. Note that this
	// out-of-band application could be a root cause if this test ever crashes.
	if err := firstRng.applySnapshot(context.Background(), snap, raftpb.HardState{}); err != nil {
		t.Fatal(err)
	}
	checkReservations(t, 0)
}
|
package main
import (
"fmt"
"time"
"strconv"
"github.com/JokerQyou/rpi"
"github.com/JokerQyou/rpi/pcd8544"
"github.com/kidoman/embd"
"github.com/kidoman/embd/sensor/bmp085"
_ "github.com/kidoman/embd/host/rpi"
)
// Pin assignments (as passed to pcd8544.LCDInit) and display contrast for
// the PCD8544 LCD.
const (
	SCLK     = 0  // serial clock pin
	DIN      = 1  // data-in pin
	DC       = 2  // data/command pin
	CS       = 3  // chip-select pin
	RST      = 4  // reset pin
	CONTRAST = 60 // LCD contrast value
)
// init sets up WiringPi and initializes the LCD, leaving it cleared.
func init() {
	rpi.WiringPiSetup()
	pcd8544.LCDInit(SCLK, DIN, DC, CS, RST, CONTRAST)
	pcd8544.LCDclear()
	pcd8544.LCDdisplay()
}
// gpio_cleanup blanks the LCD and returns all pins used by the display to
// input mode before the program exits.
func gpio_cleanup() {
	pcd8544.LCDclear()
	pcd8544.LCDdisplay()
	rpi.PinMode(SCLK, rpi.INPUT)
	rpi.PinMode(DIN, rpi.INPUT)
	rpi.PinMode(DC, rpi.INPUT)
	rpi.PinMode(CS, rpi.INPUT)
	rpi.PinMode(RST, rpi.INPUT)
}
// get_time returns the current wall-clock time formatted as "15:04:05" and
// the current date formatted as "01-02 Mon".
func get_time() (string, string) {
	now := time.Now()
	clock := now.Format("15:04:05")
	date := now.Format("01-02 Mon")
	return clock, date
}
// main reads the BMP085 temperature sensor once per second and renders a
// framed screen showing time, date and temperature on the LCD. The loop runs
// forever; on a sensor read error it restores the GPIO pins and panics.
func main() {
	if err := embd.InitI2C(); err != nil {
		panic(err)
	}
	defer embd.CloseI2C()
	bus := embd.NewI2CBus(1)
	baro := bmp085.New(bus)
	defer baro.Close()
	keep_running := true
	for keep_running {
		// Get temperature
		temp, err := baro.Temperature()
		if err != nil {
			gpio_cleanup()
			panic(err)
		}
		temp_str := fmt.Sprint(strconv.FormatFloat(temp, 'f', 2, 64), "°C")
		// Draw a border rectangle inset from the display edges.
		pcd8544.LCDdrawrect(6-1, 6-1, pcd8544.LCDWIDTH-6, pcd8544.LCDHEIGHT-6, pcd8544.BLACK)
		time_str, date_str := get_time()
		pcd8544.LCDdrawstring(20, 12, time_str)
		pcd8544.LCDdrawstring(18, 24, date_str)
		pcd8544.LCDdrawstring(20, 36, temp_str)
		pcd8544.LCDdisplay()
		// wait for 1 sec
		time.Sleep(time.Second)
	}
}
Temporarily opt out of the degree symbol
package main
import (
"fmt"
"time"
"strconv"
"github.com/JokerQyou/rpi"
"github.com/JokerQyou/rpi/pcd8544"
"github.com/kidoman/embd"
"github.com/kidoman/embd/sensor/bmp085"
_ "github.com/kidoman/embd/host/rpi"
)
// Pin assignments (as passed to pcd8544.LCDInit) and display contrast for
// the PCD8544 LCD.
const (
	SCLK     = 0  // serial clock pin
	DIN      = 1  // data-in pin
	DC       = 2  // data/command pin
	CS       = 3  // chip-select pin
	RST      = 4  // reset pin
	CONTRAST = 60 // LCD contrast value
)
// init sets up WiringPi and initializes the LCD, leaving it cleared.
func init() {
	rpi.WiringPiSetup()
	pcd8544.LCDInit(SCLK, DIN, DC, CS, RST, CONTRAST)
	pcd8544.LCDclear()
	pcd8544.LCDdisplay()
}
// gpio_cleanup blanks the LCD and returns all pins used by the display to
// input mode before the program exits.
func gpio_cleanup() {
	pcd8544.LCDclear()
	pcd8544.LCDdisplay()
	rpi.PinMode(SCLK, rpi.INPUT)
	rpi.PinMode(DIN, rpi.INPUT)
	rpi.PinMode(DC, rpi.INPUT)
	rpi.PinMode(CS, rpi.INPUT)
	rpi.PinMode(RST, rpi.INPUT)
}
// get_time reports the current clock reading ("15:04:05") and date
// ("01-02 Mon").
func get_time() (hhmmss string, monthDay string) {
	now := time.Now()
	hhmmss = now.Format("15:04:05")
	monthDay = now.Format("01-02 Mon")
	return
}
// main reads the BMP085 temperature sensor once per second and renders a
// framed screen showing time, date and temperature on the LCD. The loop runs
// forever; on a sensor read error it restores the GPIO pins and panics.
func main() {
	if err := embd.InitI2C(); err != nil {
		panic(err)
	}
	defer embd.CloseI2C()
	bus := embd.NewI2CBus(1)
	baro := bmp085.New(bus)
	defer baro.Close()
	keep_running := true
	for keep_running {
		// Get temperature
		temp, err := baro.Temperature()
		if err != nil {
			gpio_cleanup()
			panic(err)
		}
		// " C" instead of "°C": the degree symbol is temporarily opted out.
		temp_str := fmt.Sprint(strconv.FormatFloat(temp, 'f', 2, 64), " C")
		// Draw a border rectangle inset from the display edges.
		pcd8544.LCDdrawrect(6-1, 6-1, pcd8544.LCDWIDTH-6, pcd8544.LCDHEIGHT-6, pcd8544.BLACK)
		time_str, date_str := get_time()
		pcd8544.LCDdrawstring(20, 12, time_str)
		pcd8544.LCDdrawstring(18, 24, date_str)
		pcd8544.LCDdrawstring(20, 36, temp_str)
		pcd8544.LCDdisplay()
		// wait for 1 sec
		time.Sleep(time.Second)
	}
}
|
package commander
import (
"errors"
"flag"
"fmt"
"os"
"strings"
)
// ErrUnrecognizedCommand is returned by Execute when the first argument does
// not name any registered command.
var ErrUnrecognizedCommand = errors.New("No command executed")

// Command wraps together the short command name, the description
// for a command, the command's Flags and the function that will handle
// the command.
type Command struct {
	Command     string
	Description string
	FlagSet     *flag.FlagSet
	F           func(args []string) error
}

// NewCommand creates a new commander Command struct with the given parameters.
func NewCommand(cmd, description string, flagset *flag.FlagSet, f func(args []string) error) *Command {
	return &Command{
		Command:     cmd,
		Description: description,
		FlagSet:     flagset,
		F:           f,
	}
}
// CommandFunction returns a command
type CommandFunction func() *Command

// Execute takes an args array, and executes the appropriate command from the
// array of commandFunctions. If nil is passed as the args array, os.Args is used
// by default.
//
// With no args (or "help" as the first arg) it prints usage information
// instead of running a command. Returns ErrUnrecognizedCommand when the
// first argument does not name a registered command.
func Execute(args []string, commandFns ...CommandFunction) error {
	if nil == args {
		args = os.Args[1:]
	}
	// Index the registered commands by lower-cased name.
	commands := make(map[string]*Command, len(commandFns))
	for _, c := range commandFns {
		cmd := c()
		commands[strings.ToLower(cmd.Command)] = cmd
	}
	if 0 == len(args) || strings.ToLower(args[0]) == "help" {
		if 1 < len(args) {
			// Per-command help for each requested sub-command.
			for _, c := range args[1:] {
				cmd, ok := commands[strings.ToLower(c)]
				if !ok {
					// Fix: print the unrecognized name (c), not the nil
					// *Command that the failed map lookup produced.
					fmt.Println("Unrecognized sub-command: ", c)
					continue
				}
				if nil != cmd.FlagSet {
					cmd.FlagSet.PrintDefaults()
				} else {
					fmt.Printf("%s takes no arguments: %s", cmd.Command, cmd.Description)
				}
			}
			return nil
		}
		fmt.Println(`Commands are:`)
		for _, c := range commands {
			fmt.Printf("%s\t\t%s\n", c.Command, c.Description)
		}
		return nil
	}
	c, ok := commands[strings.ToLower(args[0])]
	if !ok {
		return ErrUnrecognizedCommand
	}
	args = args[1:]
	if nil != c.FlagSet {
		// Surface parse errors instead of discarding them (relevant when the
		// FlagSet uses flag.ContinueOnError).
		if err := c.FlagSet.Parse(args); err != nil {
			return err
		}
	}
	return c.F(args)
}
Add MightExecute, which treats "no command executed" as success instead of an error
package commander
import (
"errors"
"flag"
"fmt"
"os"
"strings"
)
var ErrUnrecognizedCommand = errors.New("No command executed")
// Command wraps together the short command name, the description
// for a command, the commands Flags and the function that will handle
// the command.
type Command struct {
Command string
Description string
FlagSet *flag.FlagSet
F func(args []string) error
}
// NewCommand creates a new comandeer Command struct with the given parameters.
func NewCommand(cmd, description string, flagset *flag.FlagSet, f func(args []string) error) *Command {
return &Command{ cmd, description, flagset, f }
}
// CommandFunction returns a command
type CommandFunction func() *Command

// MightExecute returns an error only if it was generated in the processing of
// a command. If no command was recognized, it returns nil.
// This is useful in a situation where you might execute a command, but don't
// mind if no command is executed, but want to catch an error if a command
// fails. This is coded as:
//
//	if err = commander.MightExecute(...); nil != err {
//	    panic(err)
//	}
func MightExecute(args []string, commandFns ...CommandFunction) error {
	err := Execute(args, commandFns...)
	// Fix: the original compared an undefined identifier `e`; the result of
	// Execute is stored in `err`, which would not even compile.
	if nil == err || err == ErrUnrecognizedCommand {
		return nil
	}
	return err
}
// Execute takes an args array, and executes the appropriate command from the
// array of commandFunctions. If nil is passed as the args array, os.Args is used
// by default.
//
// With no args (or "help" as the first arg) it prints usage information
// instead of running a command. Returns ErrUnrecognizedCommand when the
// first argument does not name a registered command.
func Execute(args []string, commandFns ...CommandFunction) error {
	if nil == args {
		args = os.Args[1:]
	}
	// Index the registered commands by lower-cased name.
	commands := make(map[string]*Command, len(commandFns))
	for _, c := range commandFns {
		cmd := c()
		commands[strings.ToLower(cmd.Command)] = cmd
	}
	if 0 == len(args) || strings.ToLower(args[0]) == "help" {
		if 1 < len(args) {
			// Per-command help for each requested sub-command.
			for _, c := range args[1:] {
				cmd, ok := commands[strings.ToLower(c)]
				if !ok {
					// Fix: print the unrecognized name (c), not the nil
					// *Command that the failed map lookup produced.
					fmt.Println("Unrecognized sub-command: ", c)
					continue
				}
				if nil != cmd.FlagSet {
					cmd.FlagSet.PrintDefaults()
				} else {
					fmt.Printf("%s takes no arguments: %s", cmd.Command, cmd.Description)
				}
			}
			return nil
		}
		fmt.Println(`Commands are:`)
		for _, c := range commands {
			fmt.Printf("%s\t\t%s\n", c.Command, c.Description)
		}
		return nil
	}
	c, ok := commands[strings.ToLower(args[0])]
	if !ok {
		return ErrUnrecognizedCommand
	}
	args = args[1:]
	if nil != c.FlagSet {
		// Surface parse errors instead of discarding them (relevant when the
		// FlagSet uses flag.ContinueOnError).
		if err := c.FlagSet.Parse(args); err != nil {
			return err
		}
	}
	return c.F(args)
}
|
package stripe
import (
"net/url"
"strconv"
)
// InvoiceItem mirrors the Stripe invoice item object.
//
// For more information: https://stripe.com/docs/api#invoiceitems
type InvoiceItem struct {
	Id          string   `json:"id"`
	Object      string   `json:"object"`
	Livemode    bool     `json:"livemode"`
	Amount      int64    `json:"amount"`
	Currency    string   `json:"currency"`
	Customer    string   `json:"customer"`
	Date        int64    `json:"date"`
	Proration   bool     `json:"proration"`
	Description string   `json:"description"`
	Invoice     string   `json:"invoice"`
	Metadata    Metadata `json:"metadata"`
}

// InvoiceItemListResponse is the list envelope returned by the invoice item
// list endpoint.
type InvoiceItemListResponse struct {
	Object string         `json:"object"`
	Url    string         `json:"url"`
	Count  int            `json:"count"`
	Data   []*InvoiceItem `json:"data"`
}

// InvoiceItemClient groups the invoice item API operations.
type InvoiceItemClient struct{}
// Create creates an invoice item.
//
// For more information: https://stripe.com/docs/api#create_invoice_item
func (c *InvoiceItemClient) Create(params *InvoiceItemParams) (*InvoiceItem, error) {
	var item InvoiceItem
	form := url.Values{}
	parseInvoiceItemParams(params, &form)
	err := post("/invoiceitems", form, &item)
	return &item, err
}

// Retrieve loads an invoice item.
//
// For more information: https://stripe.com/docs/api#retrieve_invoice_item
func (c *InvoiceItemClient) Retrieve(id string) (*InvoiceItem, error) {
	var item InvoiceItem
	err := get("/invoiceitems/"+id, nil, &item)
	return &item, err
}

// Update updates an invoice item.
//
// For more information: https://stripe.com/docs/api#update_invoice_item
func (c *InvoiceItemClient) Update(id string, params *InvoiceItemParams) (*InvoiceItem, error) {
	var item InvoiceItem
	form := url.Values{}
	parseInvoiceItemParams(params, &form)
	err := post("/invoiceitems/"+id, form, &item)
	return &item, err
}

// Delete deletes an invoice item.
//
// For more information: https://stripe.com/docs/api#delete_invoice_item
func (c *InvoiceItemClient) Delete(id string) (*DeleteResponse, error) {
	var response DeleteResponse
	err := delete("/invoiceitems/"+id, nil, &response)
	return &response, err
}
// List lists the first 10 invoice items. It calls ListCount with 10 as the
// count and 0 as the offset, which are the defaults in the Stripe API.
//
// For more information: https://stripe.com/docs/api#list_invoice_items
func (c *InvoiceItemClient) List() (*InvoiceItemListResponse, error) {
	return c.ListCount(10, 0)
}

// ListCount lists `count` invoice items starting at `offset`.
//
// For more information: https://stripe.com/docs/api#list_invoice_items
func (c *InvoiceItemClient) ListCount(count, offset int) (*InvoiceItemListResponse, error) {
	var response InvoiceItemListResponse
	values := url.Values{}
	values.Set("count", strconv.Itoa(count))
	values.Set("offset", strconv.Itoa(offset))
	err := get("/invoiceitems", values, &response)
	return &response, err
}

// parseInvoiceItemParams takes a pointer to a InvoiceItemParams and a pointer
// to a url.Values. It iterates over everything in the InvoiceItemParams struct
// and Adds what is there to the url.Values.
func parseInvoiceItemParams(params *InvoiceItemParams, values *url.Values) {
	// Use parseMetadata from metadata.go to set up the metadata param first.
	if params.Metadata != nil {
		parseMetadata(params.Metadata, values)
	}
	addParamsToValues(params, values)
}
Updated invoice items from #List and #ListCount to #All and #AllWithFilters
package stripe
import (
"net/url"
)
// InvoiceItem mirrors the Stripe invoice item object.
//
// For more information: https://stripe.com/docs/api#invoiceitems
type InvoiceItem struct {
	Id          string   `json:"id"`
	Object      string   `json:"object"`
	Livemode    bool     `json:"livemode"`
	Amount      int64    `json:"amount"`
	Currency    string   `json:"currency"`
	Customer    string   `json:"customer"`
	Date        int64    `json:"date"`
	Proration   bool     `json:"proration"`
	Description string   `json:"description"`
	Invoice     string   `json:"invoice"`
	Metadata    Metadata `json:"metadata"`
}

// InvoiceItemListResponse is the list envelope returned by the invoice item
// list endpoint.
type InvoiceItemListResponse struct {
	Object string         `json:"object"`
	Url    string         `json:"url"`
	Count  int            `json:"count"`
	Data   []*InvoiceItem `json:"data"`
}

// InvoiceItemClient groups the invoice item API operations.
type InvoiceItemClient struct{}
// Create creates an invoice item.
//
// For more information: https://stripe.com/docs/api#create_invoice_item
func (c *InvoiceItemClient) Create(params *InvoiceItemParams) (*InvoiceItem, error) {
	var item InvoiceItem
	form := url.Values{}
	parseInvoiceItemParams(params, &form)
	err := post("/invoiceitems", form, &item)
	return &item, err
}

// Retrieve loads an invoice item.
//
// For more information: https://stripe.com/docs/api#retrieve_invoice_item
func (c *InvoiceItemClient) Retrieve(id string) (*InvoiceItem, error) {
	var item InvoiceItem
	err := get("/invoiceitems/"+id, nil, &item)
	return &item, err
}

// Update updates an invoice item.
//
// For more information: https://stripe.com/docs/api#update_invoice_item
func (c *InvoiceItemClient) Update(id string, params *InvoiceItemParams) (*InvoiceItem, error) {
	var item InvoiceItem
	form := url.Values{}
	parseInvoiceItemParams(params, &form)
	err := post("/invoiceitems/"+id, form, &item)
	return &item, err
}

// Delete deletes an invoice item.
//
// For more information: https://stripe.com/docs/api#delete_invoice_item
func (c *InvoiceItemClient) Delete(id string) (*DeleteResponse, error) {
	var response DeleteResponse
	err := delete("/invoiceitems/"+id, nil, &response)
	return &response, err
}
// All lists the first 10 invoice items. It calls AllWithFilters with a blank
// Filters so all defaults are used.
//
// For more information: https://stripe.com/docs/api#list_invoice_items
func (c *InvoiceItemClient) All() (*InvoiceItemListResponse, error) {
	return c.AllWithFilters(Filters{})
}

// AllWithFilters takes a Filters and applies all valid filters for the action.
//
// For more information: https://stripe.com/docs/api#list_invoice_items
func (c *InvoiceItemClient) AllWithFilters(filters Filters) (*InvoiceItemListResponse, error) {
	var response InvoiceItemListResponse
	values := url.Values{}
	addFiltersToValues([]string{"count", "offset"}, filters, &values)
	err := get("/invoiceitems", values, &response)
	return &response, err
}

// parseInvoiceItemParams takes a pointer to a InvoiceItemParams and a pointer
// to a url.Values. It iterates over everything in the InvoiceItemParams struct
// and Adds what is there to the url.Values.
func parseInvoiceItemParams(params *InvoiceItemParams, values *url.Values) {
	// Use parseMetadata from metadata.go to set up the metadata param first.
	if params.Metadata != nil {
		parseMetadata(params.Metadata, values)
	}
	addParamsToValues(params, values)
}
|
package sample
import (
"fmt"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
)
// BucketRefererSample shows how to set, get and delete the bucket referer.
func BucketRefererSample() {
	// New client
	client, err := oss.New(endpoint, accessID, accessKey)
	if err != nil {
		HandleError(err)
	}

	// Create the bucket with default parameters
	err = client.CreateBucket(bucketName)
	if err != nil {
		HandleError(err)
	}

	var referers = []string{
		"http://www.aliyun.com",
		"http://www.???.aliyuncs.com",
		"http://www.*.com",
	}

	// Case 1: Set referers. The referers are with wildcards ? and * which could represent one and zero to multiple characters
	err = client.SetBucketReferer(bucketName, referers, false)
	if err != nil {
		HandleError(err)
	}

	// Case 2: Clear referers
	referers = []string{}
	err = client.SetBucketReferer(bucketName, referers, true)
	if err != nil {
		HandleError(err)
	}

	// Get bucket referer configuration.
	// Fix: the SDK method is GetBucketReferer; the previous
	// "GetBucketReferqer" was a typo and does not exist.
	gbr, err := client.GetBucketReferer(bucketName)
	if err != nil {
		HandleError(err)
	}
	fmt.Println("Bucket Referers:", gbr.RefererList,
		"AllowEmptyReferer:", gbr.AllowEmptyReferer)

	// Delete bucket
	err = client.DeleteBucket(bucketName)
	if err != nil {
		HandleError(err)
	}

	fmt.Println("BucketRefererSample completed")
}
use the correct method name "GetBucketReferer" in the sample
package sample
import (
"fmt"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
)
// BucketRefererSample shows how to set, get and delete the bucket referer.
// Note: this sample talks to the live OSS service using the package-level
// endpoint/credential variables, and deletes the bucket when done.
func BucketRefererSample() {
	// New client
	client, err := oss.New(endpoint, accessID, accessKey)
	if err != nil {
		HandleError(err)
	}

	// Create the bucket with default parameters
	err = client.CreateBucket(bucketName)
	if err != nil {
		HandleError(err)
	}

	var referers = []string{
		"http://www.aliyun.com",
		"http://www.???.aliyuncs.com",
		"http://www.*.com",
	}

	// Case 1: Set referers. The referers are with wildcards ? and * which could represent one and zero to multiple characters
	err = client.SetBucketReferer(bucketName, referers, false)
	if err != nil {
		HandleError(err)
	}

	// Case 2: Clear referers
	referers = []string{}
	err = client.SetBucketReferer(bucketName, referers, true)
	if err != nil {
		HandleError(err)
	}

	// Get bucket referer configuration
	gbr, err := client.GetBucketReferer(bucketName)
	if err != nil {
		HandleError(err)
	}
	fmt.Println("Bucket Referers:", gbr.RefererList,
		"AllowEmptyReferer:", gbr.AllowEmptyReferer)

	// Delete bucket
	err = client.DeleteBucket(bucketName)
	if err != nil {
		HandleError(err)
	}

	fmt.Println("BucketRefererSample completed")
}
|
package libcarina
import (
"archive/zip"
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"path"
"strings"
"github.com/rackspace/gophercloud"
"github.com/rackspace/gophercloud/rackspace"
)
// BetaEndpoint reflects the default endpoint for this library
const BetaEndpoint = "https://app.getcarina.com"
const mimetypeJSON = "application/json"
const authHeaderKey = "X-Auth-Token"
const userAgent = "getcarina/libcarina"
// ZipURLResponse is the response that comes back from the zip endpoint
type ZipURLResponse struct {
URL string `json:"zip_url"`
}
// ClusterClient accesses Carina directly
type ClusterClient struct {
Client *http.Client
Username string
Token string
Endpoint string
}
// ErrorResponse is the JSON formatted error response from Carina
type ErrorResponse struct {
Error string `json:"error"`
}
// Cluster is a cluster of Docker nodes
type Cluster struct {
// ID of the cluster
ID string `json:"id"`
// Name of the cluster
Name string `json:"name"`
// COE (container orchestration engine) used by the cluster
COE string `json:"coe"`
// Nodes in the cluster
Nodes int `json:"node_count,omitempty"`
// Status of the cluster
Status string `json:"status,omitempty"`
}
// Credentials holds the keys to the kingdom
type Credentials struct {
README []byte
Cert []byte
Key []byte
CA []byte
CAKey []byte
DockerEnv []byte
DockerCmd []byte
DockerPS1 []byte
DockerHost string
Files map[string][]byte
DockerFish []byte
}
type Quotas struct {
MaxClusters int `json:"max_clusters"`
MaxNodesPerCluster int `json:"max_nodes_per_cluster"`
}
// newClusterClient authenticates against Rackspace identity and wraps the
// resulting token in a ClusterClient aimed at the given Carina endpoint.
func newClusterClient(endpoint string, ao gophercloud.AuthOptions) (*ClusterClient, error) {
	provider, err := rackspace.AuthenticatedClient(ao)
	if err != nil {
		return nil, err
	}
	cc := &ClusterClient{
		Client:   &http.Client{},
		Username: ao.Username,
		Token:    provider.TokenID,
		Endpoint: endpoint,
	}
	return cc, nil
}
// NewClusterClient create a new clusterclient by API Key
func NewClusterClient(endpoint, username, apikey string) (*ClusterClient, error) {
	return newClusterClient(endpoint, gophercloud.AuthOptions{
		Username:         username,
		APIKey:           apikey,
		IdentityEndpoint: rackspace.RackspaceUSIdentity,
	})
}
// NewRequest handles a request using auth used by Carina.
// On success the caller owns resp.Body and must close it; on an error
// response (>= 400) the body is drained, closed, and turned into an error.
func (c *ClusterClient) NewRequest(method string, uri string, body io.Reader) (*http.Response, error) {
	req, err := http.NewRequest(method, c.Endpoint+uri, body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", userAgent)
	req.Header.Add("Content-Type", mimetypeJSON)
	req.Header.Add("Accept", mimetypeJSON)
	req.Header.Add(authHeaderKey, c.Token)

	resp, err := c.Client.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode >= 400 {
		if resp.Body == nil {
			return nil, errors.New(resp.Status)
		}
		// Read the error payload, then close the body so the transport can
		// reuse the connection (the original leaked it on this path).
		b, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			return nil, errors.New(resp.Status)
		}
		return nil, errors.New(string(b))
	}
	return resp, nil
}
// List the current clusters. The response body is always closed here so the
// underlying connection can be reused (the original leaked it).
func (c *ClusterClient) List() ([]Cluster, error) {
	resp, err := c.NewRequest("GET", "/bays", nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var result struct {
		Clusters []Cluster `json:"bays"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return nil, err
	}
	return result.Clusters, nil
}
// clusterFromResponse decodes a single Cluster out of resp, propagating err
// unchanged when the request itself already failed. It closes resp.Body
// (the original leaked it on every call).
func clusterFromResponse(resp *http.Response, err error) (*Cluster, error) {
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	cluster := new(Cluster)
	if err := json.NewDecoder(resp.Body).Decode(cluster); err != nil {
		return nil, err
	}
	return cluster, nil
}
// Get a cluster by cluster name
func (c *ClusterClient) Get(clusterID string) (*Cluster, error) {
	resp, err := c.NewRequest("GET", path.Join("/bays", clusterID), nil)
	return clusterFromResponse(resp, err)
}
// Create a new cluster with cluster options
func (c *ClusterClient) Create(clusterOpts Cluster) (*Cluster, error) {
	payload, err := json.Marshal(clusterOpts)
	if err != nil {
		return nil, err
	}
	uri := path.Join("/clusters", c.Username)
	resp, err := c.NewRequest("POST", uri, bytes.NewReader(payload))
	return clusterFromResponse(resp, err)
}
// GetZipURL returns the URL for downloading credentials.
// The response body is closed before returning (the original leaked it).
func (c *ClusterClient) GetZipURL(clusterName string) (string, error) {
	uri := path.Join("/clusters", c.Username, clusterName, "zip")
	resp, err := c.NewRequest("GET", uri, nil)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	var zipURLResp ZipURLResponse
	if err := json.NewDecoder(resp.Body).Decode(&zipURLResp); err != nil {
		return "", err
	}
	return zipURLResp.URL, nil
}
// GetCredentials returns a Credentials struct for the given cluster name.
// It downloads the credentials zip bundle, extracts every file into the
// Files map (and the well-known fields), and parses DOCKER_HOST out of
// docker.env.
func (c *ClusterClient) GetCredentials(clusterName string) (*Credentials, error) {
	url, err := c.GetZipURL(clusterName)
	if err != nil {
		return nil, err
	}
	zr, err := fetchZip(c.Client, url)
	if err != nil {
		return nil, err
	}
	// A complete bundle carries at least six files; fewer means a truncated
	// or malformed download. The original returned (nil, nil) here, which
	// made callers dereference a nil *Credentials.
	if len(zr.File) < 6 {
		return nil, errors.New("credentials zip is incomplete")
	}

	// fetch the contents for each credential/note
	creds := new(Credentials)
	creds.Files = make(map[string][]byte)
	for _, zf := range zr.File {
		_, fname := path.Split(zf.Name)
		fi := zf.FileInfo()
		if fi.IsDir() {
			// Explicitly skip past directories (the UUID directory from a previous release)
			continue
		}
		rc, err := zf.Open()
		if err != nil {
			return nil, err
		}
		b, err := ioutil.ReadAll(rc)
		rc.Close() // the original never closed the entry reader
		if err != nil {
			return nil, err
		}
		creds.Files[fname] = b
		switch fname {
		case "ca.pem":
			creds.CA = b
		case "README.md":
			creds.README = b
		case "ca-key.pem":
			creds.CAKey = b
		case "docker.env":
			creds.DockerEnv = b
		case "cert.pem":
			creds.Cert = b
		case "key.pem":
			creds.Key = b
		case "docker.ps1":
			creds.DockerPS1 = b
		case "docker.cmd":
			creds.DockerCmd = b
		case "docker.fish":
			creds.DockerFish = b
		}
	}

	// Pull DOCKER_HOST out of the "export VAR=value" lines in docker.env.
	sourceLines := strings.Split(string(creds.DockerEnv), "\n")
	for _, line := range sourceLines {
		if strings.HasPrefix(line, "export ") {
			varDecl := strings.TrimRight(line[7:], "\n")
			eqLocation := strings.Index(varDecl, "=")
			if eqLocation < 0 {
				// Malformed line without '='; skip instead of panicking
				// on the negative slice index.
				continue
			}
			varName := varDecl[:eqLocation]
			varValue := varDecl[eqLocation+1:]
			switch varName {
			case "DOCKER_HOST":
				creds.DockerHost = varValue
			}
		}
	}
	return creds, nil
}
// GetDockerConfig returns the hostname and tls.Config for a given clustername
func (c *ClusterClient) GetDockerConfig(clusterName string) (hostname string, tlsConfig *tls.Config, err error) {
	creds, err := c.GetCredentials(clusterName)
	if err != nil {
		return "", nil, err
	}
	tlsConfig, err = creds.GetTLSConfig()
	hostname = creds.DockerHost
	return hostname, tlsConfig, err
}
// GetTLSConfig returns a tls.Config for a credential set.
//
// NOTE(review): InsecureSkipVerify is set, so the server certificate chain
// and hostname are NOT verified even though a CA pool is installed below —
// confirm whether this is intentional.
func (creds *Credentials) GetTLSConfig() (*tls.Config, error) {
	certPool := x509.NewCertPool()
	certPool.AppendCertsFromPEM(creds.CA)

	tlsConfig := &tls.Config{
		InsecureSkipVerify: true,
		RootCAs:            certPool,
	}

	keypair, err := tls.X509KeyPair(creds.Cert, creds.Key)
	if err != nil {
		return tlsConfig, err
	}
	tlsConfig.Certificates = []tls.Certificate{keypair}
	return tlsConfig, nil
}
// fetchZip downloads zipurl and returns an in-memory zip.Reader over the
// payload. Error responses (>= 400) become Go errors carrying the response
// body, or just the status line when the body cannot be read.
func fetchZip(client *http.Client, zipurl string) (*zip.Reader, error) {
	req, err := http.NewRequest("GET", zipurl, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", userAgent)

	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode >= 400 {
		b, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return nil, errors.New(resp.Status)
		}
		return nil, errors.New(string(b))
	}

	var payload bytes.Buffer
	if _, err := io.Copy(&payload, resp.Body); err != nil {
		return nil, err
	}
	br := bytes.NewReader(payload.Bytes())
	return zip.NewReader(br, br.Size())
}
// Grow increases a cluster by the provided number of nodes
func (c *ClusterClient) Grow(clusterName string, nodes int) (*Cluster, error) {
	payload, err := json.Marshal(map[string]int{"nodes": nodes})
	if err != nil {
		return nil, err
	}
	uri := path.Join("/clusters", c.Username, clusterName, "grow")
	resp, err := c.NewRequest("POST", uri, bytes.NewReader(payload))
	return clusterFromResponse(resp, err)
}
// SetAutoScale enables or disables autoscale on an already running cluster
func (c *ClusterClient) SetAutoScale(clusterName string, autoscale bool) (*Cluster, error) {
	// The flag is passed as a path segment, not a body.
	flag := "false"
	if autoscale {
		flag = "true"
	}
	uri := path.Join("/clusters", c.Username, clusterName, "autoscale", flag)
	resp, err := c.NewRequest("PUT", uri, nil)
	return clusterFromResponse(resp, err)
}
// rebuildSwarmAction is the action name Carina understands for rebuilding
// a Swarm cluster in place.
const rebuildSwarmAction = "rebuild-swarm"

// actionRequest is the JSON envelope for the generic cluster action endpoint.
type actionRequest struct {
	Action string `json:"action"`
}

// doAction POSTs a named action to the cluster's action endpoint.
func (c *ClusterClient) doAction(clusterName, action string) (*Cluster, error) {
	payload, err := json.Marshal(actionRequest{Action: action})
	if err != nil {
		return nil, err
	}
	uri := path.Join("/clusters", c.Username, clusterName, "action")
	resp, err := c.NewRequest("POST", uri, bytes.NewReader(payload))
	return clusterFromResponse(resp, err)
}
// Rebuild creates a wholly new Swarm cluster
// (thin wrapper that issues the rebuild-swarm action via doAction).
func (c *ClusterClient) Rebuild(clusterName string) (*Cluster, error) {
	return c.doAction(clusterName, rebuildSwarmAction)
}
// Delete nukes a cluster out of existence
func (c *ClusterClient) Delete(clusterName string) (*Cluster, error) {
	resp, err := c.NewRequest("DELETE", path.Join("/clusters", c.Username, clusterName), nil)
	return clusterFromResponse(resp, err)
}
func quotasFromResponse(resp *http.Response) (*Quotas, error) {
quotas := new(Quotas)
err := json.NewDecoder(resp.Body).Decode("as)
if err != nil {
return nil, err
}
return quotas, nil
}
// GetQuotas fetches the account's cluster and node quota limits.
func (c *ClusterClient) GetQuotas() (*Quotas, error) {
	resp, err := c.NewRequest("GET", path.Join("/quotas", c.Username), nil)
	if err != nil {
		return nil, err
	}
	return quotasFromResponse(resp)
}
Add a HostType field to the Cluster struct.
package libcarina
import (
"archive/zip"
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"path"
"strings"
"github.com/rackspace/gophercloud"
"github.com/rackspace/gophercloud/rackspace"
)
// BetaEndpoint reflects the default endpoint for this library
const BetaEndpoint = "https://app.getcarina.com"
const mimetypeJSON = "application/json"
const authHeaderKey = "X-Auth-Token"
const userAgent = "getcarina/libcarina"
// ZipURLResponse is the response that comes back from the zip endpoint
type ZipURLResponse struct {
URL string `json:"zip_url"`
}
// ClusterClient accesses Carina directly
type ClusterClient struct {
Client *http.Client
Username string
Token string
Endpoint string
}
// ErrorResponse is the JSON formatted error response from Carina
type ErrorResponse struct {
Error string `json:"error"`
}
// Cluster is a cluster of Docker nodes
type Cluster struct {
// ID of the cluster
ID string `json:"id"`
// Name of the cluster
Name string `json:"name"`
// COE (container orchestration engine) used by the cluster
COE string `json:"coe"`
// Underlying type of the host nodes, such as lxc or vm
HostType string `json:"host_type"`
// Nodes in the cluster
Nodes int `json:"node_count,omitempty"`
// Status of the cluster
Status string `json:"status,omitempty"`
}
// Credentials holds the keys to the kingdom
type Credentials struct {
README []byte
Cert []byte
Key []byte
CA []byte
CAKey []byte
DockerEnv []byte
DockerCmd []byte
DockerPS1 []byte
DockerHost string
Files map[string][]byte
DockerFish []byte
}
type Quotas struct {
MaxClusters int `json:"max_clusters"`
MaxNodesPerCluster int `json:"max_nodes_per_cluster"`
}
func newClusterClient(endpoint string, ao gophercloud.AuthOptions) (*ClusterClient, error) {
provider, err := rackspace.AuthenticatedClient(ao)
if err != nil {
return nil, err
}
return &ClusterClient{
Client: &http.Client{},
Username: ao.Username,
Token: provider.TokenID,
Endpoint: endpoint,
}, nil
}
// NewClusterClient create a new clusterclient by API Key
func NewClusterClient(endpoint, username, apikey string) (*ClusterClient, error) {
ao := gophercloud.AuthOptions{
Username: username,
APIKey: apikey,
IdentityEndpoint: rackspace.RackspaceUSIdentity,
}
return newClusterClient(endpoint, ao)
}
// NewRequest handles a request using auth used by Carina.
// On success the caller owns resp.Body and must close it; on an error
// response (>= 400) the body is drained, closed, and turned into an error.
func (c *ClusterClient) NewRequest(method string, uri string, body io.Reader) (*http.Response, error) {
	req, err := http.NewRequest(method, c.Endpoint+uri, body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", userAgent)
	req.Header.Add("Content-Type", mimetypeJSON)
	req.Header.Add("Accept", mimetypeJSON)
	req.Header.Add(authHeaderKey, c.Token)

	resp, err := c.Client.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode >= 400 {
		if resp.Body == nil {
			return nil, errors.New(resp.Status)
		}
		// Read the error payload, then close the body so the transport can
		// reuse the connection (the original leaked it on this path).
		b, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			return nil, errors.New(resp.Status)
		}
		return nil, errors.New(string(b))
	}
	return resp, nil
}
// List the current clusters. The response body is always closed here so the
// underlying connection can be reused (the original leaked it).
func (c *ClusterClient) List() ([]Cluster, error) {
	resp, err := c.NewRequest("GET", "/bays", nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var result struct {
		Clusters []Cluster `json:"bays"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return nil, err
	}
	return result.Clusters, nil
}
// clusterFromResponse decodes a single Cluster out of resp, propagating err
// unchanged when the request itself already failed. It closes resp.Body
// (the original leaked it on every call).
func clusterFromResponse(resp *http.Response, err error) (*Cluster, error) {
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	cluster := new(Cluster)
	if err := json.NewDecoder(resp.Body).Decode(cluster); err != nil {
		return nil, err
	}
	return cluster, nil
}
// Get a cluster by cluster name
func (c *ClusterClient) Get(clusterID string) (*Cluster, error) {
uri := path.Join("/bays", clusterID)
resp, err := c.NewRequest("GET", uri, nil)
return clusterFromResponse(resp, err)
}
// Create a new cluster with cluster options
func (c *ClusterClient) Create(clusterOpts Cluster) (*Cluster, error) {
clusterOptsJSON, err := json.Marshal(clusterOpts)
if err != nil {
return nil, err
}
body := bytes.NewReader(clusterOptsJSON)
uri := path.Join("/clusters", c.Username)
resp, err := c.NewRequest("POST", uri, body)
return clusterFromResponse(resp, err)
}
// GetZipURL returns the URL for downloading credentials.
// The response body is closed before returning (the original leaked it).
func (c *ClusterClient) GetZipURL(clusterName string) (string, error) {
	uri := path.Join("/clusters", c.Username, clusterName, "zip")
	resp, err := c.NewRequest("GET", uri, nil)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	var zipURLResp ZipURLResponse
	if err := json.NewDecoder(resp.Body).Decode(&zipURLResp); err != nil {
		return "", err
	}
	return zipURLResp.URL, nil
}
// GetCredentials returns a Credentials struct for the given cluster name.
// It downloads the credentials zip bundle, extracts every file into the
// Files map (and the well-known fields), and parses DOCKER_HOST out of
// docker.env.
func (c *ClusterClient) GetCredentials(clusterName string) (*Credentials, error) {
	url, err := c.GetZipURL(clusterName)
	if err != nil {
		return nil, err
	}
	zr, err := fetchZip(c.Client, url)
	if err != nil {
		return nil, err
	}
	// A complete bundle carries at least six files; fewer means a truncated
	// or malformed download. The original returned (nil, nil) here, which
	// made callers dereference a nil *Credentials.
	if len(zr.File) < 6 {
		return nil, errors.New("credentials zip is incomplete")
	}

	// fetch the contents for each credential/note
	creds := new(Credentials)
	creds.Files = make(map[string][]byte)
	for _, zf := range zr.File {
		_, fname := path.Split(zf.Name)
		fi := zf.FileInfo()
		if fi.IsDir() {
			// Explicitly skip past directories (the UUID directory from a previous release)
			continue
		}
		rc, err := zf.Open()
		if err != nil {
			return nil, err
		}
		b, err := ioutil.ReadAll(rc)
		rc.Close() // the original never closed the entry reader
		if err != nil {
			return nil, err
		}
		creds.Files[fname] = b
		switch fname {
		case "ca.pem":
			creds.CA = b
		case "README.md":
			creds.README = b
		case "ca-key.pem":
			creds.CAKey = b
		case "docker.env":
			creds.DockerEnv = b
		case "cert.pem":
			creds.Cert = b
		case "key.pem":
			creds.Key = b
		case "docker.ps1":
			creds.DockerPS1 = b
		case "docker.cmd":
			creds.DockerCmd = b
		case "docker.fish":
			creds.DockerFish = b
		}
	}

	// Pull DOCKER_HOST out of the "export VAR=value" lines in docker.env.
	sourceLines := strings.Split(string(creds.DockerEnv), "\n")
	for _, line := range sourceLines {
		if strings.HasPrefix(line, "export ") {
			varDecl := strings.TrimRight(line[7:], "\n")
			eqLocation := strings.Index(varDecl, "=")
			if eqLocation < 0 {
				// Malformed line without '='; skip instead of panicking
				// on the negative slice index.
				continue
			}
			varName := varDecl[:eqLocation]
			varValue := varDecl[eqLocation+1:]
			switch varName {
			case "DOCKER_HOST":
				creds.DockerHost = varValue
			}
		}
	}
	return creds, nil
}
// GetDockerConfig returns the hostname and tls.Config for a given clustername
func (c *ClusterClient) GetDockerConfig(clusterName string) (hostname string, tlsConfig *tls.Config, err error) {
creds, err := c.GetCredentials(clusterName)
if err != nil {
return "", nil, err
}
tlsConfig, err = creds.GetTLSConfig()
return creds.DockerHost, tlsConfig, err
}
// GetTLSConfig returns a tls.Config for a credential set
func (creds *Credentials) GetTLSConfig() (*tls.Config, error) {
// TLS config
var tlsConfig tls.Config
tlsConfig.InsecureSkipVerify = true
certPool := x509.NewCertPool()
certPool.AppendCertsFromPEM(creds.CA)
tlsConfig.RootCAs = certPool
keypair, err := tls.X509KeyPair(creds.Cert, creds.Key)
if err != nil {
return &tlsConfig, err
}
tlsConfig.Certificates = []tls.Certificate{keypair}
return &tlsConfig, nil
}
func fetchZip(client *http.Client, zipurl string) (*zip.Reader, error) {
req, err := http.NewRequest("GET", zipurl, nil)
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", userAgent)
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode >= 400 {
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, errors.New(resp.Status)
}
return nil, errors.New(string(b))
}
buf := &bytes.Buffer{}
_, err = io.Copy(buf, resp.Body)
if err != nil {
return nil, err
}
b := bytes.NewReader(buf.Bytes())
return zip.NewReader(b, int64(b.Len()))
}
// Grow increases a cluster by the provided number of nodes
func (c *ClusterClient) Grow(clusterName string, nodes int) (*Cluster, error) {
incr := map[string]int{
"nodes": nodes,
}
growthRequest, err := json.Marshal(incr)
if err != nil {
return nil, err
}
r := bytes.NewReader(growthRequest)
uri := path.Join("/clusters", c.Username, clusterName, "grow")
resp, err := c.NewRequest("POST", uri, r)
return clusterFromResponse(resp, err)
}
// SetAutoScale enables or disables autoscale on an already running cluster
func (c *ClusterClient) SetAutoScale(clusterName string, autoscale bool) (*Cluster, error) {
setAutoscale := "false"
if autoscale {
setAutoscale = "true"
}
uri := path.Join("/clusters", c.Username, clusterName, "autoscale", setAutoscale)
resp, err := c.NewRequest("PUT", uri, nil)
return clusterFromResponse(resp, err)
}
const rebuildSwarmAction = "rebuild-swarm"
type actionRequest struct {
Action string `json:"action"`
}
func (c *ClusterClient) doAction(clusterName, action string) (*Cluster, error) {
actionReq, err := json.Marshal(actionRequest{Action: action})
if err != nil {
return nil, err
}
r := bytes.NewReader(actionReq)
uri := path.Join("/clusters", c.Username, clusterName, "action")
resp, err := c.NewRequest("POST", uri, r)
return clusterFromResponse(resp, err)
}
// Rebuild creates a wholly new Swarm cluster
func (c *ClusterClient) Rebuild(clusterName string) (*Cluster, error) {
return c.doAction(clusterName, rebuildSwarmAction)
}
// Delete nukes a cluster out of existence
func (c *ClusterClient) Delete(clusterName string) (*Cluster, error) {
uri := path.Join("/clusters", c.Username, clusterName)
resp, err := c.NewRequest("DELETE", uri, nil)
return clusterFromResponse(resp, err)
}
func quotasFromResponse(resp *http.Response) (*Quotas, error) {
quotas := new(Quotas)
err := json.NewDecoder(resp.Body).Decode("as)
if err != nil {
return nil, err
}
return quotas, nil
}
func (c *ClusterClient) GetQuotas() (*Quotas, error) {
uri := path.Join("/quotas", c.Username)
resp, err := c.NewRequest("GET", uri, nil)
if err != nil {
return nil, err
}
return quotasFromResponse(resp)
}
|
//
// Copyright (c) 2017 Cavium
//
// SPDX-License-Identifier: Apache-2.0
//
package distro
import (
"io"
"net/http"
"github.com/go-zoo/bone"
)
func replyPing(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/text; charset=utf-8")
w.WriteHeader(http.StatusOK)
str := `pong`
io.WriteString(w, str)
}
// replyNotifyRegistrations acknowledges the request with an empty 200 body
// and then reloads the export registrations via RefreshRegistrations.
func replyNotifyRegistrations(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/text; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	io.WriteString(w, "")
	// Refresh happens after the response is committed.
	RefreshRegistrations()
}
// HTTPServer builds the export-distro HTTP API: a liveness ping and a hook
// that tells the service to reload its registrations.
func HTTPServer() http.Handler {
	mux := bone.New()

	mux.Get("/api/v1/ping", http.HandlerFunc(replyPing))
	// Refreshing registrations mutates service state, so expose it as PUT
	// rather than GET (GET handlers must be safe/idempotent per RFC 7231).
	mux.Put("/api/v1/notify/registrations", http.HandlerFunc(replyNotifyRegistrations))

	return mux
}
Update the notify/registrations API endpoint from GET to PUT.
Signed-off-by: Federico Claramonte <9aaaa8bfe6a7a51765b462c528e1446dcf049286@caviumnetworks.com>
//
// Copyright (c) 2017 Cavium
//
// SPDX-License-Identifier: Apache-2.0
//
package distro
import (
"io"
"net/http"
"github.com/go-zoo/bone"
)
func replyPing(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/text; charset=utf-8")
w.WriteHeader(http.StatusOK)
str := `pong`
io.WriteString(w, str)
}
// replyNotifyRegistrations acknowledges the request with an empty 200 body
// and then reloads the export registrations via RefreshRegistrations.
func replyNotifyRegistrations(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/text; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	io.WriteString(w, "")
	// Refresh happens after the response is committed.
	RefreshRegistrations()
}
// HTTPServer builds the export-distro HTTP API: a GET liveness ping and a
// PUT hook that tells the service to reload its registrations.
func HTTPServer() http.Handler {
	mux := bone.New()
	mux.Get("/api/v1/ping", http.HandlerFunc(replyPing))
	// PUT, not GET: the handler mutates service state.
	mux.Put("/api/v1/notify/registrations", http.HandlerFunc(replyNotifyRegistrations))
	return mux
}
|
package distro
import (
zmq "github.com/pebbe/zmq4"
"go.uber.org/zap"
)
// initZmq subscribes to the local ZeroMQ event stream and forwards every
// decoded event to the logger. It blocks forever; run it on its own
// goroutine.
func initZmq() {
	q, err := zmq.NewSocket(zmq.SUB)
	if err != nil {
		// The original ignored this error and could use a nil socket.
		logger.Error("Could not create zmq socket", zap.Error(err))
		return
	}
	defer q.Close()

	logger.Info("Connecting to zmq...")
	q.Connect("tcp://localhost:32768")
	logger.Info("Connected to zmq")
	q.SetSubscribe("")

	for {
		msg, err := q.RecvMessage(0)
		if err != nil {
			id, _ := q.GetIdentity()
			// Typo "mesage" fixed; the noisy per-message Info log removed.
			logger.Error("Error getting message", zap.String("id", id))
			continue
		}
		for _, str := range msg {
			// Why the offset of 7?? zmq v3 vs v4 ?
			event := parseEvent(str[7:])
			logger.Debug("Event received", zap.Any("event", event))
		}
	}
}
Remove the noisy per-message "Received zmq msg" log line.
Signed-off-by: Federico Claramonte <9aaaa8bfe6a7a51765b462c528e1446dcf049286@caviumnetworks.com>
package distro
import (
zmq "github.com/pebbe/zmq4"
"go.uber.org/zap"
)
// initZmq subscribes to the local ZeroMQ event stream and forwards every
// decoded event to the logger. It blocks forever; run it on its own
// goroutine.
func initZmq() {
	q, err := zmq.NewSocket(zmq.SUB)
	if err != nil {
		// The original ignored this error and could use a nil socket.
		logger.Error("Could not create zmq socket", zap.Error(err))
		return
	}
	defer q.Close()

	logger.Info("Connecting to zmq...")
	q.Connect("tcp://localhost:32768")
	logger.Info("Connected to zmq")
	q.SetSubscribe("")

	for {
		msg, err := q.RecvMessage(0)
		if err != nil {
			id, _ := q.GetIdentity()
			// Typo "mesage" fixed.
			logger.Error("Error getting message", zap.String("id", id))
			continue
		}
		for _, str := range msg {
			// Why the offset of 7?? zmq v3 vs v4 ?
			event := parseEvent(str[7:])
			logger.Debug("Event received", zap.Any("event", event))
		}
	}
}
|
package main
import (
"fmt"
"io"
"net"
"strconv"
)
// startServer accepts TCP connections on listenPort and proxies each one to
// the next backend in round-robin order. It blocks until the listener fails.
func startServer(listenPort int, backends *Backends) {
	port := strconv.Itoa(listenPort)
	fmt.Println("Starting server on port ", port)

	addr, err := net.ResolveTCPAddr("tcp", ":"+port)
	if err != nil {
		// Previously ignored; ListenTCP would then be handed a nil addr.
		fmt.Println("Could not resolve listen address because:", err.Error())
		return
	}
	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		fmt.Println("Could not listen on port because:", err.Error())
		return
	}
	for {
		con, err := listener.Accept()
		if err != nil {
			fmt.Println("Error occured accepting a connection", err.Error())
			// Do not hand a nil connection to the proxy goroutine.
			continue
		}
		go handleConnection(con, backends.NextAddress())
	}
}
func handleConnection(cli_conn net.Conn, srv_addr string) {
srv_conn, err := net.Dial("tcp", srv_addr)
if err != nil {
fmt.Printf("Could not connect to server (%s), connection dropping\n", srv_addr)
return
}
go io.Copy(cli_conn, srv_conn)
io.Copy(srv_conn, cli_conn)
}
Close both connections after copying data so the proxied sockets do not leak.
package main
import (
"fmt"
"io"
"net"
"strconv"
)
// startServer accepts TCP connections on listenPort and proxies each one to
// the next backend in round-robin order. It blocks until the listener fails.
func startServer(listenPort int, backends *Backends) {
	port := strconv.Itoa(listenPort)
	fmt.Println("Starting server on port ", port)

	addr, err := net.ResolveTCPAddr("tcp", ":"+port)
	if err != nil {
		// Previously ignored; ListenTCP would then be handed a nil addr.
		fmt.Println("Could not resolve listen address because:", err.Error())
		return
	}
	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		fmt.Println("Could not listen on port because:", err.Error())
		return
	}
	for {
		con, err := listener.Accept()
		if err != nil {
			fmt.Println("Error occured accepting a connection", err.Error())
			// Do not hand a nil connection to the proxy goroutine.
			continue
		}
		go handleConnection(con, backends.NextAddress())
	}
}
func handleConnection(cli_conn net.Conn, srv_addr string) {
srv_conn, err := net.Dial("tcp", srv_addr)
if err != nil {
fmt.Printf("Could not connect to server (%s), connection dropping\n", srv_addr)
return
}
go io.Copy(cli_conn, srv_conn)
io.Copy(srv_conn, cli_conn)
srv_conn.Close()
cli_conn.Close()
}
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"fmt"
"reflect"
"sync"
"time"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
)
var (
FakeVersion = "0.1.0"
FakeRuntimeName = "fakeRuntime"
FakePodSandboxIP = "192.168.192.168"
)
type FakePodSandbox struct {
// PodSandboxStatus contains the runtime information for a sandbox.
runtimeapi.PodSandboxStatus
// RuntimeHandler is the runtime handler that was issued with the RunPodSandbox request.
RuntimeHandler string
}
type FakeContainer struct {
// ContainerStatus contains the runtime information for a container.
runtimeapi.ContainerStatus
// the sandbox id of this container
SandboxID string
}
type FakeRuntimeService struct {
sync.Mutex
Called []string
Errors map[string][]error
FakeStatus *runtimeapi.RuntimeStatus
Containers map[string]*FakeContainer
Sandboxes map[string]*FakePodSandbox
FakeContainerStats map[string]*runtimeapi.ContainerStats
}
// GetContainerID returns the ID of the container matching the given sandbox
// ID, metadata name, and attempt number, or an error when none matches.
func (r *FakeRuntimeService) GetContainerID(sandboxID, name string, attempt uint32) (string, error) {
	r.Lock()
	defer r.Unlock()

	for id, c := range r.Containers {
		if c.SandboxID != sandboxID || c.Metadata.Name != name || c.Metadata.Attempt != attempt {
			continue
		}
		return id, nil
	}
	return "", fmt.Errorf("container (name, attempt, sandboxID)=(%q, %d, %q) not found", name, attempt, sandboxID)
}
// SetFakeSandboxes replaces the sandbox map with the given sandboxes,
// keyed by their IDs.
func (r *FakeRuntimeService) SetFakeSandboxes(sandboxes []*FakePodSandbox) {
	r.Lock()
	defer r.Unlock()

	r.Sandboxes = make(map[string]*FakePodSandbox)
	for _, s := range sandboxes {
		r.Sandboxes[s.Id] = s
	}
}
// SetFakeContainers replaces the container map with the given containers,
// keyed by their IDs.
func (r *FakeRuntimeService) SetFakeContainers(containers []*FakeContainer) {
	r.Lock()
	defer r.Unlock()

	r.Containers = make(map[string]*FakeContainer)
	for _, c := range containers {
		r.Containers[c.Id] = c
	}
}
// AssertCalls returns nil when the recorded call log equals calls exactly,
// otherwise an error describing the difference.
func (r *FakeRuntimeService) AssertCalls(calls []string) error {
	r.Lock()
	defer r.Unlock()

	if reflect.DeepEqual(calls, r.Called) {
		return nil
	}
	return fmt.Errorf("expected %#v, got %#v", calls, r.Called)
}
// GetCalls returns a copy of the recorded call log so callers cannot
// mutate the fake's internal state.
func (r *FakeRuntimeService) GetCalls() []string {
	r.Lock()
	defer r.Unlock()

	calls := make([]string, len(r.Called))
	copy(calls, r.Called)
	return calls
}
// InjectError queues err to be returned by a future call to method f
// (consumed by popError).
func (r *FakeRuntimeService) InjectError(f string, err error) {
	r.Lock()
	defer r.Unlock()
	r.Errors[f] = append(r.Errors[f], err)
}
// caller of popError must grab a lock.
// popError removes and returns the oldest injected error for call f, or nil
// when none are queued. The original read errs[0] but never wrote the
// shortened slice back to r.Errors[f], so an injected error fired forever
// instead of exactly once.
func (r *FakeRuntimeService) popError(f string) error {
	if r.Errors == nil {
		return nil
	}
	errs := r.Errors[f]
	if len(errs) == 0 {
		return nil
	}
	err := errs[0]
	// Persist the pop so each injected error is consumed exactly once.
	r.Errors[f] = errs[1:]
	return err
}
// NewFakeRuntimeService returns an empty fake runtime service ready for use.
func NewFakeRuntimeService() *FakeRuntimeService {
	r := &FakeRuntimeService{}
	r.Called = make([]string, 0)
	r.Errors = make(map[string][]error)
	r.Containers = make(map[string]*FakeContainer)
	r.Sandboxes = make(map[string]*FakePodSandbox)
	r.FakeContainerStats = make(map[string]*runtimeapi.ContainerStats)
	return r
}
// Version records the call and reports fixed fake version information.
func (r *FakeRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "Version")

	resp := &runtimeapi.VersionResponse{
		Version:           FakeVersion,
		RuntimeName:       FakeRuntimeName,
		RuntimeVersion:    FakeVersion,
		RuntimeApiVersion: FakeVersion,
	}
	return resp, nil
}
// Status records the call and returns the preconfigured FakeStatus
// (nil unless a test has set it).
func (r *FakeRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "Status")
	return r.FakeStatus, nil
}
// RunPodSandbox records the call and registers a new fake sandbox in READY
// state, returning its ID.
func (r *FakeRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "RunPodSandbox")
	// PodSandboxID should be randomized for real container runtime, but here just use
	// fixed name from BuildSandboxName() for easily making fake sandboxes.
	podSandboxID := BuildSandboxName(config.Metadata)
	createdAt := time.Now().UnixNano()
	r.Sandboxes[podSandboxID] = &FakePodSandbox{
		PodSandboxStatus: runtimeapi.PodSandboxStatus{
			Id:        podSandboxID,
			Metadata:  config.Metadata,
			State:     runtimeapi.PodSandboxState_SANDBOX_READY,
			CreatedAt: createdAt,
			// Every fake sandbox reports the same fixed IP.
			Network: &runtimeapi.PodSandboxNetworkStatus{
				Ip: FakePodSandboxIP,
			},
			Labels:      config.Labels,
			Annotations: config.Annotations,
		},
		RuntimeHandler: runtimeHandler,
	}
	return podSandboxID, nil
}
// StopPodSandbox marks the sandbox NOTREADY, erroring if it is unknown.
func (r *FakeRuntimeService) StopPodSandbox(podSandboxID string) error {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "StopPodSandbox")

	s, ok := r.Sandboxes[podSandboxID]
	if !ok {
		return fmt.Errorf("pod sandbox %s not found", podSandboxID)
	}
	s.State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
	return nil
}
// RemovePodSandbox records the call and deletes the sandbox; removing an
// unknown ID is a no-op.
func (r *FakeRuntimeService) RemovePodSandbox(podSandboxID string) error {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "RemovePodSandbox")
	// Remove the pod sandbox
	delete(r.Sandboxes, podSandboxID)
	return nil
}
// PodSandboxStatus returns a copy of the stored status for podSandboxID.
func (r *FakeRuntimeService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "PodSandboxStatus")

	s, ok := r.Sandboxes[podSandboxID]
	if !ok {
		return nil, fmt.Errorf("pod sandbox %q not found", podSandboxID)
	}
	// Hand back a copy so callers cannot mutate the stored status.
	status := s.PodSandboxStatus
	return &status, nil
}
// ListPodSandbox returns all sandboxes that pass the optional filter.
// Filter criteria combine with AND: ID, then state, then label selector.
func (r *FakeRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "ListPodSandbox")
	result := make([]*runtimeapi.PodSandbox, 0)
	for id, s := range r.Sandboxes {
		if filter != nil {
			if filter.Id != "" && filter.Id != id {
				continue
			}
			if filter.State != nil && filter.GetState().State != s.State {
				continue
			}
			if filter.LabelSelector != nil && !filterInLabels(filter.LabelSelector, s.GetLabels()) {
				continue
			}
		}
		// Project the stored status into the list item shape.
		result = append(result, &runtimeapi.PodSandbox{
			Id:          s.Id,
			Metadata:    s.Metadata,
			State:       s.State,
			CreatedAt:   s.CreatedAt,
			Labels:      s.Labels,
			Annotations: s.Annotations,
		})
	}
	return result, nil
}
// PortForward records the call and returns an empty response; the fake
// performs no actual forwarding.
func (r *FakeRuntimeService) PortForward(*runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "PortForward")
	return &runtimeapi.PortForwardResponse{}, nil
}
// CreateContainer records the call and registers a fake container in
// CREATED state under the given sandbox, returning its ID.
func (r *FakeRuntimeService) CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "CreateContainer")
	// ContainerID should be randomized for real container runtime, but here just use
	// fixed BuildContainerName() for easily making fake containers.
	containerID := BuildContainerName(config.Metadata, podSandboxID)
	createdAt := time.Now().UnixNano()
	createdState := runtimeapi.ContainerState_CONTAINER_CREATED
	imageRef := config.Image.Image
	r.Containers[containerID] = &FakeContainer{
		ContainerStatus: runtimeapi.ContainerStatus{
			Id:          containerID,
			Metadata:    config.Metadata,
			Image:       config.Image,
			ImageRef:    imageRef,
			CreatedAt:   createdAt,
			State:       createdState,
			Labels:      config.Labels,
			Annotations: config.Annotations,
		},
		// Remember which sandbox owns this container.
		SandboxID: podSandboxID,
	}
	return containerID, nil
}
// StartContainer transitions the container to RUNNING and stamps StartedAt.
func (r *FakeRuntimeService) StartContainer(containerID string) error {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "StartContainer")

	c, ok := r.Containers[containerID]
	if !ok {
		return fmt.Errorf("container %s not found", containerID)
	}
	c.State = runtimeapi.ContainerState_CONTAINER_RUNNING
	c.StartedAt = time.Now().UnixNano()
	return nil
}
// StopContainer transitions the container to EXITED and stamps FinishedAt.
// The timeout argument is ignored by the fake.
func (r *FakeRuntimeService) StopContainer(containerID string, timeout int64) error {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "StopContainer")

	c, ok := r.Containers[containerID]
	if !ok {
		return fmt.Errorf("container %q not found", containerID)
	}
	c.State = runtimeapi.ContainerState_CONTAINER_EXITED
	c.FinishedAt = time.Now().UnixNano()
	return nil
}
// RemoveContainer records the call and deletes the container from the fake;
// deleting an unknown ID is a no-op (map delete semantics).
func (r *FakeRuntimeService) RemoveContainer(containerID string) error {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "RemoveContainer")
	// Remove the container
	delete(r.Containers, containerID)
	return nil
}
// ListContainers records the call and returns the fake's containers,
// optionally narrowed by id, sandbox id, state, and label selector.
func (r *FakeRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "ListContainers")
	result := make([]*runtimeapi.Container, 0)
	for _, s := range r.Containers {
		// Unset filter criteria match everything.
		if filter != nil {
			if filter.Id != "" && filter.Id != s.Id {
				continue
			}
			if filter.PodSandboxId != "" && filter.PodSandboxId != s.SandboxID {
				continue
			}
			if filter.State != nil && filter.GetState().State != s.State {
				continue
			}
			if filter.LabelSelector != nil && !filterInLabels(filter.LabelSelector, s.GetLabels()) {
				continue
			}
		}
		result = append(result, &runtimeapi.Container{
			Id:           s.Id,
			CreatedAt:    s.CreatedAt,
			PodSandboxId: s.SandboxID,
			Metadata:     s.Metadata,
			State:        s.State,
			Image:        s.Image,
			ImageRef:     s.ImageRef,
			Labels:       s.Labels,
			Annotations:  s.Annotations,
		})
	}
	return result, nil
}
// ContainerStatus records the call and returns a copy of the stored status,
// so callers cannot mutate the fake's internal state through the pointer.
func (r *FakeRuntimeService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "ContainerStatus")
	c, ok := r.Containers[containerID]
	if !ok {
		return nil, fmt.Errorf("container %q not found", containerID)
	}
	status := c.ContainerStatus
	return &status, nil
}
// UpdateContainerResources is a no-op for the fake; note it is not recorded
// in Called, unlike most other methods.
func (r *FakeRuntimeService) UpdateContainerResources(string, *runtimeapi.LinuxContainerResources) error {
	return nil
}
// ExecSync records the call; the fake executes nothing and returns empty output.
func (r *FakeRuntimeService) ExecSync(containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "ExecSync")
	return nil, nil, nil
}
// Exec records the call and returns an empty response.
func (r *FakeRuntimeService) Exec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "Exec")
	return &runtimeapi.ExecResponse{}, nil
}
// Attach records the call and returns an empty response.
func (r *FakeRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "Attach")
	return &runtimeapi.AttachResponse{}, nil
}
// UpdateRuntimeConfig is a no-op for the fake runtime service.
// (Parameter renamed from the misspelled "runtimeCOnfig".)
func (r *FakeRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error {
	return nil
}
// SetFakeContainerStats replaces the fake's stats table, keyed by container id.
func (r *FakeRuntimeService) SetFakeContainerStats(containerStats []*runtimeapi.ContainerStats) {
	r.Lock()
	defer r.Unlock()
	r.FakeContainerStats = make(map[string]*runtimeapi.ContainerStats)
	for _, s := range containerStats {
		r.FakeContainerStats[s.Attributes.Id] = s
	}
}
// ContainerStats records the call and returns the canned stats previously
// installed via SetFakeContainerStats.
func (r *FakeRuntimeService) ContainerStats(containerID string) (*runtimeapi.ContainerStats, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "ContainerStats")
	s, found := r.FakeContainerStats[containerID]
	if !found {
		return nil, fmt.Errorf("no stats for container %q", containerID)
	}
	return s, nil
}
// ListContainerStats records the call and returns canned stats for every
// container matching the filter; containers without installed stats are skipped.
func (r *FakeRuntimeService) ListContainerStats(filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "ListContainerStats")
	var result []*runtimeapi.ContainerStats
	for _, c := range r.Containers {
		// Unset filter criteria match everything.
		if filter != nil {
			if filter.Id != "" && filter.Id != c.Id {
				continue
			}
			if filter.PodSandboxId != "" && filter.PodSandboxId != c.SandboxID {
				continue
			}
			if filter.LabelSelector != nil && !filterInLabels(filter.LabelSelector, c.GetLabels()) {
				continue
			}
		}
		s, found := r.FakeContainerStats[c.Id]
		if !found {
			continue
		}
		result = append(result, s)
	}
	return result, nil
}
// ReopenContainerLog records the call and returns the next injected error
// for this method, if any.
func (r *FakeRuntimeService) ReopenContainerLog(containerID string) error {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "ReopenContainerLog")
	if err := r.popError("ReopenContainerLog"); err != nil {
		return err
	}
	return nil
}
Store runtimeHandler for the PodSandboxStatus in FakeRuntimeService
Include the RuntimeHandler in ListPodSandbox
Signed-off-by: Aldo Culquicondor <94e5f339759e47893768aa235c40153de60154af@google.com>
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"fmt"
"reflect"
"sync"
"time"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
)
// Canned values returned by the fake runtime's Version and sandbox APIs.
var (
	FakeVersion = "0.1.0"
	FakeRuntimeName = "fakeRuntime"
	FakePodSandboxIP = "192.168.192.168"
)
// FakePodSandbox is a fake sandbox: the CRI status plus bookkeeping that the
// status message itself does not carry.
type FakePodSandbox struct {
	// PodSandboxStatus contains the runtime information for a sandbox.
	runtimeapi.PodSandboxStatus
	// RuntimeHandler is the runtime handler that was issued with the RunPodSandbox request.
	RuntimeHandler string
}
// FakeContainer is a fake container: the CRI status plus the owning sandbox id.
type FakeContainer struct {
	// ContainerStatus contains the runtime information for a container.
	runtimeapi.ContainerStatus
	// the sandbox id of this container
	SandboxID string
}
// FakeRuntimeService is an in-memory CRI runtime service for tests: every
// method logs its name in Called, and responses are driven by the canned
// objects and injected errors below. The embedded mutex guards all fields.
type FakeRuntimeService struct {
	sync.Mutex
	Called []string // ordered log of invoked method names
	Errors map[string][]error // per-method queues of injected errors
	FakeStatus *runtimeapi.RuntimeStatus // canned Status() response
	Containers map[string]*FakeContainer // containers by id
	Sandboxes map[string]*FakePodSandbox // sandboxes by id
	FakeContainerStats map[string]*runtimeapi.ContainerStats // canned stats by container id
}
// GetContainerID returns the id of the container in sandboxID whose metadata
// matches both name and attempt, or an error if no such container exists.
func (r *FakeRuntimeService) GetContainerID(sandboxID, name string, attempt uint32) (string, error) {
	r.Lock()
	defer r.Unlock()

	for id, ctr := range r.Containers {
		if ctr.SandboxID != sandboxID {
			continue
		}
		if ctr.Metadata.Name == name && ctr.Metadata.Attempt == attempt {
			return id, nil
		}
	}
	return "", fmt.Errorf("container (name, attempt, sandboxID)=(%q, %d, %q) not found", name, attempt, sandboxID)
}
// SetFakeSandboxes replaces the fake's sandbox table with the given sandboxes,
// keyed by their ids.
func (r *FakeRuntimeService) SetFakeSandboxes(sandboxes []*FakePodSandbox) {
	r.Lock()
	defer r.Unlock()

	table := make(map[string]*FakePodSandbox)
	for _, sb := range sandboxes {
		table[sb.Id] = sb
	}
	r.Sandboxes = table
}
// SetFakeContainers replaces the fake's container table with the given
// containers, keyed by their ids.
func (r *FakeRuntimeService) SetFakeContainers(containers []*FakeContainer) {
	r.Lock()
	defer r.Unlock()

	table := make(map[string]*FakeContainer)
	for _, ctr := range containers {
		table[ctr.Id] = ctr
	}
	r.Containers = table
}
// AssertCalls returns nil when the recorded call log equals calls
// (deep equality, order-sensitive), and a descriptive error otherwise.
func (r *FakeRuntimeService) AssertCalls(calls []string) error {
	r.Lock()
	defer r.Unlock()

	if reflect.DeepEqual(calls, r.Called) {
		return nil
	}
	return fmt.Errorf("expected %#v, got %#v", calls, r.Called)
}
// GetCalls returns a snapshot copy of the recorded call log; mutating it
// does not affect the fake.
func (r *FakeRuntimeService) GetCalls() []string {
	r.Lock()
	defer r.Unlock()

	snapshot := make([]string, len(r.Called))
	copy(snapshot, r.Called)
	return snapshot
}
// InjectError queues err to be returned by calls to method f (read back via
// popError).
func (r *FakeRuntimeService) InjectError(f string, err error) {
	r.Lock()
	defer r.Unlock()
	r.Errors[f] = append(r.Errors[f], err)
}
// popError returns (and consumes) the next injected error queued for method
// f, or nil if none is queued. The caller of popError must grab a lock.
func (r *FakeRuntimeService) popError(f string) error {
	if r.Errors == nil {
		return nil
	}
	errs := r.Errors[f]
	if len(errs) == 0 {
		return nil
	}
	err, errs := errs[0], errs[1:]
	// Write the shortened queue back so each injected error is returned
	// exactly once. The original omitted this, so the first error was
	// returned on every call and later injected errors were unreachable.
	r.Errors[f] = errs
	return err
}
// NewFakeRuntimeService returns an empty, ready-to-use fake runtime service
// with all of its tables initialized.
func NewFakeRuntimeService() *FakeRuntimeService {
	svc := &FakeRuntimeService{}
	svc.Called = make([]string, 0)
	svc.Errors = make(map[string][]error)
	svc.Containers = make(map[string]*FakeContainer)
	svc.Sandboxes = make(map[string]*FakePodSandbox)
	svc.FakeContainerStats = make(map[string]*runtimeapi.ContainerStats)
	return svc
}
// Version records the call and returns fixed fake version information;
// apiVersion is ignored.
func (r *FakeRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "Version")

	resp := &runtimeapi.VersionResponse{
		Version:           FakeVersion,
		RuntimeName:       FakeRuntimeName,
		RuntimeVersion:    FakeVersion,
		RuntimeApiVersion: FakeVersion,
	}
	return resp, nil
}
// Status records the call and returns the canned FakeStatus (may be nil if
// the test never set one).
func (r *FakeRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "Status")
	return r.FakeStatus, nil
}
// RunPodSandbox records the call and registers a READY fake sandbox whose id
// is deterministically derived from the sandbox metadata. The runtime handler
// is stored both in the status and on the FakePodSandbox itself.
func (r *FakeRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "RunPodSandbox")

	// A real runtime would randomize the id; the fixed BuildSandboxName
	// keeps fake sandboxes easy to reference from tests.
	sandboxID := BuildSandboxName(config.Metadata)
	now := time.Now().UnixNano()
	r.Sandboxes[sandboxID] = &FakePodSandbox{
		PodSandboxStatus: runtimeapi.PodSandboxStatus{
			Id:        sandboxID,
			Metadata:  config.Metadata,
			State:     runtimeapi.PodSandboxState_SANDBOX_READY,
			CreatedAt: now,
			Network: &runtimeapi.PodSandboxNetworkStatus{
				Ip: FakePodSandboxIP,
			},
			Labels:         config.Labels,
			Annotations:    config.Annotations,
			RuntimeHandler: runtimeHandler,
		},
		RuntimeHandler: runtimeHandler,
	}
	return sandboxID, nil
}
// StopPodSandbox records the call and marks the sandbox NOTREADY, or returns
// an error if the sandbox is unknown.
func (r *FakeRuntimeService) StopPodSandbox(podSandboxID string) error {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "StopPodSandbox")

	sb, ok := r.Sandboxes[podSandboxID]
	if !ok {
		return fmt.Errorf("pod sandbox %s not found", podSandboxID)
	}
	sb.State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
	return nil
}
// RemovePodSandbox records the call and deletes the sandbox; deleting an
// unknown id is a no-op (map delete semantics).
func (r *FakeRuntimeService) RemovePodSandbox(podSandboxID string) error {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "RemovePodSandbox")
	// Remove the pod sandbox
	delete(r.Sandboxes, podSandboxID)
	return nil
}
// PodSandboxStatus records the call and returns a copy of the stored status,
// so callers cannot mutate the fake's state through the returned pointer.
func (r *FakeRuntimeService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "PodSandboxStatus")

	sb, ok := r.Sandboxes[podSandboxID]
	if !ok {
		return nil, fmt.Errorf("pod sandbox %q not found", podSandboxID)
	}
	statusCopy := sb.PodSandboxStatus
	return &statusCopy, nil
}
// ListPodSandbox records the call and returns the fake's sandboxes,
// optionally narrowed by id, state, and label selector.
func (r *FakeRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "ListPodSandbox")

	result := make([]*runtimeapi.PodSandbox, 0)
	for id, sb := range r.Sandboxes {
		// Unset filter criteria match everything.
		if filter != nil {
			switch {
			case filter.Id != "" && filter.Id != id:
				continue
			case filter.State != nil && filter.GetState().State != sb.State:
				continue
			case filter.LabelSelector != nil && !filterInLabels(filter.LabelSelector, sb.GetLabels()):
				continue
			}
		}
		result = append(result, &runtimeapi.PodSandbox{
			Id:             sb.Id,
			Metadata:       sb.Metadata,
			State:          sb.State,
			CreatedAt:      sb.CreatedAt,
			Labels:         sb.Labels,
			Annotations:    sb.Annotations,
			RuntimeHandler: sb.RuntimeHandler,
		})
	}
	return result, nil
}
// PortForward records the call; the fake performs no actual forwarding.
func (r *FakeRuntimeService) PortForward(*runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "PortForward")
	return &runtimeapi.PortForwardResponse{}, nil
}
// CreateContainer records the call and registers a fake container in the
// CREATED state under the given sandbox, returning its (deterministic) id.
func (r *FakeRuntimeService) CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "CreateContainer")

	// A real runtime would randomize the id; the fixed BuildContainerName
	// keeps fake containers easy to reference from tests.
	ctrID := BuildContainerName(config.Metadata, podSandboxID)
	now := time.Now().UnixNano()
	r.Containers[ctrID] = &FakeContainer{
		ContainerStatus: runtimeapi.ContainerStatus{
			Id:          ctrID,
			Metadata:    config.Metadata,
			Image:       config.Image,
			ImageRef:    config.Image.Image,
			CreatedAt:   now,
			State:       runtimeapi.ContainerState_CONTAINER_CREATED,
			Labels:      config.Labels,
			Annotations: config.Annotations,
		},
		SandboxID: podSandboxID,
	}
	return ctrID, nil
}
// StartContainer records the call and moves the container to the RUNNING
// state, stamping StartedAt with the current time.
func (r *FakeRuntimeService) StartContainer(containerID string) error {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "StartContainer")

	ctr, ok := r.Containers[containerID]
	if !ok {
		return fmt.Errorf("container %s not found", containerID)
	}
	ctr.State = runtimeapi.ContainerState_CONTAINER_RUNNING
	ctr.StartedAt = time.Now().UnixNano()
	return nil
}
// StopContainer records the call and moves the container to the EXITED state,
// stamping FinishedAt. The timeout is ignored by the fake.
func (r *FakeRuntimeService) StopContainer(containerID string, timeout int64) error {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "StopContainer")

	ctr, ok := r.Containers[containerID]
	if !ok {
		return fmt.Errorf("container %q not found", containerID)
	}
	ctr.State = runtimeapi.ContainerState_CONTAINER_EXITED
	ctr.FinishedAt = time.Now().UnixNano()
	return nil
}
// RemoveContainer records the call and deletes the container; deleting an
// unknown id is a no-op (map delete semantics).
func (r *FakeRuntimeService) RemoveContainer(containerID string) error {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "RemoveContainer")
	// Remove the container
	delete(r.Containers, containerID)
	return nil
}
// ListContainers records the call and returns the fake's containers,
// optionally narrowed by id, sandbox id, state, and label selector.
func (r *FakeRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "ListContainers")

	result := make([]*runtimeapi.Container, 0)
	for _, ctr := range r.Containers {
		// Unset filter criteria match everything.
		if filter != nil {
			switch {
			case filter.Id != "" && filter.Id != ctr.Id:
				continue
			case filter.PodSandboxId != "" && filter.PodSandboxId != ctr.SandboxID:
				continue
			case filter.State != nil && filter.GetState().State != ctr.State:
				continue
			case filter.LabelSelector != nil && !filterInLabels(filter.LabelSelector, ctr.GetLabels()):
				continue
			}
		}
		result = append(result, &runtimeapi.Container{
			Id:           ctr.Id,
			CreatedAt:    ctr.CreatedAt,
			PodSandboxId: ctr.SandboxID,
			Metadata:     ctr.Metadata,
			State:        ctr.State,
			Image:        ctr.Image,
			ImageRef:     ctr.ImageRef,
			Labels:       ctr.Labels,
			Annotations:  ctr.Annotations,
		})
	}
	return result, nil
}
// ContainerStatus records the call and returns a copy of the stored status,
// so callers cannot mutate the fake's state through the returned pointer.
func (r *FakeRuntimeService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "ContainerStatus")

	ctr, ok := r.Containers[containerID]
	if !ok {
		return nil, fmt.Errorf("container %q not found", containerID)
	}
	statusCopy := ctr.ContainerStatus
	return &statusCopy, nil
}
// UpdateContainerResources is a no-op for the fake; note it is not recorded
// in Called, unlike most other methods.
func (r *FakeRuntimeService) UpdateContainerResources(string, *runtimeapi.LinuxContainerResources) error {
	return nil
}
// ExecSync records the call; the fake executes nothing and returns empty output.
func (r *FakeRuntimeService) ExecSync(containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "ExecSync")
	return nil, nil, nil
}
// Exec records the call and returns an empty response.
func (r *FakeRuntimeService) Exec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "Exec")
	return &runtimeapi.ExecResponse{}, nil
}
// Attach records the call and returns an empty response.
func (r *FakeRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "Attach")
	return &runtimeapi.AttachResponse{}, nil
}
// UpdateRuntimeConfig is a no-op for the fake runtime service.
// (Parameter renamed from the misspelled "runtimeCOnfig".)
func (r *FakeRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error {
	return nil
}
// SetFakeContainerStats replaces the fake's stats table, keyed by container id.
func (r *FakeRuntimeService) SetFakeContainerStats(containerStats []*runtimeapi.ContainerStats) {
	r.Lock()
	defer r.Unlock()

	table := make(map[string]*runtimeapi.ContainerStats)
	for _, cs := range containerStats {
		table[cs.Attributes.Id] = cs
	}
	r.FakeContainerStats = table
}
// ContainerStats records the call and returns the canned stats previously
// installed via SetFakeContainerStats.
func (r *FakeRuntimeService) ContainerStats(containerID string) (*runtimeapi.ContainerStats, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "ContainerStats")

	stats, ok := r.FakeContainerStats[containerID]
	if !ok {
		return nil, fmt.Errorf("no stats for container %q", containerID)
	}
	return stats, nil
}
// ListContainerStats records the call and returns canned stats for every
// container matching the filter; containers without installed stats are skipped.
func (r *FakeRuntimeService) ListContainerStats(filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "ListContainerStats")

	var result []*runtimeapi.ContainerStats
	for _, ctr := range r.Containers {
		// Unset filter criteria match everything.
		if filter != nil {
			switch {
			case filter.Id != "" && filter.Id != ctr.Id:
				continue
			case filter.PodSandboxId != "" && filter.PodSandboxId != ctr.SandboxID:
				continue
			case filter.LabelSelector != nil && !filterInLabels(filter.LabelSelector, ctr.GetLabels()):
				continue
			}
		}
		if stats, ok := r.FakeContainerStats[ctr.Id]; ok {
			result = append(result, stats)
		}
	}
	return result, nil
}
// ReopenContainerLog records the call and returns the next injected error
// for this method, if any (nil otherwise).
func (r *FakeRuntimeService) ReopenContainerLog(containerID string) error {
	r.Lock()
	defer r.Unlock()
	r.Called = append(r.Called, "ReopenContainerLog")
	return r.popError("ReopenContainerLog")
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package parser implements a parser for Go source files. Input may be
// provided in a variety of forms (see the various Parse* functions); the
// output is an abstract syntax tree (AST) representing the Go source. The
// parser is invoked through one of the Parse* functions.
//
package parser
import (
"fmt"
"go/ast"
"go/scanner"
"go/token"
"strconv"
"strings"
"unicode"
)
// The parser structure holds the parser's internal state.
type parser struct {
	file *token.File // source file being parsed (positions are relative to it)
	errors scanner.ErrorList // accumulated parse errors
	scanner scanner.Scanner // tokenizer producing the token stream
	// Tracing/debugging
	mode Mode // parsing mode
	trace bool // == (mode & Trace != 0)
	indent int // indentation used for tracing output
	// Comments
	comments []*ast.CommentGroup
	leadComment *ast.CommentGroup // last lead comment
	lineComment *ast.CommentGroup // last line comment
	// Next token
	pos token.Pos // token position
	tok token.Token // one token look-ahead
	lit string // token literal
	// Error recovery
	// (used to limit the number of calls to syncXXX functions
	// w/o making scanning progress - avoids potential endless
	// loops across multiple parser functions during error recovery)
	syncPos token.Pos // last synchronization position
	syncCnt int // number of calls to syncXXX without progress
	// Non-syntactic parser control
	exprLev int // < 0: in control clause, >= 0: in expression
	inRhs bool // if set, the parser is parsing a rhs expression
	// Ordinary identifier scopes
	pkgScope *ast.Scope // pkgScope.Outer == nil
	topScope *ast.Scope // top-most scope; may be pkgScope
	unresolved []*ast.Ident // unresolved identifiers
	imports []*ast.ImportSpec // list of imports
	// Label scopes
	// (maintained by open/close LabelScope)
	labelScope *ast.Scope // label scope for current function
	targetStack [][]*ast.Ident // stack of unresolved labels
}
// init prepares the parser to parse src (attributed to filename, registered
// in fset) with the given mode, and primes the one-token look-ahead.
func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
	p.file = fset.AddFile(filename, fset.Base(), len(src))
	var m scanner.Mode
	if mode&ParseComments != 0 {
		m = scanner.ScanComments
	}
	eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
	p.scanner.Init(p.file, src, eh, m)
	p.mode = mode
	p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
	p.next()
}
// ----------------------------------------------------------------------------
// Scoping support
// openScope pushes a new ordinary identifier scope.
func (p *parser) openScope() {
	p.topScope = ast.NewScope(p.topScope)
}
// closeScope pops the innermost ordinary identifier scope.
func (p *parser) closeScope() {
	p.topScope = p.topScope.Outer
}
// openLabelScope pushes a new label scope together with its (initially empty)
// list of forward-referenced label targets.
func (p *parser) openLabelScope() {
	p.labelScope = ast.NewScope(p.labelScope)
	p.targetStack = append(p.targetStack, nil)
}
// closeLabelScope resolves the pending label targets of the innermost label
// scope against that scope, then pops it.
func (p *parser) closeLabelScope() {
	// resolve labels
	n := len(p.targetStack) - 1
	scope := p.labelScope
	for _, ident := range p.targetStack[n] {
		ident.Obj = scope.Lookup(ident.Name)
		if ident.Obj == nil && p.mode&DeclarationErrors != 0 {
			p.error(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name))
		}
	}
	// pop label scope
	p.targetStack = p.targetStack[0:n]
	p.labelScope = p.labelScope.Outer
}
// declare enters idents into scope as objects of the given kind, reporting a
// redeclaration error (when DeclarationErrors is set) if a name is already
// present. The blank identifier is never inserted.
func (p *parser) declare(decl, data interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
	for _, ident := range idents {
		assert(ident.Obj == nil, "identifier already declared or resolved")
		obj := ast.NewObj(kind, ident.Name)
		// remember the corresponding declaration for redeclaration
		// errors and global variable resolution/typechecking phase
		obj.Decl = decl
		obj.Data = data
		ident.Obj = obj
		if ident.Name != "_" {
			if alt := scope.Insert(obj); alt != nil && p.mode&DeclarationErrors != 0 {
				prevDecl := ""
				if pos := alt.Pos(); pos.IsValid() {
					prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.file.Position(pos))
				}
				p.error(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl))
			}
		}
	}
}
// shortVarDecl declares the identifiers on the left side of a ":=" in the
// current scope, allowing redeclaration per the spec, and reports an error
// if no name on the left side is new.
func (p *parser) shortVarDecl(decl *ast.AssignStmt, list []ast.Expr) {
	// Go spec: A short variable declaration may redeclare variables
	// provided they were originally declared in the same block with
	// the same type, and at least one of the non-blank variables is new.
	n := 0 // number of new variables
	for _, x := range list {
		if ident, isIdent := x.(*ast.Ident); isIdent {
			assert(ident.Obj == nil, "identifier already declared or resolved")
			obj := ast.NewObj(ast.Var, ident.Name)
			// remember corresponding assignment for other tools
			obj.Decl = decl
			ident.Obj = obj
			if ident.Name != "_" {
				if alt := p.topScope.Insert(obj); alt != nil {
					ident.Obj = alt // redeclaration
				} else {
					n++ // new declaration
				}
			}
		} else {
			p.errorExpected(x.Pos(), "identifier on left side of :=")
		}
	}
	if n == 0 && p.mode&DeclarationErrors != 0 {
		p.error(list[0].Pos(), "no new variables on left side of :=")
	}
}
// The unresolved object is a sentinel to mark identifiers that have been added
// to the list of unresolved identifiers. The sentinel is only used for verifying
// internal consistency.
var unresolved = new(ast.Object)
// If x is an identifier, tryResolve attempts to resolve x by looking up
// the object it denotes. If no object is found and collectUnresolved is
// set, x is marked as unresolved and collected in the list of unresolved
// identifiers.
//
func (p *parser) tryResolve(x ast.Expr, collectUnresolved bool) {
	// nothing to do if x is not an identifier or the blank identifier
	ident, _ := x.(*ast.Ident)
	if ident == nil {
		return
	}
	assert(ident.Obj == nil, "identifier already declared or resolved")
	if ident.Name == "_" {
		return
	}
	// try to resolve the identifier
	for s := p.topScope; s != nil; s = s.Outer {
		if obj := s.Lookup(ident.Name); obj != nil {
			ident.Obj = obj
			return
		}
	}
	// all local scopes are known, so any unresolved identifier
	// must be found either in the file scope, package scope
	// (perhaps in another file), or universe scope --- collect
	// them so that they can be resolved later
	if collectUnresolved {
		ident.Obj = unresolved
		p.unresolved = append(p.unresolved, ident)
	}
}
// resolve resolves x (if it is an identifier), collecting it in the
// unresolved list when no declaration is found in any open scope.
func (p *parser) resolve(x ast.Expr) {
	p.tryResolve(x, true)
}
// ----------------------------------------------------------------------------
// Parsing support
// printTrace prints a trace line prefixed with the current source position
// and indented proportionally to the current trace nesting depth.
func (p *parser) printTrace(a ...interface{}) {
	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
	const n = len(dots)
	pos := p.file.Position(p.pos)
	fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
	i := 2 * p.indent
	for i > n {
		fmt.Print(dots)
		i -= n
	}
	// i <= n
	fmt.Print(dots[0:i])
	fmt.Println(a...)
}
// trace prints an opening trace line for msg and increases the trace
// indentation; it returns p so it composes with un (see usage note below).
func trace(p *parser, msg string) *parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}
// Usage pattern: defer un(trace(p, "..."))
func un(p *parser) {
	p.indent--
	p.printTrace(")")
}
// Advance to the next token.
func (p *parser) next0() {
	// Because of one-token look-ahead, print the previous token
	// when tracing as it provides a more readable output. The
	// very first token (!p.pos.IsValid()) is not initialized
	// (it is token.ILLEGAL), so don't print it.
	if p.trace && p.pos.IsValid() {
		s := p.tok.String()
		switch {
		case p.tok.IsLiteral():
			p.printTrace(s, p.lit)
		case p.tok.IsOperator(), p.tok.IsKeyword():
			p.printTrace("\"" + s + "\"")
		default:
			p.printTrace(s)
		}
	}
	p.pos, p.tok, p.lit = p.scanner.Scan()
}
// Consume a comment and return it and the line on which it ends.
func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
	// /*-style comments may end on a different line than where they start.
	// Scan the comment for '\n' chars and adjust endline accordingly.
	endline = p.file.Line(p.pos)
	if p.lit[1] == '*' {
		// don't use range here - no need to decode Unicode code points
		for i := 0; i < len(p.lit); i++ {
			if p.lit[i] == '\n' {
				endline++
			}
		}
	}
	comment = &ast.Comment{Slash: p.pos, Text: p.lit}
	p.next0()
	return
}
// Consume a group of adjacent comments, add it to the parser's
// comments list, and return it together with the line at which
// the last comment in the group ends. A non-comment token or n
// empty lines terminate a comment group.
//
func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
	var list []*ast.Comment
	endline = p.file.Line(p.pos)
	for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
		var comment *ast.Comment
		comment, endline = p.consumeComment()
		list = append(list, comment)
	}
	// add comment group to the comments list
	comments = &ast.CommentGroup{List: list}
	p.comments = append(p.comments, comments)
	return
}
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
//
func (p *parser) next() {
	p.leadComment = nil
	p.lineComment = nil
	prev := p.pos
	p.next0()
	if p.tok == token.COMMENT {
		var comment *ast.CommentGroup
		var endline int
		if p.file.Line(p.pos) == p.file.Line(prev) {
			// The comment is on same line as the previous token; it
			// cannot be a lead comment but may be a line comment.
			comment, endline = p.consumeCommentGroup(0)
			if p.file.Line(p.pos) != endline {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				p.lineComment = comment
			}
		}
		// consume successor comments, if any
		endline = -1
		for p.tok == token.COMMENT {
			comment, endline = p.consumeCommentGroup(1)
		}
		if endline+1 == p.file.Line(p.pos) {
			// The next token is following on the line immediately after the
			// comment group, thus the last comment group is a lead comment.
			p.leadComment = comment
		}
	}
}
// A bailout panic is raised to indicate early termination.
type bailout struct{}
// error records a parse error at pos; under the default mode it suppresses
// follow-on errors on the same line and aborts (via a bailout panic) after
// more than 10 errors.
func (p *parser) error(pos token.Pos, msg string) {
	epos := p.file.Position(pos)
	// If AllErrors is not set, discard errors reported on the same line
	// as the last recorded error and stop parsing if there are more than
	// 10 errors.
	if p.mode&AllErrors == 0 {
		n := len(p.errors)
		if n > 0 && p.errors[n-1].Pos.Line == epos.Line {
			return // discard - likely a spurious error
		}
		if n > 10 {
			panic(bailout{})
		}
	}
	p.errors.Add(epos, msg)
}
// errorExpected reports an "expected ..." error at pos, appending what was
// actually found when the error position is the current token.
func (p *parser) errorExpected(pos token.Pos, msg string) {
	msg = "expected " + msg
	if pos == p.pos {
		// the error happened at the current position;
		// make the error message more specific
		if p.tok == token.SEMICOLON && p.lit == "\n" {
			msg += ", found newline"
		} else {
			msg += ", found '" + p.tok.String() + "'"
			if p.tok.IsLiteral() {
				msg += " " + p.lit
			}
		}
	}
	p.error(pos, msg)
}
// expect consumes the current token, reporting an error first if it is not
// tok; it returns the position of the (expected) token.
func (p *parser) expect(tok token.Token) token.Pos {
	pos := p.pos
	if p.tok != tok {
		p.errorExpected(pos, "'"+tok.String()+"'")
	}
	p.next() // make progress
	return pos
}
// expectClosing is like expect but provides a better error message
// for the common case of a missing comma before a newline.
//
func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
	if p.tok != tok && p.tok == token.SEMICOLON && p.lit == "\n" {
		p.error(p.pos, "missing ',' before newline in "+context)
		p.next()
	}
	return p.expect(tok)
}
// expectSemi consumes a terminating semicolon, tolerating its absence before
// a closing ')' or '}'; on error it resynchronizes to the next statement.
func (p *parser) expectSemi() {
	// semicolon is optional before a closing ')' or '}'
	if p.tok != token.RPAREN && p.tok != token.RBRACE {
		if p.tok == token.SEMICOLON {
			p.next()
		} else {
			p.errorExpected(p.pos, "';'")
			syncStmt(p)
		}
	}
}
// atComma reports whether a comma is (or should be treated as) present,
// recovering from the common mistake of a missing comma before a newline.
func (p *parser) atComma(context string) bool {
	if p.tok == token.COMMA {
		return true
	}
	if p.tok == token.SEMICOLON && p.lit == "\n" {
		p.error(p.pos, "missing ',' before newline in "+context)
		return true // "insert" the comma and continue
	}
	return false
}
// assert panics with msg when cond is false; it guards internal parser
// invariants (a failure indicates a bug in the parser, not bad input).
func assert(cond bool, msg string) {
	if cond {
		return
	}
	panic("go/parser internal error: " + msg)
}
// syncStmt advances to the next statement.
// Used for synchronization after an error.
//
func syncStmt(p *parser) {
	for {
		switch p.tok {
		case token.BREAK, token.CONST, token.CONTINUE, token.DEFER,
			token.FALLTHROUGH, token.FOR, token.GO, token.GOTO,
			token.IF, token.RETURN, token.SELECT, token.SWITCH,
			token.TYPE, token.VAR:
			// Return only if parser made some progress since last
			// sync or if it has not reached 10 sync calls without
			// progress. Otherwise consume at least one token to
			// avoid an endless parser loop (it is possible that
			// both parseOperand and parseStmt call syncStmt and
			// correctly do not advance, thus the need for the
			// invocation limit p.syncCnt).
			if p.pos == p.syncPos && p.syncCnt < 10 {
				p.syncCnt++
				return
			}
			if p.pos > p.syncPos {
				p.syncPos = p.pos
				p.syncCnt = 0
				return
			}
			// Reaching here indicates a parser bug, likely an
			// incorrect token list in this function, but it only
			// leads to skipping of possibly correct code if a
			// previous error is present, and thus is preferred
			// over a non-terminating parse.
		case token.EOF:
			return
		}
		p.next()
	}
}
// syncDecl advances to the next declaration.
// Used for synchronization after an error.
//
func syncDecl(p *parser) {
	for {
		switch p.tok {
		case token.CONST, token.TYPE, token.VAR:
			// see comments in syncStmt
			if p.pos == p.syncPos && p.syncCnt < 10 {
				p.syncCnt++
				return
			}
			if p.pos > p.syncPos {
				p.syncPos = p.pos
				p.syncCnt = 0
				return
			}
		case token.EOF:
			return
		}
		p.next()
	}
}
// ----------------------------------------------------------------------------
// Identifiers
// parseIdent consumes and returns an identifier; on a non-identifier token it
// reports an error (via expect) and returns a placeholder "_" identifier.
func (p *parser) parseIdent() *ast.Ident {
	pos := p.pos
	name := "_"
	if p.tok == token.IDENT {
		name = p.lit
		p.next()
	} else {
		p.expect(token.IDENT) // use expect() error handling
	}
	return &ast.Ident{NamePos: pos, Name: name}
}
// parseIdentList parses a comma-separated list of one or more identifiers.
func (p *parser) parseIdentList() (list []*ast.Ident) {
	if p.trace {
		defer un(trace(p, "IdentList"))
	}
	list = append(list, p.parseIdent())
	for p.tok == token.COMMA {
		p.next()
		list = append(list, p.parseIdent())
	}
	return
}
// ----------------------------------------------------------------------------
// Common productions
// If lhs is set, result list elements which are identifiers are not resolved.
func (p *parser) parseExprList(lhs bool) (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "ExpressionList"))
	}
	list = append(list, p.checkExpr(p.parseExpr(lhs)))
	for p.tok == token.COMMA {
		p.next()
		list = append(list, p.checkExpr(p.parseExpr(lhs)))
	}
	return
}
// parseLhsList parses an expression list appearing on the left-hand side of
// an assignment or declaration, deferring or performing identifier resolution
// depending on the following token.
func (p *parser) parseLhsList() []ast.Expr {
	old := p.inRhs
	p.inRhs = false
	list := p.parseExprList(true)
	switch p.tok {
	case token.DEFINE:
		// lhs of a short variable declaration
		// but doesn't enter scope until later:
		// caller must call p.shortVarDecl(p.makeIdentList(list))
		// at appropriate time.
	case token.COLON:
		// lhs of a label declaration or a communication clause of a select
		// statement (parseLhsList is not called when parsing the case clause
		// of a switch statement):
		// - labels are declared by the caller of parseLhsList
		// - for communication clauses, if there is a stand-alone identifier
		//   followed by a colon, we have a syntax error; there is no need
		//   to resolve the identifier in that case
	default:
		// identifiers must be declared elsewhere
		for _, x := range list {
			p.resolve(x)
		}
	}
	p.inRhs = old
	return list
}
// parseRhsList parses an expression list in right-hand-side (value) context.
func (p *parser) parseRhsList() []ast.Expr {
	old := p.inRhs
	p.inRhs = true
	list := p.parseExprList(false)
	p.inRhs = old
	return list
}
// ----------------------------------------------------------------------------
// Types
// parseType parses a type, emitting an error and returning a BadExpr when no
// type is present.
func (p *parser) parseType() ast.Expr {
	if p.trace {
		defer un(trace(p, "Type"))
	}
	typ := p.tryType()
	if typ == nil {
		pos := p.pos
		p.errorExpected(pos, "type")
		p.next() // make progress
		return &ast.BadExpr{From: pos, To: p.pos}
	}
	return typ
}
// If the result is an identifier, it is not resolved.
func (p *parser) parseTypeName() ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeName"))
	}
	ident := p.parseIdent()
	// don't resolve ident yet - it may be a parameter or field name
	if p.tok == token.PERIOD {
		// ident is a package name
		p.next()
		p.resolve(ident)
		sel := p.parseIdent()
		return &ast.SelectorExpr{X: ident, Sel: sel}
	}
	return ident
}
// parseArrayType parses an array or slice type: "[N]T", "[...]T", or "[]T".
// The length expression is absent for slices and may be "..." (tolerated
// everywhere for more fault-tolerant parsing).
func (p *parser) parseArrayType() ast.Expr {
	if p.trace {
		defer un(trace(p, "ArrayType"))
	}
	lbrack := p.expect(token.LBRACK)
	// Local renamed from "len" to avoid shadowing the predeclared len builtin.
	var length ast.Expr
	// always permit ellipsis for more fault-tolerant parsing
	if p.tok == token.ELLIPSIS {
		length = &ast.Ellipsis{Ellipsis: p.pos}
		p.next()
	} else if p.tok != token.RBRACK {
		length = p.parseRhs()
	}
	p.expect(token.RBRACK)
	elt := p.parseType()
	return &ast.ArrayType{Lbrack: lbrack, Len: length, Elt: elt}
}
// makeIdentList converts a list of expressions into a list of
// identifiers. Non-identifier entries are reported (unless they are
// already BadExprs) and replaced by a blank identifier at the same
// position so parsing can continue.
func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident {
	idents := make([]*ast.Ident, len(list))
	for i, x := range list {
		if ident, ok := x.(*ast.Ident); ok {
			idents[i] = ident
			continue
		}
		if _, isBad := x.(*ast.BadExpr); !isBad {
			// only report error if it's a new one
			p.errorExpected(x.Pos(), "identifier")
		}
		idents[i] = &ast.Ident{NamePos: x.Pos(), Name: "_"}
	}
	return idents
}
// parseFieldDecl parses one field declaration of a struct type and
// declares its names in the given struct scope. It handles both the
// "IdentifierList Type" form and the anonymous-field form.
func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
	if p.trace {
		defer un(trace(p, "FieldDecl"))
	}
	doc := p.leadComment
	// FieldDecl: parseVarList returns typ == nil for the
	// anonymous-field form (list then holds a single type).
	list, typ := p.parseVarList(false)
	// Tag
	var tag *ast.BasicLit
	if p.tok == token.STRING {
		tag = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
	}
	// analyze case
	var idents []*ast.Ident
	if typ != nil {
		// IdentifierList Type
		idents = p.makeIdentList(list)
	} else {
		// ["*"] TypeName (AnonymousField)
		typ = list[0] // we always have at least one element
		if n := len(list); n > 1 || !isTypeName(deref(typ)) {
			pos := typ.Pos()
			p.errorExpected(pos, "anonymous field")
			typ = &ast.BadExpr{From: pos, To: list[n-1].End()}
		}
	}
	p.expectSemi() // call before accessing p.linecomment
	field := &ast.Field{Doc: doc, Names: idents, Type: typ, Tag: tag, Comment: p.lineComment}
	p.declare(field, nil, scope, ast.Var, idents...)
	p.resolve(typ)
	return field
}
// parseStructType parses a struct type, collecting its field
// declarations in a fresh struct scope.
func (p *parser) parseStructType() *ast.StructType {
	if p.trace {
		defer un(trace(p, "StructType"))
	}
	pos := p.expect(token.STRUCT)
	lbrace := p.expect(token.LBRACE)
	scope := ast.NewScope(nil) // struct scope
	var fields []*ast.Field
	// A field declaration cannot start with '(' but we accept it here
	// for more robust parsing and better error messages
	// (parseFieldDecl will check and complain if necessary).
	for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
		fields = append(fields, p.parseFieldDecl(scope))
	}
	rbrace := p.expect(token.RBRACE)
	return &ast.StructType{
		Struct: pos,
		Fields: &ast.FieldList{Opening: lbrace, List: fields, Closing: rbrace},
	}
}
// parsePointerType parses a pointer type "*Base".
func (p *parser) parsePointerType() *ast.StarExpr {
	if p.trace {
		defer un(trace(p, "PointerType"))
	}
	star := p.expect(token.MUL)
	return &ast.StarExpr{Star: star, X: p.parseType()}
}
// If the result is an identifier, it is not resolved.
// tryVarType is like tryIdentOrType but, inside parameter lists
// (isParam set), also accepts a variadic "...T" type. It returns nil
// if no type is present. If the result is an identifier, it is not
// resolved.
func (p *parser) tryVarType(isParam bool) ast.Expr {
	if isParam && p.tok == token.ELLIPSIS {
		pos := p.pos
		p.next()
		typ := p.tryIdentOrType() // don't use parseType so we can provide better error message
		if typ != nil {
			p.resolve(typ)
		} else {
			p.error(pos, "'...' parameter is missing type")
			typ = &ast.BadExpr{From: pos, To: p.pos}
		}
		return &ast.Ellipsis{Ellipsis: pos, Elt: typ}
	}
	return p.tryIdentOrType()
}
// If the result is an identifier, it is not resolved.
// parseVarType is like tryVarType but reports an error, consumes one
// token for progress, and returns a BadExpr when no type is found.
// If the result is an identifier, it is not resolved.
func (p *parser) parseVarType(isParam bool) ast.Expr {
	if typ := p.tryVarType(isParam); typ != nil {
		return typ
	}
	pos := p.pos
	p.errorExpected(pos, "type")
	p.next() // make progress
	return &ast.BadExpr{From: pos, To: p.pos}
}
// If any of the results are identifiers, they are not resolved.
// parseVarList parses a comma-separated list that is either an
// identifier list followed by a type (typ != nil) or a plain list of
// types (typ == nil). If any of the results are identifiers, they are
// not resolved.
func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) {
	if p.trace {
		defer un(trace(p, "VarList"))
	}
	// a list of identifiers looks like a list of type names
	//
	// parse/tryVarType accepts any type (including parenthesized
	// ones) even though the syntax does not permit them here: we
	// accept them all for more robust parsing and complain later
	//
	// NOTE: the loop variable typ deliberately shadows the result
	// parameter; only the tryVarType call after the loop sets the
	// returned typ.
	for typ := p.parseVarType(isParam); typ != nil; {
		list = append(list, typ)
		if p.tok != token.COMMA {
			break
		}
		p.next()
		typ = p.tryVarType(isParam) // maybe nil as in: func f(int,) {}
	}
	// if we had a list of identifiers, it must be followed by a type
	typ = p.tryVarType(isParam)
	return
}
// parseParameterList parses the contents of a parameter list and
// declares the parameter names in scope. parseVarList disambiguates
// between the "IdentifierList Type" form and the anonymous
// "Type, Type" form.
func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params []*ast.Field) {
	if p.trace {
		defer un(trace(p, "ParameterList"))
	}
	// ParameterDecl
	list, typ := p.parseVarList(ellipsisOk)
	// analyze case
	if typ != nil {
		// IdentifierList Type
		idents := p.makeIdentList(list)
		field := &ast.Field{Names: idents, Type: typ}
		params = append(params, field)
		// Go spec: The scope of an identifier denoting a function
		// parameter or result variable is the function body.
		p.declare(field, nil, scope, ast.Var, idents...)
		p.resolve(typ)
		if p.tok == token.COMMA {
			p.next()
		}
		// remaining "IdentifierList Type" groups
		for p.tok != token.RPAREN && p.tok != token.EOF {
			idents := p.parseIdentList()
			typ := p.parseVarType(ellipsisOk)
			field := &ast.Field{Names: idents, Type: typ}
			params = append(params, field)
			// Go spec: The scope of an identifier denoting a function
			// parameter or result variable is the function body.
			p.declare(field, nil, scope, ast.Var, idents...)
			p.resolve(typ)
			if !p.atComma("parameter list") {
				break
			}
			p.next()
		}
	} else {
		// Type { "," Type } (anonymous parameters)
		params = make([]*ast.Field, len(list))
		for i, typ := range list {
			p.resolve(typ)
			params[i] = &ast.Field{Type: typ}
		}
	}
	return
}
// parseParameters parses a parenthesized, possibly empty parameter list.
func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList {
	if p.trace {
		defer un(trace(p, "Parameters"))
	}
	lparen := p.expect(token.LPAREN)
	var fields []*ast.Field
	if p.tok != token.RPAREN {
		fields = p.parseParameterList(scope, ellipsisOk)
	}
	rparen := p.expect(token.RPAREN)
	return &ast.FieldList{Opening: lparen, List: fields, Closing: rparen}
}
// parseResult parses a function result: a parenthesized parameter
// list, a single unparenthesized type, or nothing (nil).
func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList {
	if p.trace {
		defer un(trace(p, "Result"))
	}
	if p.tok == token.LPAREN {
		return p.parseParameters(scope, false)
	}
	typ := p.tryType()
	if typ == nil {
		return nil
	}
	return &ast.FieldList{List: []*ast.Field{{Type: typ}}}
}
// parseSignature parses a function signature: the parameter list
// followed by an optional result.
func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) {
	if p.trace {
		defer un(trace(p, "Signature"))
	}
	// parameters first, then results (left-to-right evaluation)
	return p.parseParameters(scope, true), p.parseResult(scope)
}
// parseFuncType parses a function type and returns it together with
// the freshly created function scope (a child of the current top scope).
func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) {
	if p.trace {
		defer un(trace(p, "FuncType"))
	}
	pos := p.expect(token.FUNC)
	scope := ast.NewScope(p.topScope) // function scope
	params, results := p.parseSignature(scope)
	return &ast.FuncType{Func: pos, Params: params, Results: results}, scope
}
// parseMethodSpec parses one entry of an interface body: either a
// method signature or an embedded interface name. The spec is
// declared in the given interface scope.
func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
	if p.trace {
		defer un(trace(p, "MethodSpec"))
	}
	doc := p.leadComment
	var idents []*ast.Ident
	var typ ast.Expr
	x := p.parseTypeName()
	if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN {
		// method
		idents = []*ast.Ident{ident}
		scope := ast.NewScope(nil) // method scope
		params, results := p.parseSignature(scope)
		typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
	} else {
		// embedded interface
		typ = x
		p.resolve(typ)
	}
	p.expectSemi() // call before accessing p.linecomment
	spec := &ast.Field{Doc: doc, Names: idents, Type: typ, Comment: p.lineComment}
	p.declare(spec, nil, scope, ast.Fun, idents...)
	return spec
}
// parseInterfaceType parses an interface type, collecting its method
// specifications in a fresh interface scope.
func (p *parser) parseInterfaceType() *ast.InterfaceType {
	if p.trace {
		defer un(trace(p, "InterfaceType"))
	}
	pos := p.expect(token.INTERFACE)
	lbrace := p.expect(token.LBRACE)
	scope := ast.NewScope(nil) // interface scope
	var methods []*ast.Field
	for p.tok == token.IDENT {
		methods = append(methods, p.parseMethodSpec(scope))
	}
	rbrace := p.expect(token.RBRACE)
	return &ast.InterfaceType{
		Interface: pos,
		Methods:   &ast.FieldList{Opening: lbrace, List: methods, Closing: rbrace},
	}
}
// parseMapType parses "map[Key]Value".
func (p *parser) parseMapType() *ast.MapType {
	if p.trace {
		defer un(trace(p, "MapType"))
	}
	pos := p.expect(token.MAP)
	p.expect(token.LBRACK)
	key := p.parseType()
	p.expect(token.RBRACK)
	return &ast.MapType{Map: pos, Key: key, Value: p.parseType()}
}
// parseChanType parses "chan T", "chan<- T", or "<-chan T".
// The direction defaults to bidirectional (SEND|RECV).
func (p *parser) parseChanType() *ast.ChanType {
	if p.trace {
		defer un(trace(p, "ChanType"))
	}
	pos := p.pos
	dir := ast.SEND | ast.RECV
	var arrow token.Pos
	if p.tok == token.CHAN {
		p.next()
		if p.tok == token.ARROW {
			// send-only: chan<- T
			arrow = p.pos
			p.next()
			dir = ast.SEND
		}
	} else {
		// receive-only: <-chan T
		arrow = p.expect(token.ARROW)
		p.expect(token.CHAN)
		dir = ast.RECV
	}
	value := p.parseType()
	return &ast.ChanType{Begin: pos, Arrow: arrow, Dir: dir, Value: value}
}
// If the result is an identifier, it is not resolved.
// tryIdentOrType parses a type if one begins at the current token and
// returns nil otherwise. If the result is an identifier, it is not
// resolved.
func (p *parser) tryIdentOrType() ast.Expr {
	switch p.tok {
	case token.IDENT:
		return p.parseTypeName()
	case token.LBRACK:
		return p.parseArrayType()
	case token.STRUCT:
		return p.parseStructType()
	case token.MUL:
		return p.parsePointerType()
	case token.FUNC:
		// the function scope is discarded here; only the type matters
		typ, _ := p.parseFuncType()
		return typ
	case token.INTERFACE:
		return p.parseInterfaceType()
	case token.MAP:
		return p.parseMapType()
	case token.CHAN, token.ARROW:
		return p.parseChanType()
	case token.LPAREN:
		// parenthesized type
		lparen := p.pos
		p.next()
		typ := p.parseType()
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: typ, Rparen: rparen}
	}
	// no type found
	return nil
}
// tryType is like tryIdentOrType but resolves the type if one was found.
func (p *parser) tryType() ast.Expr {
	if typ := p.tryIdentOrType(); typ != nil {
		p.resolve(typ)
		return typ
	}
	return nil
}
// ----------------------------------------------------------------------------
// Blocks
// parseStmtList parses statements until a token that terminates a
// statement list (case, default, '}', or end of file) is reached.
func (p *parser) parseStmtList() (list []ast.Stmt) {
	if p.trace {
		defer un(trace(p, "StatementList"))
	}
	for {
		switch p.tok {
		case token.CASE, token.DEFAULT, token.RBRACE, token.EOF:
			return
		}
		list = append(list, p.parseStmt())
	}
}
// parseBody parses a function body. Unlike parseBlockStmt it installs
// the given function scope (which already holds the parameters) as the
// top scope and opens a label scope for the duration of the body.
func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "Body"))
	}
	lbrace := p.expect(token.LBRACE)
	p.topScope = scope // open function scope
	p.openLabelScope()
	list := p.parseStmtList()
	p.closeLabelScope()
	p.closeScope()
	rbrace := p.expect(token.RBRACE)
	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}
// parseBlockStmt parses a braced statement block in its own new scope.
func (p *parser) parseBlockStmt() *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "BlockStmt"))
	}
	lbrace := p.expect(token.LBRACE)
	p.openScope()
	list := p.parseStmtList()
	p.closeScope()
	rbrace := p.expect(token.RBRACE)
	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}
// ----------------------------------------------------------------------------
// Expressions
// parseFuncTypeOrLit parses a function type and, if a body follows,
// a function literal. exprLev is raised around the body so composite
// literals inside it parse correctly.
func (p *parser) parseFuncTypeOrLit() ast.Expr {
	if p.trace {
		defer un(trace(p, "FuncTypeOrLit"))
	}
	typ, scope := p.parseFuncType()
	if p.tok != token.LBRACE {
		// function type only
		return typ
	}
	p.exprLev++
	body := p.parseBody(scope)
	p.exprLev--
	return &ast.FuncLit{Type: typ, Body: body}
}
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T). Callers must verify the result.
// If lhs is set and the result is an identifier, it is not resolved.
//
func (p *parser) parseOperand(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "Operand"))
	}
	switch p.tok {
	case token.IDENT:
		x := p.parseIdent()
		if !lhs {
			p.resolve(x)
		}
		return x
	case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
		// basic literal
		x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
		return x
	case token.LPAREN:
		lparen := p.pos
		p.next()
		p.exprLev++
		x := p.parseRhsOrType() // types may be parenthesized: (some type)
		p.exprLev--
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}
	case token.FUNC:
		return p.parseFuncTypeOrLit()
	}
	if typ := p.tryIdentOrType(); typ != nil {
		// could be type for composite literal or conversion
		_, isIdent := typ.(*ast.Ident)
		assert(!isIdent, "type cannot be identifier") // idents were handled above
		return typ
	}
	// we have an error
	pos := p.pos
	p.errorExpected(pos, "operand")
	syncStmt(p) // skip ahead to a plausible statement start
	return &ast.BadExpr{From: pos, To: p.pos}
}
// parseSelector parses the identifier after a '.' and combines it
// with x into a selector expression.
func (p *parser) parseSelector(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "Selector"))
	}
	sel := p.parseIdent()
	return &ast.SelectorExpr{X: x, Sel: sel}
}
// parseTypeAssertion parses ".(T)" or ".(type)" following x.
// For the type switch form the resulting assertion has a nil Type.
func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeAssertion"))
	}
	p.expect(token.LPAREN)
	var typ ast.Expr
	if p.tok != token.TYPE {
		typ = p.parseType()
	} else {
		// type switch: typ == nil
		p.next()
	}
	p.expect(token.RPAREN)
	return &ast.TypeAssertExpr{X: x, Type: typ}
}
// parseIndexOrSlice parses "[expr]" (index) or "[low : high]" (slice)
// following x; the presence of a ':' decides which node is built.
func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "IndexOrSlice"))
	}
	lbrack := p.expect(token.LBRACK)
	p.exprLev++
	var low, high ast.Expr
	isSlice := false
	if p.tok != token.COLON {
		low = p.parseRhs()
	}
	if p.tok == token.COLON {
		isSlice = true
		p.next()
		if p.tok != token.RBRACK {
			high = p.parseRhs()
		}
	}
	p.exprLev--
	rbrack := p.expect(token.RBRACK)
	if isSlice {
		return &ast.SliceExpr{X: x, Lbrack: lbrack, Low: low, High: high, Rbrack: rbrack}
	}
	return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: low, Rbrack: rbrack}
}
// parseCallOrConversion parses the parenthesized argument list of a
// call (or conversion), including an optional trailing "...".
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
	if p.trace {
		defer un(trace(p, "CallOrConversion"))
	}
	lparen := p.expect(token.LPAREN)
	p.exprLev++
	var list []ast.Expr
	var ellipsis token.Pos
	// stop collecting arguments once "..." has been seen: it may only
	// follow the final argument
	for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
		list = append(list, p.parseRhsOrType()) // builtins may expect a type: make(some type, ...)
		if p.tok == token.ELLIPSIS {
			ellipsis = p.pos
			p.next()
		}
		if !p.atComma("argument list") {
			break
		}
		p.next()
	}
	p.exprLev--
	rparen := p.expectClosing(token.RPAREN, "argument list")
	return &ast.CallExpr{Fun: fun, Lparen: lparen, Args: list, Ellipsis: ellipsis, Rparen: rparen}
}
// parseElement parses one element of a composite literal; when keyOk
// is set, a "key: value" pair is recognized.
func (p *parser) parseElement(keyOk bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "Element"))
	}
	if p.tok == token.LBRACE {
		// nested composite literal with elided type
		return p.parseLiteralValue(nil)
	}
	// Because the parser doesn't know the composite literal type, it cannot
	// know if a key that's an identifier is a struct field name or a name
	// denoting a value. The former is not resolved by the parser or the
	// resolver.
	//
	// Instead, _try_ to resolve such a key if possible. If it resolves,
	// it a) has correctly resolved, or b) incorrectly resolved because
	// the key is a struct field with a name matching another identifier.
	// In the former case we are done, and in the latter case we don't
	// care because the type checker will do a separate field lookup.
	//
	// If the key does not resolve, it a) must be defined at the top
	// level in another file of the same package, the universe scope, or be
	// undeclared; or b) it is a struct field. In the former case, the type
	// checker can do a top-level lookup, and in the latter case it will do
	// a separate field lookup.
	x := p.checkExpr(p.parseExpr(keyOk))
	if keyOk {
		if p.tok == token.COLON {
			colon := p.pos
			p.next()
			// Try to resolve the key but don't collect it
			// as unresolved identifier if it fails so that
			// we don't get (possibly false) errors about
			// undeclared names.
			p.tryResolve(x, false)
			return &ast.KeyValueExpr{Key: x, Colon: colon, Value: p.parseElement(false)}
		}
		p.resolve(x) // not a key
	}
	return x
}
// parseElementList parses the comma-separated elements of a composite
// literal up to (but not including) the closing brace.
func (p *parser) parseElementList() (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "ElementList"))
	}
	for p.tok != token.RBRACE && p.tok != token.EOF {
		list = append(list, p.parseElement(true))
		if !p.atComma("composite literal") {
			return
		}
		p.next()
	}
	return
}
// parseLiteralValue parses the braced element list of a composite
// literal for the given (possibly nil, i.e. elided) type.
func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "LiteralValue"))
	}
	lbrace := p.expect(token.LBRACE)
	var elts []ast.Expr
	p.exprLev++
	if p.tok != token.RBRACE {
		elts = p.parseElementList()
	}
	p.exprLev--
	rbrace := p.expectClosing(token.RBRACE, "composite literal")
	return &ast.CompositeLit{Type: typ, Lbrace: lbrace, Elts: elts, Rbrace: rbrace}
}
// checkExpr checks that x is an expression (and not a type).
// checkExpr checks that x is an expression (and not a type).
// Invalid nodes are reported and replaced by a BadExpr.
func (p *parser) checkExpr(x ast.Expr) ast.Expr {
	switch unparen(x).(type) {
	case *ast.BadExpr, *ast.Ident, *ast.BasicLit, *ast.FuncLit,
		*ast.CompositeLit, *ast.SelectorExpr, *ast.IndexExpr,
		*ast.SliceExpr, *ast.CallExpr, *ast.StarExpr,
		*ast.UnaryExpr, *ast.BinaryExpr:
		// proper expressions — nothing to do
	case *ast.TypeAssertExpr:
		// If t.Type == nil we have a type assertion of the form
		// y.(type), which is only allowed in type switch expressions.
		// It's hard to exclude those but for the case where we are in
		// a type switch. Instead be lenient and test this in the type
		// checker.
	case *ast.ParenExpr:
		panic("unreachable") // unparen removed all parentheses
	default:
		// all other nodes are not proper expressions
		p.errorExpected(x.Pos(), "expression")
		x = &ast.BadExpr{From: x.Pos(), To: x.End()}
	}
	return x
}
// isTypeName returns true iff x is a (qualified) TypeName.
func isTypeName(x ast.Expr) bool {
switch t := x.(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.SelectorExpr:
_, isIdent := t.X.(*ast.Ident)
return isIdent
default:
return false // all other nodes are not type names
}
return true
}
// isLiteralType returns true iff x is a legal composite literal type.
func isLiteralType(x ast.Expr) bool {
switch t := x.(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.SelectorExpr:
_, isIdent := t.X.(*ast.Ident)
return isIdent
case *ast.ArrayType:
case *ast.StructType:
case *ast.MapType:
default:
return false // all other nodes are not legal composite literal types
}
return true
}
// If x is of the form *T, deref returns T, otherwise it returns x.
func deref(x ast.Expr) ast.Expr {
if p, isPtr := x.(*ast.StarExpr); isPtr {
x = p.X
}
return x
}
// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
func unparen(x ast.Expr) ast.Expr {
if p, isParen := x.(*ast.ParenExpr); isParen {
x = unparen(p.X)
}
return x
}
// checkExprOrType checks that x is an expression or a type
// (and not a raw type such as [...]T).
//
func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
	switch t := unparen(x).(type) {
	case *ast.ParenExpr:
		panic("unreachable") // unparen removed all parentheses
	case *ast.UnaryExpr:
	case *ast.ArrayType:
		// a [...]T length is only meaningful in a composite literal,
		// where it is consumed directly; anywhere else it is an error
		if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
			p.error(len.Pos(), "expected array length, found '...'")
			x = &ast.BadExpr{From: x.Pos(), To: x.End()}
		}
	}
	// all other nodes are expressions or types
	return x
}
// If lhs is set and the result is an identifier, it is not resolved.
// parsePrimaryExpr parses an operand followed by any number of
// selector, type-assertion, index/slice, call, or composite-literal
// suffixes. If lhs is set and the result is an identifier, it is not
// resolved.
func (p *parser) parsePrimaryExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "PrimaryExpr"))
	}
	x := p.parseOperand(lhs)
L:
	for {
		switch p.tok {
		case token.PERIOD:
			p.next()
			if lhs {
				p.resolve(x)
			}
			switch p.tok {
			case token.IDENT:
				x = p.parseSelector(p.checkExpr(x))
			case token.LPAREN:
				x = p.parseTypeAssertion(p.checkExpr(x))
			default:
				pos := p.pos
				p.errorExpected(pos, "selector or type assertion")
				p.next() // make progress
				x = &ast.BadExpr{From: pos, To: p.pos}
			}
		case token.LBRACK:
			if lhs {
				p.resolve(x)
			}
			x = p.parseIndexOrSlice(p.checkExpr(x))
		case token.LPAREN:
			if lhs {
				p.resolve(x)
			}
			x = p.parseCallOrConversion(p.checkExprOrType(x))
		case token.LBRACE:
			// only treat '{' as a composite literal when x can be a
			// literal type and we are not in a control-clause header
			// (exprLev < 0) with a bare type name
			if isLiteralType(x) && (p.exprLev >= 0 || !isTypeName(x)) {
				if lhs {
					p.resolve(x)
				}
				x = p.parseLiteralValue(x)
			} else {
				break L
			}
		default:
			break L
		}
		lhs = false // no need to try to resolve again
	}
	return x
}
// If lhs is set and the result is an identifier, it is not resolved.
// parseUnaryExpr parses a unary expression. The ARROW case must
// disambiguate a receive operation from a receive-only channel type;
// see the comments in the body. If lhs is set and the result is an
// identifier, it is not resolved.
func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "UnaryExpr"))
	}
	switch p.tok {
	case token.ADD, token.SUB, token.NOT, token.XOR, token.AND:
		pos, op := p.pos, p.tok
		p.next()
		x := p.parseUnaryExpr(false)
		return &ast.UnaryExpr{OpPos: pos, Op: op, X: p.checkExpr(x)}
	case token.ARROW:
		// channel type or receive expression
		arrow := p.pos
		p.next()
		// If the next token is token.CHAN we still don't know if it
		// is a channel type or a receive operation - we only know
		// once we have found the end of the unary expression. There
		// are two cases:
		//
		//   <- type  => (<-type) must be channel type
		//   <- expr  => <-(expr) is a receive from an expression
		//
		// In the first case, the arrow must be re-associated with
		// the channel type parsed already:
		//
		//   <- (chan type)    =>  (<-chan type)
		//   <- (chan<- type)  =>  (<-chan (<-type))
		x := p.parseUnaryExpr(false)
		// determine which case we have
		if typ, ok := x.(*ast.ChanType); ok {
			// (<-type)
			// re-associate position info and <-
			dir := ast.SEND
			for ok && dir == ast.SEND {
				if typ.Dir == ast.RECV {
					// error: (<-type) is (<-(<-chan T))
					p.errorExpected(typ.Arrow, "'chan'")
				}
				arrow, typ.Begin, typ.Arrow = typ.Arrow, arrow, arrow
				dir, typ.Dir = typ.Dir, ast.RECV
				typ, ok = typ.Value.(*ast.ChanType)
			}
			if dir == ast.SEND {
				p.errorExpected(arrow, "channel type")
			}
			return x
		}
		// <-(expr)
		return &ast.UnaryExpr{OpPos: arrow, Op: token.ARROW, X: p.checkExpr(x)}
	case token.MUL:
		// pointer type or unary "*" expression
		pos := p.pos
		p.next()
		x := p.parseUnaryExpr(false)
		return &ast.StarExpr{Star: pos, X: p.checkExprOrType(x)}
	}
	return p.parsePrimaryExpr(lhs)
}
// tokPrec returns the current token and its precedence for binary
// expression parsing. In rhs context '=' is given the token (and thus
// precedence) of '==' — presumably for better recovery from a
// mistyped comparison; confirm against error-handling callers.
func (p *parser) tokPrec() (token.Token, int) {
	tok := p.tok
	if p.inRhs && tok == token.ASSIGN {
		tok = token.EQL
	}
	return tok, tok.Precedence()
}
// If lhs is set and the result is an identifier, it is not resolved.
// parseBinaryExpr parses a binary expression containing only operators
// of precedence >= prec1, descending one precedence level per outer
// iteration. If lhs is set and the result is an identifier, it is not
// resolved.
func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
	if p.trace {
		defer un(trace(p, "BinaryExpr"))
	}
	x := p.parseUnaryExpr(lhs)
	for _, prec := p.tokPrec(); prec >= prec1; prec-- {
		// consume all consecutive operators of exactly this precedence
		for {
			op, oprec := p.tokPrec()
			if oprec != prec {
				break
			}
			pos := p.expect(op)
			if lhs {
				p.resolve(x)
				lhs = false
			}
			// operands of higher precedence bind tighter, hence prec+1
			y := p.parseBinaryExpr(false, prec+1)
			x = &ast.BinaryExpr{X: p.checkExpr(x), OpPos: pos, Op: op, Y: p.checkExpr(y)}
		}
	}
	return x
}
// If lhs is set and the result is an identifier, it is not resolved.
// The result may be a type or even a raw type ([...]int). Callers must
// check the result (using checkExpr or checkExprOrType), depending on
// context.
func (p *parser) parseExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "Expression"))
	}
	// an expression is a binary expression at the lowest binding level
	return p.parseBinaryExpr(lhs, token.LowestPrec+1)
}
// parseRhs parses a single expression in right-hand-side context and
// checks that it is an expression (not a type).
func (p *parser) parseRhs() ast.Expr {
	old := p.inRhs
	p.inRhs = true
	x := p.checkExpr(p.parseExpr(false))
	p.inRhs = old
	return x
}
// parseRhsOrType is like parseRhs but also permits a type, as needed
// for conversions and type-taking builtins.
func (p *parser) parseRhsOrType() ast.Expr {
	old := p.inRhs
	p.inRhs = true
	x := p.checkExprOrType(p.parseExpr(false))
	p.inRhs = old
	return x
}
// ----------------------------------------------------------------------------
// Statements
// Parsing modes for parseSimpleStmt.
const (
	basic   = iota // plain simple statement (no label, no range clause)
	labelOk        // a labeled statement is also permitted
	rangeOk        // a range clause is also permitted (for statements)
)
// parseSimpleStmt returns true as 2nd result if it parsed the assignment
// of a range clause (with mode == rangeOk). The returned statement is an
// assignment with a right-hand side that is a single unary expression of
// the form "range x". No guarantees are given for the left-hand side.
func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
	if p.trace {
		defer un(trace(p, "SimpleStmt"))
	}
	x := p.parseLhsList()
	switch p.tok {
	case
		token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
		token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
		token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
		token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
		// assignment statement, possibly part of a range clause
		pos, tok := p.pos, p.tok
		p.next()
		var y []ast.Expr
		isRange := false
		if mode == rangeOk && p.tok == token.RANGE && (tok == token.DEFINE || tok == token.ASSIGN) {
			// wrap the range operand in a unary RANGE expression;
			// parseForStmt unpacks it again
			pos := p.pos
			p.next()
			y = []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
			isRange = true
		} else {
			y = p.parseRhsList()
		}
		as := &ast.AssignStmt{Lhs: x, TokPos: pos, Tok: tok, Rhs: y}
		if tok == token.DEFINE {
			p.shortVarDecl(as, x)
		}
		return as, isRange
	}
	if len(x) > 1 {
		p.errorExpected(x[0].Pos(), "1 expression")
		// continue with first expression
	}
	switch p.tok {
	case token.COLON:
		// labeled statement
		colon := p.pos
		p.next()
		if label, isIdent := x[0].(*ast.Ident); mode == labelOk && isIdent {
			// Go spec: The scope of a label is the body of the function
			// in which it is declared and excludes the body of any nested
			// function.
			stmt := &ast.LabeledStmt{Label: label, Colon: colon, Stmt: p.parseStmt()}
			p.declare(stmt, nil, p.labelScope, ast.Lbl, label)
			return stmt, false
		}
		// The label declaration typically starts at x[0].Pos(), but the label
		// declaration may be erroneous due to a token after that position (and
		// before the ':'). If SpuriousErrors is not set, the (only) error re-
		// ported for the line is the illegal label error instead of the token
		// before the ':' that caused the problem. Thus, use the (latest) colon
		// position for error reporting.
		p.error(colon, "illegal label declaration")
		return &ast.BadStmt{From: x[0].Pos(), To: colon + 1}, false
	case token.ARROW:
		// send statement
		arrow := p.pos
		p.next()
		y := p.parseRhs()
		return &ast.SendStmt{Chan: x[0], Arrow: arrow, Value: y}, false
	case token.INC, token.DEC:
		// increment or decrement
		s := &ast.IncDecStmt{X: x[0], TokPos: p.pos, Tok: p.tok}
		p.next()
		return s, false
	}
	// expression
	return &ast.ExprStmt{X: x[0]}, false
}
// parseCallExpr parses an expression and verifies that it is a call
// (or conversion); otherwise it reports an error (unless the
// expression is already bad) and returns nil.
func (p *parser) parseCallExpr() *ast.CallExpr {
	x := p.parseRhsOrType() // could be a conversion: (some type)(x)
	switch t := x.(type) {
	case *ast.CallExpr:
		return t
	case *ast.BadExpr:
		// an error was already reported for this expression
	default:
		// only report error if it's a new one
		p.errorExpected(x.Pos(), "function/method call")
	}
	return nil
}
// parseGoStmt parses a "go" statement. If the operand is not a call,
// a BadStmt covering the "go" keyword is returned instead.
func (p *parser) parseGoStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "GoStmt"))
	}
	pos := p.expect(token.GO)
	call := p.parseCallExpr()
	p.expectSemi()
	if call != nil {
		return &ast.GoStmt{Go: pos, Call: call}
	}
	return &ast.BadStmt{From: pos, To: pos + 2} // len("go")
}
// parseDeferStmt parses a "defer" statement. If the operand is not a
// call, a BadStmt covering the "defer" keyword is returned instead.
func (p *parser) parseDeferStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "DeferStmt"))
	}
	pos := p.expect(token.DEFER)
	call := p.parseCallExpr()
	p.expectSemi()
	if call != nil {
		return &ast.DeferStmt{Defer: pos, Call: call}
	}
	return &ast.BadStmt{From: pos, To: pos + 5} // len("defer")
}
// parseReturnStmt parses a return statement with an optional result
// list (absent when the next token ends the statement).
func (p *parser) parseReturnStmt() *ast.ReturnStmt {
	if p.trace {
		defer un(trace(p, "ReturnStmt"))
	}
	pos := p.pos
	p.expect(token.RETURN)
	var results []ast.Expr
	if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
		results = p.parseRhsList()
	}
	p.expectSemi()
	return &ast.ReturnStmt{Return: pos, Results: results}
}
// parseBranchStmt parses a break, continue, goto, or fallthrough
// statement. An optional label (never after fallthrough) is recorded
// on the current frame of the branch-target stack for later resolution.
func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
	if p.trace {
		defer un(trace(p, "BranchStmt"))
	}
	pos := p.expect(tok)
	var label *ast.Ident
	if tok != token.FALLTHROUGH && p.tok == token.IDENT {
		label = p.parseIdent()
		// add to list of unresolved targets
		n := len(p.targetStack) - 1
		p.targetStack[n] = append(p.targetStack[n], label)
	}
	p.expectSemi()
	return &ast.BranchStmt{TokPos: pos, Tok: tok, Label: label}
}
// makeExpr converts a simple statement to an expression: nil maps to
// nil, an ExprStmt yields its (checked) expression, and anything else
// is reported and replaced by a BadExpr.
func (p *parser) makeExpr(s ast.Stmt) ast.Expr {
	switch t := s.(type) {
	case nil:
		return nil
	case *ast.ExprStmt:
		return p.checkExpr(t.X)
	}
	p.error(s.Pos(), "expected condition, found simple statement")
	return &ast.BadExpr{From: s.Pos(), To: s.End()}
}
// parseIfStmt parses an if statement with an optional init statement
// and optional else branch. exprLev is set to -1 while parsing the
// header so an opening '{' is not mistaken for a composite literal.
func (p *parser) parseIfStmt() *ast.IfStmt {
	if p.trace {
		defer un(trace(p, "IfStmt"))
	}
	pos := p.expect(token.IF)
	p.openScope()
	defer p.closeScope()
	var s ast.Stmt
	var x ast.Expr
	{
		prevLev := p.exprLev
		p.exprLev = -1
		if p.tok == token.SEMICOLON {
			// "if ; cond": empty init statement
			p.next()
			x = p.parseRhs()
		} else {
			s, _ = p.parseSimpleStmt(basic)
			if p.tok == token.SEMICOLON {
				p.next()
				x = p.parseRhs()
			} else {
				// no init statement: the simple statement is the condition
				x = p.makeExpr(s)
				s = nil
			}
		}
		p.exprLev = prevLev
	}
	body := p.parseBlockStmt()
	var else_ ast.Stmt
	if p.tok == token.ELSE {
		p.next()
		else_ = p.parseStmt()
	} else {
		p.expectSemi()
	}
	return &ast.IfStmt{If: pos, Init: s, Cond: x, Body: body, Else: else_}
}
// parseTypeList parses a comma-separated list of one or more types
// (as used in type switch case clauses).
func (p *parser) parseTypeList() []ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeList"))
	}
	types := []ast.Expr{p.parseType()}
	for p.tok == token.COMMA {
		p.next()
		types = append(types, p.parseType())
	}
	return types
}
// parseCaseClause parses one "case ...:" or "default:" clause of a
// switch statement; typeSwitch selects between parsing a type list
// and an expression list.
func (p *parser) parseCaseClause(typeSwitch bool) *ast.CaseClause {
	if p.trace {
		defer un(trace(p, "CaseClause"))
	}
	pos := p.pos
	var list []ast.Expr
	if p.tok == token.CASE {
		p.next()
		if typeSwitch {
			list = p.parseTypeList()
		} else {
			list = p.parseRhsList()
		}
	} else {
		// "default:" clause — list stays nil
		p.expect(token.DEFAULT)
	}
	colon := p.expect(token.COLON)
	p.openScope()
	body := p.parseStmtList()
	p.closeScope()
	return &ast.CaseClause{Case: pos, List: list, Colon: colon, Body: body}
}
func isTypeSwitchAssert(x ast.Expr) bool {
a, ok := x.(*ast.TypeAssertExpr)
return ok && a.Type == nil
}
// isTypeSwitchGuard reports whether s is a type switch guard:
// either "x.(type)" or "v := x.(type)".
func isTypeSwitchGuard(s ast.Stmt) bool {
	switch t := s.(type) {
	case *ast.ExprStmt:
		// x.(type)
		return isTypeSwitchAssert(t.X)
	case *ast.AssignStmt:
		// v := x.(type)
		if len(t.Lhs) != 1 || t.Tok != token.DEFINE || len(t.Rhs) != 1 {
			return false
		}
		return isTypeSwitchAssert(t.Rhs[0])
	default:
		return false
	}
}
// parseSwitchStmt parses a switch statement header and body, deciding
// from the guard whether to build a SwitchStmt or a TypeSwitchStmt.
// exprLev is -1 while parsing the header so '{' is not taken as the
// start of a composite literal.
func (p *parser) parseSwitchStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "SwitchStmt"))
	}
	pos := p.expect(token.SWITCH)
	p.openScope()
	defer p.closeScope()
	var s1, s2 ast.Stmt
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1
		if p.tok != token.SEMICOLON {
			s2, _ = p.parseSimpleStmt(basic)
		}
		if p.tok == token.SEMICOLON {
			// what was parsed is actually the init statement; the guard
			// (if any) follows the semicolon
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.LBRACE {
				// A TypeSwitchGuard may declare a variable in addition
				// to the variable declared in the initial SimpleStmt.
				// Introduce extra scope to avoid redeclaration errors:
				//
				//	switch t := 0; t := x.(T) { ... }
				//
				// (this code is not valid Go because the first t
				// cannot be accessed and thus is never used, the extra
				// scope is needed for the correct error message).
				//
				// If we don't have a type switch, s2 must be an expression.
				// Having the extra nested but empty scope won't affect it.
				p.openScope()
				defer p.closeScope()
				s2, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}
	typeSwitch := isTypeSwitchGuard(s2)
	lbrace := p.expect(token.LBRACE)
	var list []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		list = append(list, p.parseCaseClause(typeSwitch))
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
	if typeSwitch {
		return &ast.TypeSwitchStmt{Switch: pos, Init: s1, Assign: s2, Body: body}
	}
	return &ast.SwitchStmt{Switch: pos, Init: s1, Tag: p.makeExpr(s2), Body: body}
}
// parseCommClause parses one communication clause of a select
// statement: a send, a receive (optionally with assignment), or a
// default clause. The clause body gets its own scope.
func (p *parser) parseCommClause() *ast.CommClause {
	if p.trace {
		defer un(trace(p, "CommClause"))
	}
	p.openScope()
	pos := p.pos
	var comm ast.Stmt
	if p.tok == token.CASE {
		p.next()
		lhs := p.parseLhsList()
		if p.tok == token.ARROW {
			// SendStmt
			if len(lhs) > 1 {
				p.errorExpected(lhs[0].Pos(), "1 expression")
				// continue with first expression
			}
			arrow := p.pos
			p.next()
			rhs := p.parseRhs()
			comm = &ast.SendStmt{Chan: lhs[0], Arrow: arrow, Value: rhs}
		} else {
			// RecvStmt
			if tok := p.tok; tok == token.ASSIGN || tok == token.DEFINE {
				// RecvStmt with assignment
				if len(lhs) > 2 {
					p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
					// continue with first two expressions
					lhs = lhs[0:2]
				}
				pos := p.pos
				p.next()
				rhs := p.parseRhs()
				as := &ast.AssignStmt{Lhs: lhs, TokPos: pos, Tok: tok, Rhs: []ast.Expr{rhs}}
				if tok == token.DEFINE {
					p.shortVarDecl(as, lhs)
				}
				comm = as
			} else {
				// lhs must be single receive operation
				if len(lhs) > 1 {
					p.errorExpected(lhs[0].Pos(), "1 expression")
					// continue with first expression
				}
				comm = &ast.ExprStmt{X: lhs[0]}
			}
		}
	} else {
		// "default:" clause — comm stays nil
		p.expect(token.DEFAULT)
	}
	colon := p.expect(token.COLON)
	body := p.parseStmtList()
	p.closeScope()
	return &ast.CommClause{Case: pos, Comm: comm, Colon: colon, Body: body}
}
// parseSelectStmt parses a select statement and its communication
// clauses.
func (p *parser) parseSelectStmt() *ast.SelectStmt {
	if p.trace {
		defer un(trace(p, "SelectStmt"))
	}
	pos := p.expect(token.SELECT)
	lbrace := p.expect(token.LBRACE)
	var clauses []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		clauses = append(clauses, p.parseCommClause())
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	return &ast.SelectStmt{
		Select: pos,
		Body:   &ast.BlockStmt{Lbrace: lbrace, List: clauses, Rbrace: rbrace},
	}
}
// parseForStmt parses a for statement. If the header turned out to be
// a range clause, the AssignStmt produced by parseSimpleStmt (whose
// rhs is a single unary RANGE expression) is unpacked into a
// RangeStmt; otherwise a regular ForStmt is built.
func (p *parser) parseForStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "ForStmt"))
	}
	pos := p.expect(token.FOR)
	p.openScope()
	defer p.closeScope()
	var s1, s2, s3 ast.Stmt
	var isRange bool
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1 // '{' in the header is never a composite literal
		if p.tok != token.SEMICOLON {
			s2, isRange = p.parseSimpleStmt(rangeOk)
		}
		if !isRange && p.tok == token.SEMICOLON {
			// three-clause form: what was parsed is the init statement
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.SEMICOLON {
				s2, _ = p.parseSimpleStmt(basic)
			}
			p.expectSemi()
			if p.tok != token.LBRACE {
				s3, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}
	body := p.parseBlockStmt()
	p.expectSemi()
	if isRange {
		as := s2.(*ast.AssignStmt)
		// check lhs
		var key, value ast.Expr
		switch len(as.Lhs) {
		case 2:
			key, value = as.Lhs[0], as.Lhs[1]
		case 1:
			key = as.Lhs[0]
		default:
			p.errorExpected(as.Lhs[0].Pos(), "1 or 2 expressions")
			return &ast.BadStmt{From: pos, To: body.End()}
		}
		// parseSimpleStmt returned a right-hand side that
		// is a single unary expression of the form "range x"
		x := as.Rhs[0].(*ast.UnaryExpr).X
		return &ast.RangeStmt{
			For:    pos,
			Key:    key,
			Value:  value,
			TokPos: as.TokPos,
			Tok:    as.Tok,
			X:      x,
			Body:   body,
		}
	}
	// regular for statement
	return &ast.ForStmt{
		For:  pos,
		Init: s1,
		Cond: p.makeExpr(s2),
		Post: s3,
		Body: body,
	}
}
// parseStmt parses a single statement, dispatching on the current token.
// On an unexpected token it reports an error, synchronizes to the next
// likely statement start, and returns an *ast.BadStmt.
func (p *parser) parseStmt() (s ast.Stmt) {
	if p.trace {
		defer un(trace(p, "Statement"))
	}
	switch p.tok {
	case token.CONST, token.TYPE, token.VAR:
		s = &ast.DeclStmt{Decl: p.parseDecl(syncStmt)}
	case
		// tokens that may start an expression
		token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operands
		token.LBRACK, token.STRUCT, // composite types
		token.ADD, token.SUB, token.MUL, token.AND, token.XOR, token.ARROW, token.NOT: // unary operators
		s, _ = p.parseSimpleStmt(labelOk)
		// because of the required look-ahead, labeled statements are
		// parsed by parseSimpleStmt - don't expect a semicolon after
		// them
		if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
			p.expectSemi()
		}
	case token.GO:
		s = p.parseGoStmt()
	case token.DEFER:
		s = p.parseDeferStmt()
	case token.RETURN:
		s = p.parseReturnStmt()
	case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
		s = p.parseBranchStmt(p.tok)
	case token.LBRACE:
		s = p.parseBlockStmt()
		p.expectSemi()
	case token.IF:
		s = p.parseIfStmt()
	case token.SWITCH:
		s = p.parseSwitchStmt()
	case token.SELECT:
		s = p.parseSelectStmt()
	case token.FOR:
		s = p.parseForStmt()
	case token.SEMICOLON:
		s = &ast.EmptyStmt{Semicolon: p.pos}
		p.next()
	case token.RBRACE:
		// a semicolon may be omitted before a closing "}"
		s = &ast.EmptyStmt{Semicolon: p.pos}
	default:
		// no statement found
		pos := p.pos
		p.errorExpected(pos, "statement")
		syncStmt(p)
		s = &ast.BadStmt{From: pos, To: p.pos}
	}
	return
}
// ----------------------------------------------------------------------------
// Declarations
// parseSpecFunction is the type of the per-keyword spec parsers
// (parseImportSpec, parseValueSpec, parseTypeSpec) passed to parseGenDecl.
type parseSpecFunction func(p *parser, doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec
// isValidImport reports whether lit (a string literal as produced by
// go/scanner) unquotes to a non-empty import path containing only
// graphic, non-space characters outside the set the spec forbids.
func isValidImport(lit string) bool {
	const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
	path, _ := strconv.Unquote(lit) // go/scanner returns a legal string literal
	if path == "" {
		return false
	}
	for _, r := range path {
		if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
			return false
		}
	}
	return true
}
// parseImportSpec parses one import spec (optional local name or "."
// followed by a path literal), records it in p.imports, and returns it.
// The keyword and iota arguments of parseSpecFunction are unused here.
func (p *parser) parseImportSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "ImportSpec"))
	}
	var ident *ast.Ident
	switch p.tok {
	case token.PERIOD:
		// dot import: import . "path"
		ident = &ast.Ident{NamePos: p.pos, Name: "."}
		p.next()
	case token.IDENT:
		ident = p.parseIdent()
	}
	var path *ast.BasicLit
	if p.tok == token.STRING {
		if !isValidImport(p.lit) {
			p.error(p.pos, "invalid import path: "+p.lit)
		}
		path = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
	} else {
		p.expect(token.STRING) // use expect() error handling
	}
	p.expectSemi() // call before accessing p.linecomment
	// collect imports
	spec := &ast.ImportSpec{
		Doc:     doc,
		Name:    ident,
		Path:    path,
		Comment: p.lineComment,
	}
	p.imports = append(p.imports, spec)
	return spec
}
// parseValueSpec parses one const or var spec (identifier list, optional
// type, optional initializer list), declares the names in the current
// scope, and returns the spec. keyword is token.CONST or token.VAR; iota
// is the spec's index within its parenthesized group.
func (p *parser) parseValueSpec(doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec {
	if p.trace {
		defer un(trace(p, keyword.String()+"Spec"))
	}
	idents := p.parseIdentList()
	typ := p.tryType()
	var values []ast.Expr
	// initializers are required for the first const spec and for typed
	// consts, and for vars without a type; otherwise they are optional
	if p.tok == token.ASSIGN || keyword == token.CONST && (typ != nil || iota == 0) || keyword == token.VAR && typ == nil {
		p.expect(token.ASSIGN)
		values = p.parseRhsList()
	}
	p.expectSemi() // call before accessing p.linecomment
	// Go spec: The scope of a constant or variable identifier declared inside
	// a function begins at the end of the ConstSpec or VarSpec and ends at
	// the end of the innermost containing block.
	// (Global identifiers are resolved in a separate phase after parsing.)
	spec := &ast.ValueSpec{
		Doc:     doc,
		Names:   idents,
		Type:    typ,
		Values:  values,
		Comment: p.lineComment,
	}
	kind := ast.Con
	if keyword == token.VAR {
		kind = ast.Var
	}
	p.declare(spec, iota, p.topScope, kind, idents...)
	return spec
}
// parseTypeSpec parses one type spec (name followed by a type) and
// declares the name before parsing the type, so the type may refer to
// itself (recursive types).
func (p *parser) parseTypeSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "TypeSpec"))
	}
	ident := p.parseIdent()
	// Go spec: The scope of a type identifier declared inside a function begins
	// at the identifier in the TypeSpec and ends at the end of the innermost
	// containing block.
	// (Global identifiers are resolved in a separate phase after parsing.)
	spec := &ast.TypeSpec{Doc: doc, Name: ident}
	p.declare(spec, nil, p.topScope, ast.Typ, ident)
	spec.Type = p.parseType()
	p.expectSemi() // call before accessing p.linecomment
	spec.Comment = p.lineComment
	return spec
}
// parseGenDecl parses a general declaration (import/const/type/var),
// handling both the single-spec form and the parenthesized group form.
// f parses the individual specs; it receives the spec's index as iota.
func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
	if p.trace {
		defer un(trace(p, "GenDecl("+keyword.String()+")"))
	}
	doc := p.leadComment
	pos := p.expect(keyword)
	var lparen, rparen token.Pos
	var list []ast.Spec
	if p.tok == token.LPAREN {
		// parenthesized group: keyword ( spec; spec; ... )
		lparen = p.pos
		p.next()
		for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
			list = append(list, f(p, p.leadComment, keyword, iota))
		}
		rparen = p.expect(token.RPAREN)
		p.expectSemi()
	} else {
		// single spec, no parentheses
		list = append(list, f(p, nil, keyword, 0))
	}
	return &ast.GenDecl{
		Doc:    doc,
		TokPos: pos,
		Tok:    keyword,
		Lparen: lparen,
		Specs:  list,
		Rparen: rparen,
	}
}
// parseReceiver parses a method receiver and validates that it is a
// single field whose type is an (optionally pointer-dereferenced)
// identifier; invalid receivers are replaced by a BadExpr field so
// parsing can continue.
func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList {
	if p.trace {
		defer un(trace(p, "Receiver"))
	}
	par := p.parseParameters(scope, false)
	// must have exactly one receiver
	if par.NumFields() != 1 {
		p.errorExpected(par.Opening, "exactly one receiver")
		par.List = []*ast.Field{{Type: &ast.BadExpr{From: par.Opening, To: par.Closing + 1}}}
		return par
	}
	// recv type must be of the form ["*"] identifier
	recv := par.List[0]
	base := deref(recv.Type)
	if _, isIdent := base.(*ast.Ident); !isIdent {
		if _, isBad := base.(*ast.BadExpr); !isBad {
			// only report error if it's a new one
			p.errorExpected(base.Pos(), "(unqualified) identifier")
		}
		par.List = []*ast.Field{
			{Type: &ast.BadExpr{From: recv.Pos(), To: recv.End()}},
		}
	}
	return par
}
// parseFuncDecl parses a function or method declaration. Plain functions
// (no receiver) other than init are declared in the package scope; the
// body is optional (forward/assembly declarations have none).
func (p *parser) parseFuncDecl() *ast.FuncDecl {
	if p.trace {
		defer un(trace(p, "FunctionDecl"))
	}
	doc := p.leadComment
	pos := p.expect(token.FUNC)
	scope := ast.NewScope(p.topScope) // function scope
	var recv *ast.FieldList
	if p.tok == token.LPAREN {
		// '(' after "func" means this is a method with a receiver
		recv = p.parseReceiver(scope)
	}
	ident := p.parseIdent()
	params, results := p.parseSignature(scope)
	var body *ast.BlockStmt
	if p.tok == token.LBRACE {
		body = p.parseBody(scope)
	}
	p.expectSemi()
	decl := &ast.FuncDecl{
		Doc:  doc,
		Recv: recv,
		Name: ident,
		Type: &ast.FuncType{
			Func:    pos,
			Params:  params,
			Results: results,
		},
		Body: body,
	}
	if recv == nil {
		// Go spec: The scope of an identifier denoting a constant, type,
		// variable, or function (but not method) declared at top level
		// (outside any function) is the package block.
		//
		// init() functions cannot be referred to and there may
		// be more than one - don't put them in the pkgScope
		if ident.Name != "init" {
			p.declare(decl, nil, p.pkgScope, ast.Fun, ident)
		}
	}
	return decl
}
// parseDecl parses a single declaration. sync is the error-recovery
// function used to skip ahead (syncStmt or syncDecl) when no declaration
// keyword is found, in which case an *ast.BadDecl is returned.
func (p *parser) parseDecl(sync func(*parser)) ast.Decl {
	if p.trace {
		defer un(trace(p, "Declaration"))
	}
	var f parseSpecFunction
	switch p.tok {
	case token.CONST, token.VAR:
		f = (*parser).parseValueSpec
	case token.TYPE:
		f = (*parser).parseTypeSpec
	case token.FUNC:
		return p.parseFuncDecl()
	default:
		pos := p.pos
		p.errorExpected(pos, "declaration")
		sync(p)
		return &ast.BadDecl{From: pos, To: p.pos}
	}
	return p.parseGenDecl(p.tok, f)
}
// ----------------------------------------------------------------------------
// Source files
// parseFile parses a complete source file: package clause, import
// declarations, and the remaining top-level declarations (subject to
// p.mode), then resolves file-local identifiers against the package
// scope. Returns nil if scanning or the package clause already produced
// errors.
func (p *parser) parseFile() *ast.File {
	if p.trace {
		defer un(trace(p, "File"))
	}
	// Don't bother parsing the rest if we had errors scanning the first token.
	// Likely not a Go source file at all.
	if p.errors.Len() != 0 {
		return nil
	}
	// package clause
	doc := p.leadComment
	pos := p.expect(token.PACKAGE)
	// Go spec: The package clause is not a declaration;
	// the package name does not appear in any scope.
	ident := p.parseIdent()
	if ident.Name == "_" {
		p.error(p.pos, "invalid package name _")
	}
	p.expectSemi()
	// Don't bother parsing the rest if we had errors parsing the package clause.
	// Likely not a Go source file at all.
	if p.errors.Len() != 0 {
		return nil
	}
	p.openScope()
	p.pkgScope = p.topScope
	var decls []ast.Decl
	if p.mode&PackageClauseOnly == 0 {
		// import decls
		for p.tok == token.IMPORT {
			decls = append(decls, p.parseGenDecl(token.IMPORT, (*parser).parseImportSpec))
		}
		if p.mode&ImportsOnly == 0 {
			// rest of package body
			for p.tok != token.EOF {
				decls = append(decls, p.parseDecl(syncDecl))
			}
		}
	}
	p.closeScope()
	assert(p.topScope == nil, "unbalanced scopes")
	assert(p.labelScope == nil, "unbalanced label scopes")
	// resolve global identifiers within the same file;
	// identifiers that remain unresolved are compacted to the
	// front of p.unresolved
	i := 0
	for _, ident := range p.unresolved {
		// i <= index for current ident
		assert(ident.Obj == unresolved, "object already resolved")
		ident.Obj = p.pkgScope.Lookup(ident.Name) // also removes unresolved sentinel
		if ident.Obj == nil {
			p.unresolved[i] = ident
			i++
		}
	}
	return &ast.File{
		Doc:        doc,
		Package:    pos,
		Name:       ident,
		Decls:      decls,
		Scope:      p.pkgScope,
		Imports:    p.imports,
		Unresolved: p.unresolved[0:i],
		Comments:   p.comments,
	}
}
go/parser: use method values
R=golang-dev, r
CC=golang-dev
https://codereview.appspot.com/7858045
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package parser implements a parser for Go source files. Input may be
// provided in a variety of forms (see the various Parse* functions); the
// output is an abstract syntax tree (AST) representing the Go source. The
// parser is invoked through one of the Parse* functions.
//
package parser
import (
"fmt"
"go/ast"
"go/scanner"
"go/token"
"strconv"
"strings"
"unicode"
)
// The parser structure holds the parser's internal state.
type parser struct {
	file    *token.File        // source file being parsed
	errors  scanner.ErrorList  // accumulated parse errors
	scanner scanner.Scanner    // token source
	// Tracing/debugging
	mode   Mode // parsing mode
	trace  bool // == (mode & Trace != 0)
	indent int  // indentation used for tracing output
	// Comments
	comments    []*ast.CommentGroup
	leadComment *ast.CommentGroup // last lead comment
	lineComment *ast.CommentGroup // last line comment
	// Next token
	pos token.Pos   // token position
	tok token.Token // one token look-ahead
	lit string      // token literal
	// Error recovery
	// (used to limit the number of calls to syncXXX functions
	// w/o making scanning progress - avoids potential endless
	// loops across multiple parser functions during error recovery)
	syncPos token.Pos // last synchronization position
	syncCnt int       // number of calls to syncXXX without progress
	// Non-syntactic parser control
	exprLev int  // < 0: in control clause, >= 0: in expression
	inRhs   bool // if set, the parser is parsing a rhs expression
	// Ordinary identifier scopes
	pkgScope   *ast.Scope        // pkgScope.Outer == nil
	topScope   *ast.Scope        // top-most scope; may be pkgScope
	unresolved []*ast.Ident      // unresolved identifiers
	imports    []*ast.ImportSpec // list of imports
	// Label scopes
	// (maintained by open/close LabelScope)
	labelScope  *ast.Scope     // label scope for current function
	targetStack [][]*ast.Ident // stack of unresolved labels
}
// init prepares the parser for parsing src attributed to filename within
// fset, configures the scanner (including comment scanning if requested),
// and loads the first token.
func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
	p.file = fset.AddFile(filename, fset.Base(), len(src))
	var m scanner.Mode
	if mode&ParseComments != 0 {
		m = scanner.ScanComments
	}
	// scanner errors are collected into the parser's error list
	eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
	p.scanner.Init(p.file, src, eh, m)
	p.mode = mode
	p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
	p.next()
}
// ----------------------------------------------------------------------------
// Scoping support
// openScope pushes a new identifier scope nested in the current one.
func (p *parser) openScope() {
	p.topScope = ast.NewScope(p.topScope)
}
// closeScope pops the current identifier scope.
func (p *parser) closeScope() {
	p.topScope = p.topScope.Outer
}
// openLabelScope pushes a new label scope and a fresh slot on the stack
// of unresolved label targets (goto/break/continue).
func (p *parser) openLabelScope() {
	p.labelScope = ast.NewScope(p.labelScope)
	p.targetStack = append(p.targetStack, nil)
}
// closeLabelScope resolves the labels targeted within the scope being
// closed, reporting undefined labels if DeclarationErrors is set, then
// pops the label scope.
func (p *parser) closeLabelScope() {
	// resolve labels
	n := len(p.targetStack) - 1
	scope := p.labelScope
	for _, ident := range p.targetStack[n] {
		ident.Obj = scope.Lookup(ident.Name)
		if ident.Obj == nil && p.mode&DeclarationErrors != 0 {
			p.error(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name))
		}
	}
	// pop label scope
	p.targetStack = p.targetStack[0:n]
	p.labelScope = p.labelScope.Outer
}
// declare records decl/data as the declaration of each ident in scope
// with object kind kind, reporting redeclarations (except for the blank
// identifier) when DeclarationErrors is set.
func (p *parser) declare(decl, data interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
	for _, ident := range idents {
		assert(ident.Obj == nil, "identifier already declared or resolved")
		obj := ast.NewObj(kind, ident.Name)
		// remember the corresponding declaration for redeclaration
		// errors and global variable resolution/typechecking phase
		obj.Decl = decl
		obj.Data = data
		ident.Obj = obj
		if ident.Name != "_" {
			if alt := scope.Insert(obj); alt != nil && p.mode&DeclarationErrors != 0 {
				prevDecl := ""
				if pos := alt.Pos(); pos.IsValid() {
					prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.file.Position(pos))
				}
				p.error(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl))
			}
		}
	}
}
// shortVarDecl declares the identifiers on the left side of a short
// variable declaration (:=) in the current scope, allowing redeclaration
// of already-declared names but requiring at least one new non-blank one.
func (p *parser) shortVarDecl(decl *ast.AssignStmt, list []ast.Expr) {
	// Go spec: A short variable declaration may redeclare variables
	// provided they were originally declared in the same block with
	// the same type, and at least one of the non-blank variables is new.
	n := 0 // number of new variables
	for _, x := range list {
		if ident, isIdent := x.(*ast.Ident); isIdent {
			assert(ident.Obj == nil, "identifier already declared or resolved")
			obj := ast.NewObj(ast.Var, ident.Name)
			// remember corresponding assignment for other tools
			obj.Decl = decl
			ident.Obj = obj
			if ident.Name != "_" {
				if alt := p.topScope.Insert(obj); alt != nil {
					ident.Obj = alt // redeclaration
				} else {
					n++ // new declaration
				}
			}
		} else {
			p.errorExpected(x.Pos(), "identifier on left side of :=")
		}
	}
	if n == 0 && p.mode&DeclarationErrors != 0 {
		p.error(list[0].Pos(), "no new variables on left side of :=")
	}
}
// The unresolved object is a sentinel to mark identifiers that have been added
// to the list of unresolved identifiers. The sentinel is only used for verifying
// internal consistency.
var unresolved = new(ast.Object)
// If x is an identifier, tryResolve attempts to resolve x by looking up
// the object it denotes. If no object is found and collectUnresolved is
// set, x is marked as unresolved and collected in the list of unresolved
// identifiers.
//
func (p *parser) tryResolve(x ast.Expr, collectUnresolved bool) {
	// nothing to do if x is not an identifier or the blank identifier
	ident, _ := x.(*ast.Ident)
	if ident == nil {
		return
	}
	assert(ident.Obj == nil, "identifier already declared or resolved")
	if ident.Name == "_" {
		return
	}
	// try to resolve the identifier by walking the scope chain outward
	for s := p.topScope; s != nil; s = s.Outer {
		if obj := s.Lookup(ident.Name); obj != nil {
			ident.Obj = obj
			return
		}
	}
	// all local scopes are known, so any unresolved identifier
	// must be found either in the file scope, package scope
	// (perhaps in another file), or universe scope --- collect
	// them so that they can be resolved later
	if collectUnresolved {
		ident.Obj = unresolved
		p.unresolved = append(p.unresolved, ident)
	}
}
// resolve is tryResolve with unresolved-identifier collection enabled.
func (p *parser) resolve(x ast.Expr) {
	p.tryResolve(x, true)
}
// ----------------------------------------------------------------------------
// Parsing support
// printTrace prints a trace line prefixed with the current source
// position and indented by 2*p.indent dot-columns (the dots string is
// repeated as needed for deep nesting).
func (p *parser) printTrace(a ...interface{}) {
	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
	const n = len(dots)
	pos := p.file.Position(p.pos)
	fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
	i := 2 * p.indent
	for i > n {
		fmt.Print(dots)
		i -= n
	}
	// i <= n
	fmt.Print(dots[0:i])
	fmt.Println(a...)
}
// trace prints an opening trace line for msg, increments the trace
// indentation, and returns p so it composes with un (see below).
func trace(p *parser, msg string) *parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}
// Usage pattern: defer un(trace(p, "..."))
func un(p *parser) {
	p.indent--
	p.printTrace(")")
}
// Advance to the next token.
func (p *parser) next0() {
	// Because of one-token look-ahead, print the previous token
	// when tracing as it provides a more readable output. The
	// very first token (!p.pos.IsValid()) is not initialized
	// (it is token.ILLEGAL), so don't print it .
	if p.trace && p.pos.IsValid() {
		s := p.tok.String()
		switch {
		case p.tok.IsLiteral():
			p.printTrace(s, p.lit)
		case p.tok.IsOperator(), p.tok.IsKeyword():
			p.printTrace("\"" + s + "\"")
		default:
			p.printTrace(s)
		}
	}
	p.pos, p.tok, p.lit = p.scanner.Scan()
}
// Consume a comment and return it and the line on which it ends.
func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
	// /*-style comments may end on a different line than where they start.
	// Scan the comment for '\n' chars and adjust endline accordingly.
	endline = p.file.Line(p.pos)
	if p.lit[1] == '*' {
		// don't use range here - no need to decode Unicode code points
		for i := 0; i < len(p.lit); i++ {
			if p.lit[i] == '\n' {
				endline++
			}
		}
	}
	comment = &ast.Comment{Slash: p.pos, Text: p.lit}
	p.next0()
	return
}
// Consume a group of adjacent comments, add it to the parser's
// comments list, and return it together with the line at which
// the last comment in the group ends. A non-comment token or n
// empty lines terminate a comment group.
//
func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
	var list []*ast.Comment
	endline = p.file.Line(p.pos)
	// keep consuming while each comment starts within n lines of the
	// previous comment's end
	for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
		var comment *ast.Comment
		comment, endline = p.consumeComment()
		list = append(list, comment)
	}
	// add comment group to the comments list
	comments = &ast.CommentGroup{List: list}
	p.comments = append(p.comments, comments)
	return
}
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// and line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
//
func (p *parser) next() {
	p.leadComment = nil
	p.lineComment = nil
	prev := p.pos
	p.next0()
	if p.tok == token.COMMENT {
		var comment *ast.CommentGroup
		var endline int
		if p.file.Line(p.pos) == p.file.Line(prev) {
			// The comment is on same line as the previous token; it
			// cannot be a lead comment but may be a line comment.
			comment, endline = p.consumeCommentGroup(0)
			if p.file.Line(p.pos) != endline {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				p.lineComment = comment
			}
		}
		// consume successor comments, if any
		endline = -1
		for p.tok == token.COMMENT {
			comment, endline = p.consumeCommentGroup(1)
		}
		if endline+1 == p.file.Line(p.pos) {
			// The next token is following on the line immediately after the
			// comment group, thus the last comment group is a lead comment.
			p.leadComment = comment
		}
	}
}
// A bailout panic is raised to indicate early termination.
type bailout struct{}
// error records a parse error at pos. Without AllErrors, errors on the
// same line as the previous one are dropped, and parsing is aborted via
// a bailout panic once more than 10 errors have accumulated.
func (p *parser) error(pos token.Pos, msg string) {
	epos := p.file.Position(pos)
	// If AllErrors is not set, discard errors reported on the same line
	// as the last recorded error and stop parsing if there are more than
	// 10 errors.
	if p.mode&AllErrors == 0 {
		n := len(p.errors)
		if n > 0 && p.errors[n-1].Pos.Line == epos.Line {
			return // discard - likely a spurious error
		}
		if n > 10 {
			panic(bailout{})
		}
	}
	p.errors.Add(epos, msg)
}
// errorExpected reports an "expected ..." error at pos, appending a
// description of the actually-found token when the error is at the
// current position.
func (p *parser) errorExpected(pos token.Pos, msg string) {
	msg = "expected " + msg
	if pos == p.pos {
		// the error happened at the current position;
		// make the error message more specific
		if p.tok == token.SEMICOLON && p.lit == "\n" {
			// an automatically inserted semicolon
			msg += ", found newline"
		} else {
			msg += ", found '" + p.tok.String() + "'"
			if p.tok.IsLiteral() {
				msg += " " + p.lit
			}
		}
	}
	p.error(pos, msg)
}
// expect consumes the current token, reporting an error first if it is
// not tok, and returns its position.
func (p *parser) expect(tok token.Token) token.Pos {
	pos := p.pos
	if p.tok != tok {
		p.errorExpected(pos, "'"+tok.String()+"'")
	}
	p.next() // make progress
	return pos
}
// expectClosing is like expect but provides a better error message
// for the common case of a missing comma before a newline.
//
func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
	if p.tok != tok && p.tok == token.SEMICOLON && p.lit == "\n" {
		p.error(p.pos, "missing ',' before newline in "+context)
		p.next()
	}
	return p.expect(tok)
}
// expectSemi consumes a semicolon, tolerating its absence before a
// closing ')' or '}'; otherwise it reports an error and resynchronizes
// at the next statement boundary.
func (p *parser) expectSemi() {
	// semicolon is optional before a closing ')' or '}'
	if p.tok != token.RPAREN && p.tok != token.RBRACE {
		if p.tok == token.SEMICOLON {
			p.next()
		} else {
			p.errorExpected(p.pos, "';'")
			syncStmt(p)
		}
	}
}
// atComma reports whether a comma is (or should be treated as) present
// in the given context; a newline-inserted semicolon is diagnosed and
// treated as a comma so parsing can continue.
func (p *parser) atComma(context string) bool {
	if p.tok == token.COMMA {
		return true
	}
	if p.tok == token.SEMICOLON && p.lit == "\n" {
		p.error(p.pos, "missing ',' before newline in "+context)
		return true // "insert" the comma and continue
	}
	return false
}
// assert panics with a "go/parser internal error" message when the
// invariant cond does not hold; it is a no-op otherwise.
func assert(cond bool, msg string) {
	if cond {
		return
	}
	panic("go/parser internal error: " + msg)
}
// syncStmt advances to the next statement.
// Used for synchronization after an error.
//
func syncStmt(p *parser) {
	for {
		switch p.tok {
		case token.BREAK, token.CONST, token.CONTINUE, token.DEFER,
			token.FALLTHROUGH, token.FOR, token.GO, token.GOTO,
			token.IF, token.RETURN, token.SELECT, token.SWITCH,
			token.TYPE, token.VAR:
			// Return only if parser made some progress since last
			// sync or if it has not reached 10 sync calls without
			// progress. Otherwise consume at least one token to
			// avoid an endless parser loop (it is possible that
			// both parseOperand and parseStmt call syncStmt and
			// correctly do not advance, thus the need for the
			// invocation limit p.syncCnt).
			if p.pos == p.syncPos && p.syncCnt < 10 {
				p.syncCnt++
				return
			}
			if p.pos > p.syncPos {
				p.syncPos = p.pos
				p.syncCnt = 0
				return
			}
			// Reaching here indicates a parser bug, likely an
			// incorrect token list in this function, but it only
			// leads to skipping of possibly correct code if a
			// previous error is present, and thus is preferred
			// over a non-terminating parse.
		case token.EOF:
			return
		}
		p.next()
	}
}
// syncDecl advances to the next declaration.
// Used for synchronization after an error.
//
func syncDecl(p *parser) {
	for {
		switch p.tok {
		case token.CONST, token.TYPE, token.VAR:
			// see comments in syncStmt
			if p.pos == p.syncPos && p.syncCnt < 10 {
				p.syncCnt++
				return
			}
			if p.pos > p.syncPos {
				p.syncPos = p.pos
				p.syncCnt = 0
				return
			}
		case token.EOF:
			return
		}
		p.next()
	}
}
// ----------------------------------------------------------------------------
// Identifiers
// parseIdent parses an identifier and returns it; on any other token it
// reports an error via expect and returns a placeholder "_" identifier
// at the original position.
func (p *parser) parseIdent() *ast.Ident {
	pos := p.pos
	if p.tok != token.IDENT {
		p.expect(token.IDENT) // use expect() error handling; makes progress
		return &ast.Ident{NamePos: pos, Name: "_"}
	}
	name := p.lit
	p.next()
	return &ast.Ident{NamePos: pos, Name: name}
}
// parseIdentList parses a comma-separated list of one or more
// identifiers.
func (p *parser) parseIdentList() (list []*ast.Ident) {
	if p.trace {
		defer un(trace(p, "IdentList"))
	}
	list = append(list, p.parseIdent())
	for p.tok == token.COMMA {
		p.next()
		list = append(list, p.parseIdent())
	}
	return
}
// ----------------------------------------------------------------------------
// Common productions
// If lhs is set, result list elements which are identifiers are not resolved.
// parseExprList parses a comma-separated list of one or more expressions.
// If lhs is set, result list elements which are identifiers are not resolved.
func (p *parser) parseExprList(lhs bool) (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "ExpressionList"))
	}
	list = append(list, p.checkExpr(p.parseExpr(lhs)))
	for p.tok == token.COMMA {
		p.next()
		list = append(list, p.checkExpr(p.parseExpr(lhs)))
	}
	return
}
// parseLhsList parses an expression list appearing on the left-hand side
// of an assignment or similar construct, deferring identifier resolution
// when the following token shows the identifiers are being declared.
func (p *parser) parseLhsList() []ast.Expr {
	old := p.inRhs
	p.inRhs = false
	list := p.parseExprList(true)
	switch p.tok {
	case token.DEFINE:
		// lhs of a short variable declaration
		// but doesn't enter scope until later:
		// caller must call p.shortVarDecl(p.makeIdentList(list))
		// at appropriate time.
	case token.COLON:
		// lhs of a label declaration or a communication clause of a select
		// statement (parseLhsList is not called when parsing the case clause
		// of a switch statement):
		// - labels are declared by the caller of parseLhsList
		// - for communication clauses, if there is a stand-alone identifier
		//   followed by a colon, we have a syntax error; there is no need
		//   to resolve the identifier in that case
	default:
		// identifiers must be declared elsewhere
		for _, x := range list {
			p.resolve(x)
		}
	}
	p.inRhs = old
	return list
}
// parseRhsList parses an expression list in right-hand-side context
// (identifiers are resolved), preserving the surrounding inRhs state.
func (p *parser) parseRhsList() []ast.Expr {
	old := p.inRhs
	p.inRhs = true
	list := p.parseExprList(false)
	p.inRhs = old
	return list
}
// ----------------------------------------------------------------------------
// Types
// parseType parses a type; when no type is present it reports an error,
// advances one token, and returns an *ast.BadExpr.
func (p *parser) parseType() ast.Expr {
	if p.trace {
		defer un(trace(p, "Type"))
	}
	typ := p.tryType()
	if typ == nil {
		pos := p.pos
		p.errorExpected(pos, "type")
		p.next() // make progress
		return &ast.BadExpr{From: pos, To: p.pos}
	}
	return typ
}
// If the result is an identifier, it is not resolved.
// parseTypeName parses a (possibly package-qualified) type name.
// If the result is an identifier, it is not resolved.
func (p *parser) parseTypeName() ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeName"))
	}
	ident := p.parseIdent()
	// don't resolve ident yet - it may be a parameter or field name
	if p.tok == token.PERIOD {
		// ident is a package name
		p.next()
		p.resolve(ident)
		sel := p.parseIdent()
		return &ast.SelectorExpr{X: ident, Sel: sel}
	}
	return ident
}
// parseArrayType parses an array or slice type; a slice is represented
// by a nil length, and "..." lengths are accepted for array literals.
func (p *parser) parseArrayType() ast.Expr {
	if p.trace {
		defer un(trace(p, "ArrayType"))
	}
	lbrack := p.expect(token.LBRACK)
	var len ast.Expr
	// always permit ellipsis for more fault-tolerant parsing
	if p.tok == token.ELLIPSIS {
		len = &ast.Ellipsis{Ellipsis: p.pos}
		p.next()
	} else if p.tok != token.RBRACK {
		len = p.parseRhs()
	}
	p.expect(token.RBRACK)
	elt := p.parseType()
	return &ast.ArrayType{Lbrack: lbrack, Len: len, Elt: elt}
}
// makeIdentList converts an expression list to an identifier list,
// reporting an error and substituting a "_" placeholder for any element
// that is not an identifier (BadExpr elements are already reported).
func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident {
	idents := make([]*ast.Ident, len(list))
	for i, x := range list {
		ident, isIdent := x.(*ast.Ident)
		if !isIdent {
			if _, isBad := x.(*ast.BadExpr); !isBad {
				// only report error if it's a new one
				p.errorExpected(x.Pos(), "identifier")
			}
			ident = &ast.Ident{NamePos: x.Pos(), Name: "_"}
		}
		idents[i] = ident
	}
	return idents
}
// parseFieldDecl parses one struct field declaration: either an
// identifier list with a type, or a single anonymous (embedded) field,
// each optionally followed by a tag. Field names are declared in scope.
func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
	if p.trace {
		defer un(trace(p, "FieldDecl"))
	}
	doc := p.leadComment
	// FieldDecl
	list, typ := p.parseVarList(false)
	// Tag
	var tag *ast.BasicLit
	if p.tok == token.STRING {
		tag = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
	}
	// analyze case
	var idents []*ast.Ident
	if typ != nil {
		// IdentifierList Type
		idents = p.makeIdentList(list)
	} else {
		// ["*"] TypeName (AnonymousField)
		typ = list[0] // we always have at least one element
		if n := len(list); n > 1 || !isTypeName(deref(typ)) {
			pos := typ.Pos()
			p.errorExpected(pos, "anonymous field")
			typ = &ast.BadExpr{From: pos, To: list[n-1].End()}
		}
	}
	p.expectSemi() // call before accessing p.linecomment
	field := &ast.Field{Doc: doc, Names: idents, Type: typ, Tag: tag, Comment: p.lineComment}
	p.declare(field, nil, scope, ast.Var, idents...)
	p.resolve(typ)
	return field
}
// parseStructType parses a struct type; field names live in their own
// struct scope, separate from the surrounding identifier scopes.
func (p *parser) parseStructType() *ast.StructType {
	if p.trace {
		defer un(trace(p, "StructType"))
	}
	pos := p.expect(token.STRUCT)
	lbrace := p.expect(token.LBRACE)
	scope := ast.NewScope(nil) // struct scope
	var list []*ast.Field
	for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
		// a field declaration cannot start with a '(' but we accept
		// it here for more robust parsing and better error messages
		// (parseFieldDecl will check and complain if necessary)
		list = append(list, p.parseFieldDecl(scope))
	}
	rbrace := p.expect(token.RBRACE)
	return &ast.StructType{
		Struct: pos,
		Fields: &ast.FieldList{
			Opening: lbrace,
			List:    list,
			Closing: rbrace,
		},
	}
}
// parsePointerType parses a pointer type ("*" followed by a base type).
func (p *parser) parsePointerType() *ast.StarExpr {
	if p.trace {
		defer un(trace(p, "PointerType"))
	}
	star := p.expect(token.MUL)
	base := p.parseType()
	return &ast.StarExpr{Star: star, X: base}
}
// If the result is an identifier, it is not resolved.
// tryVarType parses a type in a variable/parameter context, additionally
// accepting "...T" variadic types when isParam is set; returns nil if no
// type is present. If the result is an identifier, it is not resolved.
func (p *parser) tryVarType(isParam bool) ast.Expr {
	if isParam && p.tok == token.ELLIPSIS {
		pos := p.pos
		p.next()
		typ := p.tryIdentOrType() // don't use parseType so we can provide better error message
		if typ != nil {
			p.resolve(typ)
		} else {
			p.error(pos, "'...' parameter is missing type")
			typ = &ast.BadExpr{From: pos, To: p.pos}
		}
		return &ast.Ellipsis{Ellipsis: pos, Elt: typ}
	}
	return p.tryIdentOrType()
}
// If the result is an identifier, it is not resolved.
// parseVarType is like tryVarType but requires a type: when none is
// found it reports an error, advances one token, and returns a BadExpr.
// If the result is an identifier, it is not resolved.
func (p *parser) parseVarType(isParam bool) ast.Expr {
	typ := p.tryVarType(isParam)
	if typ == nil {
		pos := p.pos
		p.errorExpected(pos, "type")
		p.next() // make progress
		typ = &ast.BadExpr{From: pos, To: p.pos}
	}
	return typ
}
// If any of the results are identifiers, they are not resolved.
// parseVarList parses a comma-separated list of types (which may in fact
// be an identifier list followed by a type; the caller disambiguates via
// the returned typ). If any of the results are identifiers, they are not
// resolved.
func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) {
	if p.trace {
		defer un(trace(p, "VarList"))
	}
	// a list of identifiers looks like a list of type names
	//
	// parse/tryVarType accepts any type (including parenthesized
	// ones) even though the syntax does not permit them here: we
	// accept them all for more robust parsing and complain later
	for typ := p.parseVarType(isParam); typ != nil; {
		list = append(list, typ)
		if p.tok != token.COMMA {
			break
		}
		p.next()
		typ = p.tryVarType(isParam) // maybe nil as in: func f(int,) {}
	}
	// if we had a list of identifiers, it must be followed by a type
	typ = p.tryVarType(isParam)
	return
}
// parseParameterList parses the fields of a parameter list, handling
// both named parameters ("a, b int, c string") and anonymous type-only
// parameters ("int, string"); named parameters are declared in scope.
func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params []*ast.Field) {
	if p.trace {
		defer un(trace(p, "ParameterList"))
	}
	// ParameterDecl
	list, typ := p.parseVarList(ellipsisOk)
	// analyze case
	if typ != nil {
		// IdentifierList Type
		idents := p.makeIdentList(list)
		field := &ast.Field{Names: idents, Type: typ}
		params = append(params, field)
		// Go spec: The scope of an identifier denoting a function
		// parameter or result variable is the function body.
		p.declare(field, nil, scope, ast.Var, idents...)
		p.resolve(typ)
		if p.tok == token.COMMA {
			p.next()
		}
		// remaining groups are unambiguous: "idents type" pairs
		for p.tok != token.RPAREN && p.tok != token.EOF {
			idents := p.parseIdentList()
			typ := p.parseVarType(ellipsisOk)
			field := &ast.Field{Names: idents, Type: typ}
			params = append(params, field)
			// Go spec: The scope of an identifier denoting a function
			// parameter or result variable is the function body.
			p.declare(field, nil, scope, ast.Var, idents...)
			p.resolve(typ)
			if !p.atComma("parameter list") {
				break
			}
			p.next()
		}
	} else {
		// Type { "," Type } (anonymous parameters)
		params = make([]*ast.Field, len(list))
		for i, typ := range list {
			p.resolve(typ)
			params[i] = &ast.Field{Type: typ}
		}
	}
	return
}
// parseParameters parses a parenthesized (possibly empty) parameter list.
func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList {
	if p.trace {
		defer un(trace(p, "Parameters"))
	}
	var params []*ast.Field
	lparen := p.expect(token.LPAREN)
	if p.tok != token.RPAREN {
		params = p.parseParameterList(scope, ellipsisOk)
	}
	rparen := p.expect(token.RPAREN)
	return &ast.FieldList{Opening: lparen, List: params, Closing: rparen}
}
// parseResult parses a function result: either a parenthesized parameter
// list, a single bare type, or nothing (nil result).
func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList {
	if p.trace {
		defer un(trace(p, "Result"))
	}
	if p.tok == token.LPAREN {
		return p.parseParameters(scope, false)
	}
	typ := p.tryType()
	if typ != nil {
		// single unnamed result type
		list := make([]*ast.Field, 1)
		list[0] = &ast.Field{Type: typ}
		return &ast.FieldList{List: list}
	}
	return nil
}
// parseSignature parses a function signature: parameters (ellipsis
// allowed) followed by an optional result.
func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) {
	if p.trace {
		defer un(trace(p, "Signature"))
	}
	params = p.parseParameters(scope, true)
	results = p.parseResult(scope)
	return
}
// parseFuncType parses a function type, returning it together with the
// newly created function scope (used by callers that parse a body).
func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) {
	if p.trace {
		defer un(trace(p, "FuncType"))
	}
	pos := p.expect(token.FUNC)
	scope := ast.NewScope(p.topScope) // function scope
	params, results := p.parseSignature(scope)
	return &ast.FuncType{Func: pos, Params: params, Results: results}, scope
}
// parseMethodSpec parses one element of an interface body: either a
// method (name followed by a signature) or an embedded interface name.
// Method names are declared in the interface's scope.
func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
	if p.trace {
		defer un(trace(p, "MethodSpec"))
	}
	doc := p.leadComment
	var idents []*ast.Ident
	var typ ast.Expr
	x := p.parseTypeName()
	if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN {
		// method
		idents = []*ast.Ident{ident}
		scope := ast.NewScope(nil) // method scope
		params, results := p.parseSignature(scope)
		typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
	} else {
		// embedded interface
		typ = x
		p.resolve(typ)
	}
	p.expectSemi() // call before accessing p.linecomment
	spec := &ast.Field{Doc: doc, Names: idents, Type: typ, Comment: p.lineComment}
	p.declare(spec, nil, scope, ast.Fun, idents...)
	return spec
}
// parseInterfaceType parses an interface type literal:
// "interface" "{" { MethodSpec } "}".
func (p *parser) parseInterfaceType() *ast.InterfaceType {
	if p.trace {
		defer un(trace(p, "InterfaceType"))
	}
	pos := p.expect(token.INTERFACE)
	lbrace := p.expect(token.LBRACE)
	scope := ast.NewScope(nil) // interface scope
	var list []*ast.Field
	// each element starts with an identifier (method name or embedded type name)
	for p.tok == token.IDENT {
		list = append(list, p.parseMethodSpec(scope))
	}
	rbrace := p.expect(token.RBRACE)
	return &ast.InterfaceType{
		Interface: pos,
		Methods: &ast.FieldList{
			Opening: lbrace,
			List:    list,
			Closing: rbrace,
		},
	}
}
// parseMapType parses a map type: "map" "[" KeyType "]" ValueType.
func (p *parser) parseMapType() *ast.MapType {
	if p.trace {
		defer un(trace(p, "MapType"))
	}
	pos := p.expect(token.MAP)
	p.expect(token.LBRACK)
	key := p.parseType()
	p.expect(token.RBRACK)
	value := p.parseType()
	return &ast.MapType{Map: pos, Key: key, Value: value}
}
// parseChanType parses a channel type: "chan" T, "chan" "<-" T (send-only),
// or "<-" "chan" T (receive-only). The direction bits are recorded in Dir.
func (p *parser) parseChanType() *ast.ChanType {
	if p.trace {
		defer un(trace(p, "ChanType"))
	}
	pos := p.pos
	dir := ast.SEND | ast.RECV // bidirectional unless an arrow is seen
	var arrow token.Pos
	if p.tok == token.CHAN {
		p.next()
		if p.tok == token.ARROW {
			// chan<- T: send-only
			arrow = p.pos
			p.next()
			dir = ast.SEND
		}
	} else {
		// <-chan T: receive-only
		arrow = p.expect(token.ARROW)
		p.expect(token.CHAN)
		dir = ast.RECV
	}
	value := p.parseType()
	return &ast.ChanType{Begin: pos, Arrow: arrow, Dir: dir, Value: value}
}
// tryIdentOrType attempts to parse a type and returns nil if the current
// token cannot start one. Unlike tryType, it does not resolve identifiers.
// If the result is an identifier, it is not resolved.
func (p *parser) tryIdentOrType() ast.Expr {
	switch p.tok {
	case token.IDENT:
		return p.parseTypeName()
	case token.LBRACK:
		return p.parseArrayType()
	case token.STRUCT:
		return p.parseStructType()
	case token.MUL:
		return p.parsePointerType()
	case token.FUNC:
		// the function scope is discarded; only the type matters here
		typ, _ := p.parseFuncType()
		return typ
	case token.INTERFACE:
		return p.parseInterfaceType()
	case token.MAP:
		return p.parseMapType()
	case token.CHAN, token.ARROW:
		return p.parseChanType()
	case token.LPAREN:
		// parenthesized type
		lparen := p.pos
		p.next()
		typ := p.parseType()
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: typ, Rparen: rparen}
	}
	// no type found
	return nil
}
// tryType attempts to parse a type; it returns nil if no type is found.
// Unlike tryIdentOrType, any type found is also resolved.
func (p *parser) tryType() ast.Expr {
	if typ := p.tryIdentOrType(); typ != nil {
		p.resolve(typ)
		return typ
	}
	return nil
}
// ----------------------------------------------------------------------------
// Blocks

// parseStmtList parses statements until a token that terminates a
// statement list (case/default label, closing brace, or EOF).
func (p *parser) parseStmtList() (list []ast.Stmt) {
	if p.trace {
		defer un(trace(p, "StatementList"))
	}
	for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
		list = append(list, p.parseStmt())
	}
	return
}
// parseBody parses a function body using the given (already created)
// function scope. Note: the scope is installed directly rather than via
// openScope, but closed with the usual closeScope.
func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "Body"))
	}
	lbrace := p.expect(token.LBRACE)
	p.topScope = scope // open function scope
	p.openLabelScope()
	list := p.parseStmtList()
	p.closeLabelScope()
	p.closeScope()
	rbrace := p.expect(token.RBRACE)
	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}
// parseBlockStmt parses a braced statement block, opening a fresh
// scope for its duration (unlike parseBody, which receives one).
func (p *parser) parseBlockStmt() *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "BlockStmt"))
	}
	lbrace := p.expect(token.LBRACE)
	p.openScope()
	list := p.parseStmtList()
	p.closeScope()
	rbrace := p.expect(token.RBRACE)
	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}
// ----------------------------------------------------------------------------
// Expressions

// parseFuncTypeOrLit parses either a bare function type or, if a body
// follows, a function literal.
func (p *parser) parseFuncTypeOrLit() ast.Expr {
	if p.trace {
		defer un(trace(p, "FuncTypeOrLit"))
	}
	typ, scope := p.parseFuncType()
	if p.tok != token.LBRACE {
		// function type only
		return typ
	}
	// function literal: the body is parsed at increased expression nesting
	// level so composite literals inside it are permitted
	p.exprLev++
	body := p.parseBody(scope)
	p.exprLev--
	return &ast.FuncLit{Type: typ, Body: body}
}
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T. Callers must verify the result.
// If lhs is set and the result is an identifier, it is not resolved.
//
func (p *parser) parseOperand(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "Operand"))
	}
	switch p.tok {
	case token.IDENT:
		x := p.parseIdent()
		if !lhs {
			p.resolve(x)
		}
		return x
	case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
		// literal operand
		x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
		return x
	case token.LPAREN:
		lparen := p.pos
		p.next()
		p.exprLev++
		x := p.parseRhsOrType() // types may be parenthesized: (some type)
		p.exprLev--
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}
	case token.FUNC:
		return p.parseFuncTypeOrLit()
	}
	// not an ordinary operand: try a type (composite literal type or conversion)
	if typ := p.tryIdentOrType(); typ != nil {
		// could be type for composite literal or conversion
		// (identifiers were already handled in the switch above)
		_, isIdent := typ.(*ast.Ident)
		assert(!isIdent, "type cannot be identifier")
		return typ
	}
	// we have an error
	pos := p.pos
	p.errorExpected(pos, "operand")
	syncStmt(p) // skip to a plausible statement start for recovery
	return &ast.BadExpr{From: pos, To: p.pos}
}
// parseSelector parses the identifier following a '.' and wraps the
// operand x in a selector expression.
func (p *parser) parseSelector(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "Selector"))
	}
	return &ast.SelectorExpr{X: x, Sel: p.parseIdent()}
}
// parseTypeAssertion parses the "(T)" or "(type)" part of a type
// assertion on x. For the type-switch form x.(type), Type is left nil.
func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeAssertion"))
	}
	p.expect(token.LPAREN)
	var typ ast.Expr
	if p.tok == token.TYPE {
		// type switch: typ == nil
		p.next()
	} else {
		typ = p.parseType()
	}
	p.expect(token.RPAREN)
	return &ast.TypeAssertExpr{X: x, Type: typ}
}
// parseIndexOrSlice parses the bracketed suffix of x and returns either
// an index expression x[i] or a slice expression x[low:high]; the
// presence of a ':' decides which.
func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "IndexOrSlice"))
	}
	lbrack := p.expect(token.LBRACK)
	p.exprLev++
	var low, high ast.Expr
	isSlice := false
	if p.tok != token.COLON {
		low = p.parseRhs()
	}
	if p.tok == token.COLON {
		isSlice = true
		p.next()
		if p.tok != token.RBRACK {
			high = p.parseRhs()
		}
	}
	p.exprLev--
	rbrack := p.expect(token.RBRACK)
	if isSlice {
		return &ast.SliceExpr{X: x, Lbrack: lbrack, Low: low, High: high, Rbrack: rbrack}
	}
	return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: low, Rbrack: rbrack}
}
// parseCallOrConversion parses the parenthesized argument list of a call
// (or conversion) applied to fun, including a trailing "..." for variadic
// calls. An ellipsis terminates the argument loop on the next iteration.
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
	if p.trace {
		defer un(trace(p, "CallOrConversion"))
	}
	lparen := p.expect(token.LPAREN)
	p.exprLev++
	var list []ast.Expr
	var ellipsis token.Pos
	for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
		list = append(list, p.parseRhsOrType()) // builtins may expect a type: make(some type, ...)
		if p.tok == token.ELLIPSIS {
			ellipsis = p.pos
			p.next()
		}
		if !p.atComma("argument list") {
			break
		}
		p.next()
	}
	p.exprLev--
	rparen := p.expectClosing(token.RPAREN, "argument list")
	return &ast.CallExpr{Fun: fun, Lparen: lparen, Args: list, Ellipsis: ellipsis, Rparen: rparen}
}
// parseElement parses one element of a composite literal: a nested
// literal value, a plain value, or (if keyOk is set) a key:value pair.
func (p *parser) parseElement(keyOk bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "Element"))
	}
	if p.tok == token.LBRACE {
		// nested literal value with elided type
		return p.parseLiteralValue(nil)
	}
	// Because the parser doesn't know the composite literal type, it cannot
	// know if a key that's an identifier is a struct field name or a name
	// denoting a value. The former is not resolved by the parser or the
	// resolver.
	//
	// Instead, _try_ to resolve such a key if possible. If it resolves,
	// it a) has correctly resolved, or b) incorrectly resolved because
	// the key is a struct field with a name matching another identifier.
	// In the former case we are done, and in the latter case we don't
	// care because the type checker will do a separate field lookup.
	//
	// If the key does not resolve, it a) must be defined at the top
	// level in another file of the same package, the universe scope, or be
	// undeclared; or b) it is a struct field. In the former case, the type
	// checker can do a top-level lookup, and in the latter case it will do
	// a separate field lookup.
	x := p.checkExpr(p.parseExpr(keyOk))
	if keyOk {
		if p.tok == token.COLON {
			colon := p.pos
			p.next()
			// Try to resolve the key but don't collect it
			// as unresolved identifier if it fails so that
			// we don't get (possibly false) errors about
			// undeclared names.
			p.tryResolve(x, false)
			return &ast.KeyValueExpr{Key: x, Colon: colon, Value: p.parseElement(false)}
		}
		p.resolve(x) // not a key
	}
	return x
}
// parseElementList parses the comma-separated element list of a
// composite literal, stopping at the closing brace or EOF.
func (p *parser) parseElementList() (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "ElementList"))
	}
	for p.tok != token.RBRACE && p.tok != token.EOF {
		list = append(list, p.parseElement(true))
		if !p.atComma("composite literal") {
			break
		}
		p.next()
	}
	return
}
// parseLiteralValue parses the braced value of a composite literal for
// the given type (typ may be nil for a nested literal with elided type).
func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "LiteralValue"))
	}
	lbrace := p.expect(token.LBRACE)
	var elts []ast.Expr
	p.exprLev++
	if p.tok != token.RBRACE {
		elts = p.parseElementList()
	}
	p.exprLev--
	rbrace := p.expectClosing(token.RBRACE, "composite literal")
	return &ast.CompositeLit{Type: typ, Lbrace: lbrace, Elts: elts, Rbrace: rbrace}
}
// checkExpr checks that x is an expression (and not a type).
// If it is not, an error is reported and a BadExpr covering the same
// source range is returned instead.
func (p *parser) checkExpr(x ast.Expr) ast.Expr {
	switch unparen(x).(type) {
	case *ast.BadExpr:
	case *ast.Ident:
	case *ast.BasicLit:
	case *ast.FuncLit:
	case *ast.CompositeLit:
	case *ast.ParenExpr:
		// unparen already stripped all parentheses
		panic("unreachable")
	case *ast.SelectorExpr:
	case *ast.IndexExpr:
	case *ast.SliceExpr:
	case *ast.TypeAssertExpr:
		// If t.Type == nil we have a type assertion of the form
		// y.(type), which is only allowed in type switch expressions.
		// It's hard to exclude those but for the case where we are in
		// a type switch. Instead be lenient and test this in the type
		// checker.
	case *ast.CallExpr:
	case *ast.StarExpr:
	case *ast.UnaryExpr:
	case *ast.BinaryExpr:
	default:
		// all other nodes are not proper expressions
		p.errorExpected(x.Pos(), "expression")
		x = &ast.BadExpr{From: x.Pos(), To: x.End()}
	}
	return x
}
// isTypeName returns true iff x is a (qualified) TypeName.
func isTypeName(x ast.Expr) bool {
switch t := x.(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.SelectorExpr:
_, isIdent := t.X.(*ast.Ident)
return isIdent
default:
return false // all other nodes are not type names
}
return true
}
// isLiteralType returns true iff x is a legal composite literal type.
func isLiteralType(x ast.Expr) bool {
switch t := x.(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.SelectorExpr:
_, isIdent := t.X.(*ast.Ident)
return isIdent
case *ast.ArrayType:
case *ast.StructType:
case *ast.MapType:
default:
return false // all other nodes are not legal composite literal types
}
return true
}
// If x is of the form *T, deref returns T, otherwise it returns x.
func deref(x ast.Expr) ast.Expr {
if p, isPtr := x.(*ast.StarExpr); isPtr {
x = p.X
}
return x
}
// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
func unparen(x ast.Expr) ast.Expr {
if p, isParen := x.(*ast.ParenExpr); isParen {
x = unparen(p.X)
}
return x
}
// checkExprOrType checks that x is an expression or a type
// (and not a raw type such as [...]T).
//
// The only raw type rejected here is an array type whose length is an
// ellipsis ("..."), which is legal solely inside a composite literal.
func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
	switch t := unparen(x).(type) {
	case *ast.ParenExpr:
		// unparen already stripped all parentheses
		panic("unreachable")
	case *ast.UnaryExpr:
	case *ast.ArrayType:
		// renamed from "len" to avoid shadowing the builtin
		if ellipsis, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
			p.error(ellipsis.Pos(), "expected array length, found '...'")
			x = &ast.BadExpr{From: x.Pos(), To: x.End()}
		}
	}
	// all other nodes are expressions or types
	return x
}
// parsePrimaryExpr parses an operand followed by any number of suffixes:
// selectors/type assertions (.), index/slice ([), calls ((), and
// composite literal values ({). After the first suffix the operand can no
// longer be a bare lhs identifier, so lhs is cleared.
// If lhs is set and the result is an identifier, it is not resolved.
func (p *parser) parsePrimaryExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "PrimaryExpr"))
	}
	x := p.parseOperand(lhs)
L:
	for {
		switch p.tok {
		case token.PERIOD:
			p.next()
			if lhs {
				p.resolve(x)
			}
			switch p.tok {
			case token.IDENT:
				x = p.parseSelector(p.checkExpr(x))
			case token.LPAREN:
				x = p.parseTypeAssertion(p.checkExpr(x))
			default:
				pos := p.pos
				p.errorExpected(pos, "selector or type assertion")
				p.next() // make progress
				x = &ast.BadExpr{From: pos, To: p.pos}
			}
		case token.LBRACK:
			if lhs {
				p.resolve(x)
			}
			x = p.parseIndexOrSlice(p.checkExpr(x))
		case token.LPAREN:
			if lhs {
				p.resolve(x)
			}
			x = p.parseCallOrConversion(p.checkExprOrType(x))
		case token.LBRACE:
			// '{' only starts a composite literal here if x can be a
			// literal type and we're not in a control-clause header
			// (exprLev < 0) with a bare type name
			if isLiteralType(x) && (p.exprLev >= 0 || !isTypeName(x)) {
				if lhs {
					p.resolve(x)
				}
				x = p.parseLiteralValue(x)
			} else {
				break L
			}
		default:
			break L
		}
		lhs = false // no need to try to resolve again
	}
	return x
}
// parseUnaryExpr parses a unary expression, handling the prefix
// operators, the ambiguous "<-" (receive vs. channel type), and "*"
// (deref vs. pointer type).
// If lhs is set and the result is an identifier, it is not resolved.
func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "UnaryExpr"))
	}
	switch p.tok {
	case token.ADD, token.SUB, token.NOT, token.XOR, token.AND:
		pos, op := p.pos, p.tok
		p.next()
		x := p.parseUnaryExpr(false)
		return &ast.UnaryExpr{OpPos: pos, Op: op, X: p.checkExpr(x)}
	case token.ARROW:
		// channel type or receive expression
		arrow := p.pos
		p.next()
		// If the next token is token.CHAN we still don't know if it
		// is a channel type or a receive operation - we only know
		// once we have found the end of the unary expression. There
		// are two cases:
		//
		//   <- type  => (<-type) must be channel type
		//   <- expr  => <-(expr) is a receive from an expression
		//
		// In the first case, the arrow must be re-associated with
		// the channel type parsed already:
		//
		//   <- (chan type)    =>  (<-chan type)
		//   <- (chan<- type)  =>  (<-chan (<-type))
		x := p.parseUnaryExpr(false)
		// determine which case we have
		if typ, ok := x.(*ast.ChanType); ok {
			// (<-type)
			// re-associate position info and <-
			// walk down the chain of channel types, pushing the
			// receive-direction arrow inward one level at a time
			dir := ast.SEND
			for ok && dir == ast.SEND {
				if typ.Dir == ast.RECV {
					// error: (<-type) is (<-(<-chan T))
					p.errorExpected(typ.Arrow, "'chan'")
				}
				arrow, typ.Begin, typ.Arrow = typ.Arrow, arrow, arrow
				dir, typ.Dir = typ.Dir, ast.RECV
				typ, ok = typ.Value.(*ast.ChanType)
			}
			if dir == ast.SEND {
				p.errorExpected(arrow, "channel type")
			}
			return x
		}
		// <-(expr)
		return &ast.UnaryExpr{OpPos: arrow, Op: token.ARROW, X: p.checkExpr(x)}
	case token.MUL:
		// pointer type or unary "*" expression
		pos := p.pos
		p.next()
		x := p.parseUnaryExpr(false)
		return &ast.StarExpr{Star: pos, X: p.checkExprOrType(x)}
	}
	return p.parsePrimaryExpr(lhs)
}
// tokPrec returns the current token and its operator precedence.
// In right-hand-side context '=' is mapped to '==' — presumably so a
// mistyped assignment inside an expression parses and produces a better
// error downstream (NOTE(review): confirm intent against callers).
func (p *parser) tokPrec() (token.Token, int) {
	tok := p.tok
	if tok == token.ASSIGN && p.inRhs {
		tok = token.EQL
	}
	return tok, tok.Precedence()
}
// parseBinaryExpr parses a binary expression with operators of
// precedence >= prec1, iterating from the current token's precedence
// down to prec1 and consuming all operators at each level; the right
// operand is parsed at prec+1, giving left associativity.
// If lhs is set and the result is an identifier, it is not resolved.
func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
	if p.trace {
		defer un(trace(p, "BinaryExpr"))
	}
	x := p.parseUnaryExpr(lhs)
	for _, prec := p.tokPrec(); prec >= prec1; prec-- {
		for {
			op, oprec := p.tokPrec()
			if oprec != prec {
				break
			}
			pos := p.expect(op)
			if lhs {
				p.resolve(x)
				lhs = false
			}
			y := p.parseBinaryExpr(false, prec+1)
			x = &ast.BinaryExpr{X: p.checkExpr(x), OpPos: pos, Op: op, Y: p.checkExpr(y)}
		}
	}
	return x
}
// parseExpr parses a full expression (any operator precedence).
// If lhs is set and the result is an identifier, it is not resolved.
// The result may be a type or even a raw type ([...]int). Callers must
// check the result (using checkExpr or checkExprOrType), depending on
// context.
func (p *parser) parseExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "Expression"))
	}
	return p.parseBinaryExpr(lhs, token.LowestPrec+1)
}
// parseRhs parses an expression in right-hand-side context (p.inRhs set
// for its duration) and checks that the result is an expression, not a type.
func (p *parser) parseRhs() ast.Expr {
	defer func(old bool) { p.inRhs = old }(p.inRhs)
	p.inRhs = true
	return p.checkExpr(p.parseExpr(false))
}
// parseRhsOrType parses an expression or type in right-hand-side context
// (p.inRhs set for its duration); raw types such as [...]T are rejected.
func (p *parser) parseRhsOrType() ast.Expr {
	defer func(old bool) { p.inRhs = old }(p.inRhs)
	p.inRhs = true
	return p.checkExprOrType(p.parseExpr(false))
}
// ----------------------------------------------------------------------------
// Statements

// Parsing modes for parseSimpleStmt.
const (
	basic   = iota // plain simple statement
	labelOk        // a label declaration ("name:") is also accepted
	rangeOk        // a range clause is also accepted
)
// parseSimpleStmt returns true as 2nd result if it parsed the assignment
// of a range clause (with mode == rangeOk). The returned statement is an
// assignment with a right-hand side that is a single unary expression of
// the form "range x". No guarantees are given for the left-hand side.
func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
	if p.trace {
		defer un(trace(p, "SimpleStmt"))
	}
	x := p.parseLhsList()
	switch p.tok {
	case
		token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
		token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
		token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
		token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
		// assignment statement, possibly part of a range clause
		pos, tok := p.pos, p.tok
		p.next()
		var y []ast.Expr
		isRange := false
		if mode == rangeOk && p.tok == token.RANGE && (tok == token.DEFINE || tok == token.ASSIGN) {
			// encode "range x" as a unary expression on the rhs (see doc above)
			pos := p.pos
			p.next()
			y = []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
			isRange = true
		} else {
			y = p.parseRhsList()
		}
		as := &ast.AssignStmt{Lhs: x, TokPos: pos, Tok: tok, Rhs: y}
		if tok == token.DEFINE {
			// declare the newly defined variables
			p.shortVarDecl(as, x)
		}
		return as, isRange
	}
	// not an assignment: a simple statement has exactly one expression
	if len(x) > 1 {
		p.errorExpected(x[0].Pos(), "1 expression")
		// continue with first expression
	}
	switch p.tok {
	case token.COLON:
		// labeled statement
		colon := p.pos
		p.next()
		if label, isIdent := x[0].(*ast.Ident); mode == labelOk && isIdent {
			// Go spec: The scope of a label is the body of the function
			// in which it is declared and excludes the body of any nested
			// function.
			stmt := &ast.LabeledStmt{Label: label, Colon: colon, Stmt: p.parseStmt()}
			p.declare(stmt, nil, p.labelScope, ast.Lbl, label)
			return stmt, false
		}
		// The label declaration typically starts at x[0].Pos(), but the label
		// declaration may be erroneous due to a token after that position (and
		// before the ':'). If SpuriousErrors is not set, the (only) error re-
		// ported for the line is the illegal label error instead of the token
		// before the ':' that caused the problem. Thus, use the (latest) colon
		// position for error reporting.
		p.error(colon, "illegal label declaration")
		return &ast.BadStmt{From: x[0].Pos(), To: colon + 1}, false
	case token.ARROW:
		// send statement
		arrow := p.pos
		p.next()
		y := p.parseRhs()
		return &ast.SendStmt{Chan: x[0], Arrow: arrow, Value: y}, false
	case token.INC, token.DEC:
		// increment or decrement
		s := &ast.IncDecStmt{X: x[0], TokPos: p.pos, Tok: p.tok}
		p.next()
		return s, false
	}
	// expression
	return &ast.ExprStmt{X: x[0]}, false
}
// parseCallExpr parses an expression expected to be a call (for go/defer
// statements). It returns nil — after reporting an error unless one was
// already reported — if the expression is not a call.
func (p *parser) parseCallExpr() *ast.CallExpr {
	x := p.parseRhsOrType() // could be a conversion: (some type)(x)
	if call, isCall := x.(*ast.CallExpr); isCall {
		return call
	}
	if _, isBad := x.(*ast.BadExpr); !isBad {
		// only report error if it's a new one
		p.errorExpected(x.Pos(), "function/method call")
	}
	return nil
}
// parseGoStmt parses a go statement: "go" CallExpr. A BadStmt is
// produced if the operand is not a call.
func (p *parser) parseGoStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "GoStmt"))
	}
	pos := p.expect(token.GO)
	call := p.parseCallExpr()
	p.expectSemi()
	if call == nil {
		return &ast.BadStmt{From: pos, To: pos + 2} // len("go")
	}
	return &ast.GoStmt{Go: pos, Call: call}
}
// parseDeferStmt parses a defer statement: "defer" CallExpr. A BadStmt
// is produced if the operand is not a call.
func (p *parser) parseDeferStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "DeferStmt"))
	}
	pos := p.expect(token.DEFER)
	call := p.parseCallExpr()
	p.expectSemi()
	if call == nil {
		return &ast.BadStmt{From: pos, To: pos + 5} // len("defer")
	}
	return &ast.DeferStmt{Defer: pos, Call: call}
}
// parseReturnStmt parses a return statement with an optional result
// list; the list is absent before ';' or '}'.
func (p *parser) parseReturnStmt() *ast.ReturnStmt {
	if p.trace {
		defer un(trace(p, "ReturnStmt"))
	}
	pos := p.pos
	p.expect(token.RETURN)
	var x []ast.Expr
	if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
		x = p.parseRhsList()
	}
	p.expectSemi()
	return &ast.ReturnStmt{Return: pos, Results: x}
}
// parseBranchStmt parses a break, continue, goto, or fallthrough
// statement (tok selects which). An optional label is allowed for all
// but fallthrough; labels are recorded for later resolution.
func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
	if p.trace {
		defer un(trace(p, "BranchStmt"))
	}
	pos := p.expect(tok)
	var label *ast.Ident
	if tok != token.FALLTHROUGH && p.tok == token.IDENT {
		label = p.parseIdent()
		// add to list of unresolved targets
		n := len(p.targetStack) - 1
		p.targetStack[n] = append(p.targetStack[n], label)
	}
	p.expectSemi()
	return &ast.BranchStmt{TokPos: pos, Tok: tok, Label: label}
}
// makeExpr converts a simple statement into the expression it wraps
// (for if/for/switch conditions). A nil statement yields nil; a
// non-expression statement is an error and yields a BadExpr.
func (p *parser) makeExpr(s ast.Stmt) ast.Expr {
	if s == nil {
		return nil
	}
	if es, isExpr := s.(*ast.ExprStmt); isExpr {
		return p.checkExpr(es.X)
	}
	p.error(s.Pos(), "expected condition, found simple statement")
	return &ast.BadExpr{From: s.Pos(), To: s.End()}
}
// parseIfStmt parses an if statement with an optional init statement,
// the condition, the body, and an optional else branch. The header is
// parsed with exprLev < 0 so '{' is not taken as a composite literal.
func (p *parser) parseIfStmt() *ast.IfStmt {
	if p.trace {
		defer un(trace(p, "IfStmt"))
	}
	pos := p.expect(token.IF)
	p.openScope()
	defer p.closeScope()
	var s ast.Stmt
	var x ast.Expr
	{
		prevLev := p.exprLev
		p.exprLev = -1
		if p.tok == token.SEMICOLON {
			// "if ; cond": empty init, condition follows
			p.next()
			x = p.parseRhs()
		} else {
			s, _ = p.parseSimpleStmt(basic)
			if p.tok == token.SEMICOLON {
				// "if init; cond"
				p.next()
				x = p.parseRhs()
			} else {
				// "if cond": the simple statement was the condition itself
				x = p.makeExpr(s)
				s = nil
			}
		}
		p.exprLev = prevLev
	}
	body := p.parseBlockStmt()
	var else_ ast.Stmt
	if p.tok == token.ELSE {
		p.next()
		else_ = p.parseStmt()
	} else {
		p.expectSemi()
	}
	return &ast.IfStmt{If: pos, Init: s, Cond: x, Body: body, Else: else_}
}
// parseTypeList parses a comma-separated list of one or more types
// (used for type-switch case clauses).
func (p *parser) parseTypeList() []ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeList"))
	}
	types := []ast.Expr{p.parseType()}
	for p.tok == token.COMMA {
		p.next()
		types = append(types, p.parseType())
	}
	return types
}
// parseCaseClause parses one "case x, y:" or "default:" clause of a
// switch statement; for a type switch the case list holds types.
func (p *parser) parseCaseClause(typeSwitch bool) *ast.CaseClause {
	if p.trace {
		defer un(trace(p, "CaseClause"))
	}
	pos := p.pos
	var list []ast.Expr
	if p.tok == token.CASE {
		p.next()
		if typeSwitch {
			list = p.parseTypeList()
		} else {
			list = p.parseRhsList()
		}
	} else {
		p.expect(token.DEFAULT)
	}
	colon := p.expect(token.COLON)
	// each clause body gets its own scope
	p.openScope()
	body := p.parseStmtList()
	p.closeScope()
	return &ast.CaseClause{Case: pos, List: list, Colon: colon, Body: body}
}
func isTypeSwitchAssert(x ast.Expr) bool {
a, ok := x.(*ast.TypeAssertExpr)
return ok && a.Type == nil
}
// isTypeSwitchGuard reports whether s is a type-switch guard:
// either "x.(type)" by itself or "v := x.(type)".
func isTypeSwitchGuard(s ast.Stmt) bool {
	if expr, isExpr := s.(*ast.ExprStmt); isExpr {
		// x.(nil)
		return isTypeSwitchAssert(expr.X)
	}
	if assign, isAssign := s.(*ast.AssignStmt); isAssign {
		// v := x.(nil)
		return assign.Tok == token.DEFINE &&
			len(assign.Lhs) == 1 &&
			len(assign.Rhs) == 1 &&
			isTypeSwitchAssert(assign.Rhs[0])
	}
	return false
}
// parseSwitchStmt parses either an expression switch or a type switch;
// which one is decided after the header by inspecting the guard
// statement (isTypeSwitchGuard). The header is parsed with exprLev < 0.
func (p *parser) parseSwitchStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "SwitchStmt"))
	}
	pos := p.expect(token.SWITCH)
	p.openScope()
	defer p.closeScope()
	var s1, s2 ast.Stmt // s1: optional init, s2: tag expression or guard
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1
		if p.tok != token.SEMICOLON {
			s2, _ = p.parseSimpleStmt(basic)
		}
		if p.tok == token.SEMICOLON {
			// what we parsed first was the init statement; the tag/guard follows
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.LBRACE {
				// A TypeSwitchGuard may declare a variable in addition
				// to the variable declared in the initial SimpleStmt.
				// Introduce extra scope to avoid redeclaration errors:
				//
				//	switch t := 0; t := x.(T) { ... }
				//
				// (this code is not valid Go because the first t
				// cannot be accessed and thus is never used, the extra
				// scope is needed for the correct error message).
				//
				// If we don't have a type switch, s2 must be an expression.
				// Having the extra nested but empty scope won't affect it.
				p.openScope()
				defer p.closeScope()
				s2, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}
	typeSwitch := isTypeSwitchGuard(s2)
	lbrace := p.expect(token.LBRACE)
	var list []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		list = append(list, p.parseCaseClause(typeSwitch))
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
	if typeSwitch {
		return &ast.TypeSwitchStmt{Switch: pos, Init: s1, Assign: s2, Body: body}
	}
	return &ast.SwitchStmt{Switch: pos, Init: s1, Tag: p.makeExpr(s2), Body: body}
}
// parseCommClause parses one clause of a select statement: "case" with a
// send statement, a receive statement (optionally with assignment), or
// "default". The clause body runs in its own scope.
func (p *parser) parseCommClause() *ast.CommClause {
	if p.trace {
		defer un(trace(p, "CommClause"))
	}
	p.openScope()
	pos := p.pos
	var comm ast.Stmt
	if p.tok == token.CASE {
		p.next()
		lhs := p.parseLhsList()
		if p.tok == token.ARROW {
			// SendStmt
			if len(lhs) > 1 {
				p.errorExpected(lhs[0].Pos(), "1 expression")
				// continue with first expression
			}
			arrow := p.pos
			p.next()
			rhs := p.parseRhs()
			comm = &ast.SendStmt{Chan: lhs[0], Arrow: arrow, Value: rhs}
		} else {
			// RecvStmt
			if tok := p.tok; tok == token.ASSIGN || tok == token.DEFINE {
				// RecvStmt with assignment: v (, ok) = <-ch / v (, ok) := <-ch
				if len(lhs) > 2 {
					p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
					// continue with first two expressions
					lhs = lhs[0:2]
				}
				pos := p.pos
				p.next()
				rhs := p.parseRhs()
				as := &ast.AssignStmt{Lhs: lhs, TokPos: pos, Tok: tok, Rhs: []ast.Expr{rhs}}
				if tok == token.DEFINE {
					p.shortVarDecl(as, lhs)
				}
				comm = as
			} else {
				// lhs must be single receive operation
				if len(lhs) > 1 {
					p.errorExpected(lhs[0].Pos(), "1 expression")
					// continue with first expression
				}
				comm = &ast.ExprStmt{X: lhs[0]}
			}
		}
	} else {
		p.expect(token.DEFAULT)
	}
	colon := p.expect(token.COLON)
	body := p.parseStmtList()
	p.closeScope()
	return &ast.CommClause{Case: pos, Comm: comm, Colon: colon, Body: body}
}
// parseSelectStmt parses a select statement: "select" "{" { CommClause } "}".
func (p *parser) parseSelectStmt() *ast.SelectStmt {
	if p.trace {
		defer un(trace(p, "SelectStmt"))
	}
	pos := p.expect(token.SELECT)
	lbrace := p.expect(token.LBRACE)
	var list []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		list = append(list, p.parseCommClause())
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
	return &ast.SelectStmt{Select: pos, Body: body}
}
// parseForStmt parses a for statement in any of its forms: condition
// only, init/cond/post, or a range clause; a range clause yields an
// ast.RangeStmt. The header is parsed with exprLev < 0.
func (p *parser) parseForStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "ForStmt"))
	}
	pos := p.expect(token.FOR)
	p.openScope()
	defer p.closeScope()
	var s1, s2, s3 ast.Stmt // init, condition (or range assignment), post
	var isRange bool
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1
		if p.tok != token.SEMICOLON {
			s2, isRange = p.parseSimpleStmt(rangeOk)
		}
		if !isRange && p.tok == token.SEMICOLON {
			// three-clause form: what we parsed first was the init statement
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.SEMICOLON {
				s2, _ = p.parseSimpleStmt(basic)
			}
			p.expectSemi()
			if p.tok != token.LBRACE {
				s3, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}
	body := p.parseBlockStmt()
	p.expectSemi()
	if isRange {
		as := s2.(*ast.AssignStmt)
		// check lhs
		var key, value ast.Expr
		switch len(as.Lhs) {
		case 2:
			key, value = as.Lhs[0], as.Lhs[1]
		case 1:
			key = as.Lhs[0]
		default:
			p.errorExpected(as.Lhs[0].Pos(), "1 or 2 expressions")
			return &ast.BadStmt{From: pos, To: body.End()}
		}
		// parseSimpleStmt returned a right-hand side that
		// is a single unary expression of the form "range x"
		x := as.Rhs[0].(*ast.UnaryExpr).X
		return &ast.RangeStmt{
			For:    pos,
			Key:    key,
			Value:  value,
			TokPos: as.TokPos,
			Tok:    as.Tok,
			X:      x,
			Body:   body,
		}
	}
	// regular for statement
	return &ast.ForStmt{
		For:  pos,
		Init: s1,
		Cond: p.makeExpr(s2),
		Post: s3,
		Body: body,
	}
}
// parseStmt parses a single statement, dispatching on the current token.
// On an unexpected token it reports an error, skips to a plausible
// statement start (syncStmt), and returns a BadStmt.
func (p *parser) parseStmt() (s ast.Stmt) {
	if p.trace {
		defer un(trace(p, "Statement"))
	}
	switch p.tok {
	case token.CONST, token.TYPE, token.VAR:
		s = &ast.DeclStmt{Decl: p.parseDecl(syncStmt)}
	case
		// tokens that may start an expression
		token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operands
		token.LBRACK, token.STRUCT, // composite types
		token.ADD, token.SUB, token.MUL, token.AND, token.XOR, token.ARROW, token.NOT: // unary operators
		s, _ = p.parseSimpleStmt(labelOk)
		// because of the required look-ahead, labeled statements are
		// parsed by parseSimpleStmt - don't expect a semicolon after
		// them
		if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
			p.expectSemi()
		}
	case token.GO:
		s = p.parseGoStmt()
	case token.DEFER:
		s = p.parseDeferStmt()
	case token.RETURN:
		s = p.parseReturnStmt()
	case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
		s = p.parseBranchStmt(p.tok)
	case token.LBRACE:
		s = p.parseBlockStmt()
		p.expectSemi()
	case token.IF:
		s = p.parseIfStmt()
	case token.SWITCH:
		s = p.parseSwitchStmt()
	case token.SELECT:
		s = p.parseSelectStmt()
	case token.FOR:
		s = p.parseForStmt()
	case token.SEMICOLON:
		s = &ast.EmptyStmt{Semicolon: p.pos}
		p.next()
	case token.RBRACE:
		// a semicolon may be omitted before a closing "}"
		s = &ast.EmptyStmt{Semicolon: p.pos}
	default:
		// no statement found
		pos := p.pos
		p.errorExpected(pos, "statement")
		syncStmt(p)
		s = &ast.BadStmt{From: pos, To: p.pos}
	}
	return
}
// ----------------------------------------------------------------------------
// Declarations

// A parseSpecFunction parses one spec of a generic declaration
// (import, const, type, or var). keyword is the declaration's keyword;
// iota is the index of the spec within a parenthesized group.
type parseSpecFunction func(doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec
// isValidImport reports whether lit (a string literal as produced by
// go/scanner) is a legal, non-empty import path: every character must be
// graphic, non-space, and outside the spec's set of illegal characters.
func isValidImport(lit string) bool {
	const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
	s, _ := strconv.Unquote(lit) // go/scanner returns a legal string literal
	if s == "" {
		return false
	}
	for _, r := range s {
		switch {
		case !unicode.IsGraphic(r), unicode.IsSpace(r), strings.ContainsRune(illegalChars, r):
			return false
		}
	}
	return true
}
// parseImportSpec parses a single import spec: an optional local name
// ('.' or an identifier) followed by the quoted import path. The spec is
// also recorded in p.imports. Implements parseSpecFunction (keyword and
// iota are unused).
func (p *parser) parseImportSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "ImportSpec"))
	}
	var ident *ast.Ident
	switch p.tok {
	case token.PERIOD:
		// dot import
		ident = &ast.Ident{NamePos: p.pos, Name: "."}
		p.next()
	case token.IDENT:
		// named import
		ident = p.parseIdent()
	}
	var path *ast.BasicLit
	if p.tok == token.STRING {
		if !isValidImport(p.lit) {
			p.error(p.pos, "invalid import path: "+p.lit)
		}
		path = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
	} else {
		p.expect(token.STRING) // use expect() error handling
	}
	p.expectSemi() // call before accessing p.linecomment
	// collect imports
	spec := &ast.ImportSpec{
		Doc:     doc,
		Name:    ident,
		Path:    path,
		Comment: p.lineComment,
	}
	p.imports = append(p.imports, spec)
	return spec
}
// parseValueSpec parses one const or var spec: identifier list, optional
// type, optional initializer list. Whether initializers are required
// depends on the keyword, the presence of a type, and (for const) iota.
// Implements parseSpecFunction.
func (p *parser) parseValueSpec(doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec {
	if p.trace {
		defer un(trace(p, keyword.String()+"Spec"))
	}
	idents := p.parseIdentList()
	typ := p.tryType()
	var values []ast.Expr
	// initializers are mandatory for "const x T = ..." and the first const
	// spec of a group, and for "var x = ..." without a type
	if p.tok == token.ASSIGN || keyword == token.CONST && (typ != nil || iota == 0) || keyword == token.VAR && typ == nil {
		p.expect(token.ASSIGN)
		values = p.parseRhsList()
	}
	p.expectSemi() // call before accessing p.linecomment
	// Go spec: The scope of a constant or variable identifier declared inside
	// a function begins at the end of the ConstSpec or VarSpec and ends at
	// the end of the innermost containing block.
	// (Global identifiers are resolved in a separate phase after parsing.)
	spec := &ast.ValueSpec{
		Doc:     doc,
		Names:   idents,
		Type:    typ,
		Values:  values,
		Comment: p.lineComment,
	}
	kind := ast.Con
	if keyword == token.VAR {
		kind = ast.Var
	}
	p.declare(spec, iota, p.topScope, kind, idents...)
	return spec
}
// parseTypeSpec parses a single type spec: identifier followed by a type.
// The name is declared before the type is parsed so that recursive types
// (e.g. "type T *T") resolve. Implements parseSpecFunction (keyword and
// iota are unused).
func (p *parser) parseTypeSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "TypeSpec"))
	}
	ident := p.parseIdent()
	// Go spec: The scope of a type identifier declared inside a function begins
	// at the identifier in the TypeSpec and ends at the end of the innermost
	// containing block.
	// (Global identifiers are resolved in a separate phase after parsing.)
	spec := &ast.TypeSpec{Doc: doc, Name: ident}
	p.declare(spec, nil, p.topScope, ast.Typ, ident)
	spec.Type = p.parseType()
	p.expectSemi() // call before accessing p.linecomment
	spec.Comment = p.lineComment
	return spec
}
// parseGenDecl parses a generic declaration (import, const, type, var):
// either a single spec or a parenthesized group of specs; f parses each
// spec and receives the running iota for grouped const declarations.
func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
	if p.trace {
		defer un(trace(p, "GenDecl("+keyword.String()+")"))
	}
	doc := p.leadComment
	pos := p.expect(keyword)
	var lparen, rparen token.Pos
	var list []ast.Spec
	if p.tok == token.LPAREN {
		// grouped declaration
		lparen = p.pos
		p.next()
		for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
			list = append(list, f(p.leadComment, keyword, iota))
		}
		rparen = p.expect(token.RPAREN)
		p.expectSemi()
	} else {
		// single spec (no parentheses; Lparen/Rparen stay token.NoPos)
		list = append(list, f(nil, keyword, 0))
	}
	return &ast.GenDecl{
		Doc:    doc,
		TokPos: pos,
		Tok:    keyword,
		Lparen: lparen,
		Specs:  list,
		Rparen: rparen,
	}
}
// parseReceiver parses a method receiver and validates it: there must be
// exactly one receiver and its (possibly pointer-dereferenced) type must
// be an unqualified identifier. Invalid receivers are replaced by fields
// with BadExpr types so parsing can continue.
func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList {
	if p.trace {
		defer un(trace(p, "Receiver"))
	}
	par := p.parseParameters(scope, false)
	// must have exactly one receiver
	if par.NumFields() != 1 {
		p.errorExpected(par.Opening, "exactly one receiver")
		par.List = []*ast.Field{{Type: &ast.BadExpr{From: par.Opening, To: par.Closing + 1}}}
		return par
	}
	// recv type must be of the form ["*"] identifier
	recv := par.List[0]
	base := deref(recv.Type)
	if _, isIdent := base.(*ast.Ident); !isIdent {
		if _, isBad := base.(*ast.BadExpr); !isBad {
			// only report error if it's a new one
			p.errorExpected(base.Pos(), "(unqualified) identifier")
		}
		par.List = []*ast.Field{
			{Type: &ast.BadExpr{From: recv.Pos(), To: recv.End()}},
		}
	}
	return par
}
// parseFuncDecl parses a function or method declaration, including an
// optional receiver and an optional body. Top-level functions (but not
// methods, and not init functions) are declared in the package scope.
func (p *parser) parseFuncDecl() *ast.FuncDecl {
	if p.trace {
		defer un(trace(p, "FunctionDecl"))
	}
	doc := p.leadComment
	pos := p.expect(token.FUNC)
	scope := ast.NewScope(p.topScope) // function scope
	var recv *ast.FieldList
	if p.tok == token.LPAREN {
		// method: parse the receiver
		recv = p.parseReceiver(scope)
	}
	ident := p.parseIdent()
	params, results := p.parseSignature(scope)
	var body *ast.BlockStmt
	if p.tok == token.LBRACE {
		// body is optional (absent for external/assembly implementations)
		body = p.parseBody(scope)
	}
	p.expectSemi()
	decl := &ast.FuncDecl{
		Doc:  doc,
		Recv: recv,
		Name: ident,
		Type: &ast.FuncType{
			Func:    pos,
			Params:  params,
			Results: results,
		},
		Body: body,
	}
	if recv == nil {
		// Go spec: The scope of an identifier denoting a constant, type,
		// variable, or function (but not method) declared at top level
		// (outside any function) is the package block.
		//
		// init() functions cannot be referred to and there may
		// be more than one - don't put them in the pkgScope
		if ident.Name != "init" {
			p.declare(decl, nil, p.pkgScope, ast.Fun, ident)
		}
	}
	return decl
}
// parseDecl parses a single top-level declaration, dispatching on the
// current token; on an unexpected token it reports an error, calls sync
// to skip to a safe resume point, and returns a BadDecl.
func (p *parser) parseDecl(sync func(*parser)) ast.Decl {
	if p.trace {
		defer un(trace(p, "Declaration"))
	}
	var f parseSpecFunction
	switch p.tok {
	case token.CONST, token.VAR:
		f = p.parseValueSpec
	case token.TYPE:
		f = p.parseTypeSpec
	case token.FUNC:
		return p.parseFuncDecl()
	default:
		pos := p.pos
		p.errorExpected(pos, "declaration")
		sync(p)
		return &ast.BadDecl{From: pos, To: p.pos}
	}
	return p.parseGenDecl(p.tok, f)
}
// ----------------------------------------------------------------------------
// Source files
// parseFile parses a complete source file: package clause, import
// declarations, the remaining top-level declarations (subject to the
// parse mode), and finally resolves file-local identifiers against the
// package scope. It returns nil if scanning or the package clause failed.
func (p *parser) parseFile() *ast.File {
	if p.trace {
		defer un(trace(p, "File"))
	}
	// Don't bother parsing the rest if we had errors scanning the first token.
	// Likely not a Go source file at all.
	if p.errors.Len() != 0 {
		return nil
	}
	// package clause
	doc := p.leadComment
	pos := p.expect(token.PACKAGE)
	// Go spec: The package clause is not a declaration;
	// the package name does not appear in any scope.
	ident := p.parseIdent()
	if ident.Name == "_" {
		p.error(p.pos, "invalid package name _")
	}
	p.expectSemi()
	// Don't bother parsing the rest if we had errors parsing the package clause.
	// Likely not a Go source file at all.
	if p.errors.Len() != 0 {
		return nil
	}
	p.openScope()
	p.pkgScope = p.topScope
	var decls []ast.Decl
	if p.mode&PackageClauseOnly == 0 {
		// import decls
		for p.tok == token.IMPORT {
			decls = append(decls, p.parseGenDecl(token.IMPORT, p.parseImportSpec))
		}
		if p.mode&ImportsOnly == 0 {
			// rest of package body
			for p.tok != token.EOF {
				decls = append(decls, p.parseDecl(syncDecl))
			}
		}
	}
	p.closeScope()
	assert(p.topScope == nil, "unbalanced scopes")
	assert(p.labelScope == nil, "unbalanced label scopes")
	// resolve global identifiers within the same file
	// Identifiers still unresolved after the loop are compacted to the
	// front of p.unresolved (index i tracks how many remain).
	i := 0
	for _, ident := range p.unresolved {
		// i <= index for current ident
		assert(ident.Obj == unresolved, "object already resolved")
		ident.Obj = p.pkgScope.Lookup(ident.Name) // also removes unresolved sentinel
		if ident.Obj == nil {
			p.unresolved[i] = ident
			i++
		}
	}
	return &ast.File{
		Doc:        doc,
		Package:    pos,
		Name:       ident,
		Decls:      decls,
		Scope:      p.pkgScope,
		Imports:    p.imports,
		Unresolved: p.unresolved[0:i],
		Comments:   p.comments,
	}
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Tests for client.go
package http_test
import (
"fmt"
. "http"
"http/httptest"
"io"
"io/ioutil"
"net"
"os"
"strconv"
"strings"
"testing"
"url"
)
// robotsTxtHandler serves a fixed robots.txt-style body with a
// Last-Modified header, used as a simple target for client tests.
var robotsTxtHandler = HandlerFunc(func(w ResponseWriter, r *Request) {
	w.Header().Set("Last-Modified", "sometime")
	fmt.Fprintf(w, "User-agent: go\nDisallow: /something/")
})
// TestClient performs a real GET against a local test server and
// checks that the body is read back intact.
func TestClient(t *testing.T) {
	ts := httptest.NewServer(robotsTxtHandler)
	defer ts.Close()
	r, err := Get(ts.URL)
	var b []byte
	if err == nil {
		b, err = ioutil.ReadAll(r.Body)
		r.Body.Close()
	}
	if err != nil {
		t.Error(err)
	} else if s := string(b); !strings.HasPrefix(s, "User-agent:") {
		t.Errorf("Incorrect page body (did not begin with User-agent): %q", s)
	}
}
// TestClientHead verifies that a HEAD request returns the handler's
// response headers (here Last-Modified) without a body.
func TestClientHead(t *testing.T) {
	ts := httptest.NewServer(robotsTxtHandler)
	defer ts.Close()
	r, err := Head(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	if _, ok := r.Header["Last-Modified"]; !ok {
		t.Error("Last-Modified header not found.")
	}
}
// recordingTransport is a RoundTripper that records the last request
// it sees and always fails, so tests can inspect the request the
// client built without any network I/O.
type recordingTransport struct {
	req *Request
}

// RoundTrip stores req and returns a dummy error; nothing is sent.
func (t *recordingTransport) RoundTrip(req *Request) (resp *Response, err os.Error) {
	t.req = req
	return nil, os.NewError("dummy impl")
}
// TestGetRequestFormat checks the request Client.Get builds (method,
// URL, non-nil headers) using a recording transport; no network used.
func TestGetRequestFormat(t *testing.T) {
	tr := &recordingTransport{}
	client := &Client{Transport: tr}
	url := "http://dummy.faketld/"
	client.Get(url) // Note: doesn't hit network
	if tr.req.Method != "GET" {
		t.Errorf("expected method %q; got %q", "GET", tr.req.Method)
	}
	if tr.req.URL.String() != url {
		t.Errorf("expected URL %q; got %q", url, tr.req.URL.String())
	}
	if tr.req.Header == nil {
		t.Errorf("expected non-nil request Header")
	}
}
// TestPostRequestFormat checks the request Client.Post builds from a
// reader body: method, URL, headers, Close flag, and ContentLength.
func TestPostRequestFormat(t *testing.T) {
	tr := &recordingTransport{}
	client := &Client{Transport: tr}
	url := "http://dummy.faketld/"
	json := `{"key":"value"}`
	b := strings.NewReader(json)
	client.Post(url, "application/json", b) // Note: doesn't hit network
	if tr.req.Method != "POST" {
		t.Errorf("got method %q, want %q", tr.req.Method, "POST")
	}
	if tr.req.URL.String() != url {
		t.Errorf("got URL %q, want %q", tr.req.URL.String(), url)
	}
	if tr.req.Header == nil {
		t.Fatalf("expected non-nil request Header")
	}
	if tr.req.Close {
		t.Error("got Close true, want false")
	}
	if g, e := tr.req.ContentLength, int64(len(json)); g != e {
		t.Errorf("got ContentLength %d, want %d", g, e)
	}
}
// TestPostFormRequestFormat checks the request Client.PostForm builds
// from url.Values: method, URL, Content-Type, Close flag, length, and
// encoded body. Map iteration order is not specified in Go, so the
// urlencoded body may list the "foo" and "bar" keys in either order;
// asserting a single exact string makes the test flaky. Accept both
// orderings instead (they have equal length, so the ContentLength
// check is unaffected).
func TestPostFormRequestFormat(t *testing.T) {
	tr := &recordingTransport{}
	client := &Client{Transport: tr}
	urlStr := "http://dummy.faketld/"
	form := make(url.Values)
	form.Set("foo", "bar")
	form.Add("foo", "bar2")
	form.Set("bar", "baz")
	client.PostForm(urlStr, form) // Note: doesn't hit network
	if tr.req.Method != "POST" {
		t.Errorf("got method %q, want %q", tr.req.Method, "POST")
	}
	if tr.req.URL.String() != urlStr {
		t.Errorf("got URL %q, want %q", tr.req.URL.String(), urlStr)
	}
	if tr.req.Header == nil {
		t.Fatalf("expected non-nil request Header")
	}
	if g, e := tr.req.Header.Get("Content-Type"), "application/x-www-form-urlencoded"; g != e {
		t.Errorf("got Content-Type %q, want %q", g, e)
	}
	if tr.req.Close {
		t.Error("got Close true, want false")
	}
	// Depending on map iteration order, the body can be either of these.
	expectedBody := "foo=bar&foo=bar2&bar=baz"
	expectedBody1 := "bar=baz&foo=bar&foo=bar2"
	if g, e := tr.req.ContentLength, int64(len(expectedBody)); g != e {
		t.Errorf("got ContentLength %d, want %d", g, e)
	}
	bodyb, err := ioutil.ReadAll(tr.req.Body)
	if err != nil {
		t.Fatalf("ReadAll on req.Body: %v", err)
	}
	if g := string(bodyb); g != expectedBody && g != expectedBody1 {
		t.Errorf("got body %q, want %q or %q", g, expectedBody, expectedBody1)
	}
}
// TestRedirects exercises the client's redirect handling: the default
// 10-redirect limit for Get/Head/Do, the Referer header on follow-ups,
// and a custom CheckRedirect policy that can allow or forbid redirects.
func TestRedirects(t *testing.T) {
	var ts *httptest.Server
	ts = httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		n, _ := strconv.Atoi(r.FormValue("n"))
		// Test Referer header. (7 is arbitrary position to test at)
		if n == 7 {
			if g, e := r.Referer(), ts.URL+"/?n=6"; e != g {
				t.Errorf("on request ?n=7, expected referer of %q; got %q", e, g)
			}
		}
		// Redirect 15 times, then serve a final page.
		if n < 15 {
			Redirect(w, r, fmt.Sprintf("/?n=%d", n+1), StatusFound)
			return
		}
		fmt.Fprintf(w, "n=%d", n)
	}))
	defer ts.Close()
	c := &Client{}
	_, err := c.Get(ts.URL)
	if e, g := "Get /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g {
		t.Errorf("with default client Get, expected error %q, got %q", e, g)
	}
	// HEAD request should also have the ability to follow redirects.
	_, err = c.Head(ts.URL)
	if e, g := "Head /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g {
		t.Errorf("with default client Head, expected error %q, got %q", e, g)
	}
	// Do should also follow redirects.
	greq, _ := NewRequest("GET", ts.URL, nil)
	_, err = c.Do(greq)
	if e, g := "Get /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g {
		t.Errorf("with default client Do, expected error %q, got %q", e, g)
	}
	// A permissive CheckRedirect lets the client reach the final page
	// and records the chain of requests in lastVia.
	var checkErr os.Error
	var lastVia []*Request
	c = &Client{CheckRedirect: func(_ *Request, via []*Request) os.Error {
		lastVia = via
		return checkErr
	}}
	res, err := c.Get(ts.URL)
	finalUrl := res.Request.URL.String()
	if e, g := "<nil>", fmt.Sprintf("%v", err); e != g {
		t.Errorf("with custom client, expected error %q, got %q", e, g)
	}
	if !strings.HasSuffix(finalUrl, "/?n=15") {
		t.Errorf("expected final url to end in /?n=15; got url %q", finalUrl)
	}
	if e, g := 15, len(lastVia); e != g {
		t.Errorf("expected lastVia to have contained %d elements; got %d", e, g)
	}
	// Once CheckRedirect returns an error, the first redirect must fail.
	checkErr = os.NewError("no redirects allowed")
	res, err = c.Get(ts.URL)
	finalUrl = res.Request.URL.String()
	if e, g := "Get /?n=1: no redirects allowed", fmt.Sprintf("%v", err); e != g {
		t.Errorf("with redirects forbidden, expected error %q, got %q", e, g)
	}
}
// TestStreamingGet verifies that response bodies stream: the handler
// flushes each chunk as it is sent over the say channel, and the
// client must be able to read each chunk before the response ends.
func TestStreamingGet(t *testing.T) {
	say := make(chan string)
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		w.(Flusher).Flush()
		for str := range say {
			w.Write([]byte(str))
			w.(Flusher).Flush()
		}
	}))
	defer ts.Close()
	c := &Client{}
	res, err := c.Get(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	var buf [10]byte
	for _, str := range []string{"i", "am", "also", "known", "as", "comet"} {
		say <- str
		n, err := io.ReadFull(res.Body, buf[0:len(str)])
		if err != nil {
			t.Fatalf("ReadFull on %q: %v", str, err)
		}
		if n != len(str) {
			t.Fatalf("Receiving %q, only read %d bytes", str, n)
		}
		got := string(buf[0:n])
		if got != str {
			t.Fatalf("Expected %q, got %q", str, got)
		}
	}
	// Closing the channel ends the handler, which ends the body.
	close(say)
	_, err = io.ReadFull(res.Body, buf[0:1])
	if err != os.EOF {
		t.Fatalf("at end expected EOF, got %v", err)
	}
}
// writeCountingConn wraps a net.Conn and counts Write calls, so tests
// can check how many TCP writes a request produced.
type writeCountingConn struct {
	net.Conn
	count *int
}

// Write bumps the shared counter and forwards to the wrapped Conn.
func (c *writeCountingConn) Write(p []byte) (int, os.Error) {
	*c.count++
	return c.Conn.Write(p)
}
// TestClientWrites verifies that client requests are buffered and we
// don't send a TCP packet per line of the http request + body.
func TestClientWrites(t *testing.T) {
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
	}))
	defer ts.Close()
	writes := 0
	// Dialer that wraps each new connection to count Write calls.
	dialer := func(netz string, addr string) (net.Conn, os.Error) {
		c, err := net.Dial(netz, addr)
		if err == nil {
			c = &writeCountingConn{c, &writes}
		}
		return c, err
	}
	c := &Client{Transport: &Transport{Dial: dialer}}
	_, err := c.Get(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	// A buffered request should hit the wire in a single Write.
	if writes != 1 {
		t.Errorf("Get request did %d Write calls, want 1", writes)
	}
	writes = 0
	_, err = c.PostForm(ts.URL, url.Values{"foo": {"bar"}})
	if err != nil {
		t.Fatal(err)
	}
	if writes != 1 {
		t.Errorf("Post request did %d Write calls, want 1", writes)
	}
}
http: do not depend on map iteration order
R=golang-dev, r
CC=golang-dev
https://golang.org/cl/5284050
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Tests for client.go
package http_test
import (
"fmt"
. "http"
"http/httptest"
"io"
"io/ioutil"
"net"
"os"
"strconv"
"strings"
"testing"
"url"
)
// robotsTxtHandler serves a fixed robots.txt-style body plus a
// Last-Modified header; a simple target for the client tests below.
var robotsTxtHandler = HandlerFunc(func(w ResponseWriter, r *Request) {
	w.Header().Set("Last-Modified", "sometime")
	fmt.Fprintf(w, "User-agent: go\nDisallow: /something/")
})
// TestClient does an end-to-end GET against a test server and checks
// the returned body.
func TestClient(t *testing.T) {
	ts := httptest.NewServer(robotsTxtHandler)
	defer ts.Close()
	r, err := Get(ts.URL)
	var b []byte
	if err == nil {
		b, err = ioutil.ReadAll(r.Body)
		r.Body.Close()
	}
	if err != nil {
		t.Error(err)
	} else if s := string(b); !strings.HasPrefix(s, "User-agent:") {
		t.Errorf("Incorrect page body (did not begin with User-agent): %q", s)
	}
}
// TestClientHead checks that HEAD responses carry the handler's
// headers (Last-Modified) even though no body is returned.
func TestClientHead(t *testing.T) {
	ts := httptest.NewServer(robotsTxtHandler)
	defer ts.Close()
	r, err := Head(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	if _, ok := r.Header["Last-Modified"]; !ok {
		t.Error("Last-Modified header not found.")
	}
}
// recordingTransport captures the last request handed to RoundTrip
// and always errors out, letting tests inspect the built request
// without touching the network.
type recordingTransport struct {
	req *Request
}

// RoundTrip records req and returns a dummy error; no I/O happens.
func (t *recordingTransport) RoundTrip(req *Request) (resp *Response, err os.Error) {
	t.req = req
	return nil, os.NewError("dummy impl")
}
// TestGetRequestFormat inspects the request Client.Get constructs
// (method, URL, non-nil Header) via a recording transport.
func TestGetRequestFormat(t *testing.T) {
	tr := &recordingTransport{}
	client := &Client{Transport: tr}
	url := "http://dummy.faketld/"
	client.Get(url) // Note: doesn't hit network
	if tr.req.Method != "GET" {
		t.Errorf("expected method %q; got %q", "GET", tr.req.Method)
	}
	if tr.req.URL.String() != url {
		t.Errorf("expected URL %q; got %q", url, tr.req.URL.String())
	}
	if tr.req.Header == nil {
		t.Errorf("expected non-nil request Header")
	}
}
// TestPostRequestFormat inspects the request Client.Post constructs
// from a reader body: method, URL, Header, Close, and ContentLength.
func TestPostRequestFormat(t *testing.T) {
	tr := &recordingTransport{}
	client := &Client{Transport: tr}
	url := "http://dummy.faketld/"
	json := `{"key":"value"}`
	b := strings.NewReader(json)
	client.Post(url, "application/json", b) // Note: doesn't hit network
	if tr.req.Method != "POST" {
		t.Errorf("got method %q, want %q", tr.req.Method, "POST")
	}
	if tr.req.URL.String() != url {
		t.Errorf("got URL %q, want %q", tr.req.URL.String(), url)
	}
	if tr.req.Header == nil {
		t.Fatalf("expected non-nil request Header")
	}
	if tr.req.Close {
		t.Error("got Close true, want false")
	}
	if g, e := tr.req.ContentLength, int64(len(json)); g != e {
		t.Errorf("got ContentLength %d, want %d", g, e)
	}
}
// TestPostFormRequestFormat inspects the request Client.PostForm
// constructs from url.Values. Because Go map iteration order is
// unspecified, the urlencoded body may list the keys in either order;
// both permutations are accepted (they have equal length).
func TestPostFormRequestFormat(t *testing.T) {
	tr := &recordingTransport{}
	client := &Client{Transport: tr}
	urlStr := "http://dummy.faketld/"
	form := make(url.Values)
	form.Set("foo", "bar")
	form.Add("foo", "bar2")
	form.Set("bar", "baz")
	client.PostForm(urlStr, form) // Note: doesn't hit network
	if tr.req.Method != "POST" {
		t.Errorf("got method %q, want %q", tr.req.Method, "POST")
	}
	if tr.req.URL.String() != urlStr {
		t.Errorf("got URL %q, want %q", tr.req.URL.String(), urlStr)
	}
	if tr.req.Header == nil {
		t.Fatalf("expected non-nil request Header")
	}
	if g, e := tr.req.Header.Get("Content-Type"), "application/x-www-form-urlencoded"; g != e {
		t.Errorf("got Content-Type %q, want %q", g, e)
	}
	if tr.req.Close {
		t.Error("got Close true, want false")
	}
	// Depending on map iteration, body can be either of these.
	expectedBody := "foo=bar&foo=bar2&bar=baz"
	expectedBody1 := "bar=baz&foo=bar&foo=bar2"
	if g, e := tr.req.ContentLength, int64(len(expectedBody)); g != e {
		t.Errorf("got ContentLength %d, want %d", g, e)
	}
	bodyb, err := ioutil.ReadAll(tr.req.Body)
	if err != nil {
		t.Fatalf("ReadAll on req.Body: %v", err)
	}
	if g := string(bodyb); g != expectedBody && g != expectedBody1 {
		t.Errorf("got body %q, want %q or %q", g, expectedBody, expectedBody1)
	}
}
// TestRedirects exercises redirect handling: the default ten-redirect
// cap for Get/Head/Do, the Referer header on followed redirects, and
// custom CheckRedirect policies (permissive and forbidding).
func TestRedirects(t *testing.T) {
	var ts *httptest.Server
	ts = httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		n, _ := strconv.Atoi(r.FormValue("n"))
		// Test Referer header. (7 is arbitrary position to test at)
		if n == 7 {
			if g, e := r.Referer(), ts.URL+"/?n=6"; e != g {
				t.Errorf("on request ?n=7, expected referer of %q; got %q", e, g)
			}
		}
		// Redirect 15 times before serving the final page.
		if n < 15 {
			Redirect(w, r, fmt.Sprintf("/?n=%d", n+1), StatusFound)
			return
		}
		fmt.Fprintf(w, "n=%d", n)
	}))
	defer ts.Close()
	c := &Client{}
	_, err := c.Get(ts.URL)
	if e, g := "Get /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g {
		t.Errorf("with default client Get, expected error %q, got %q", e, g)
	}
	// HEAD request should also have the ability to follow redirects.
	_, err = c.Head(ts.URL)
	if e, g := "Head /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g {
		t.Errorf("with default client Head, expected error %q, got %q", e, g)
	}
	// Do should also follow redirects.
	greq, _ := NewRequest("GET", ts.URL, nil)
	_, err = c.Do(greq)
	if e, g := "Get /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g {
		t.Errorf("with default client Do, expected error %q, got %q", e, g)
	}
	// A permissive CheckRedirect reaches the final page and records
	// the via chain.
	var checkErr os.Error
	var lastVia []*Request
	c = &Client{CheckRedirect: func(_ *Request, via []*Request) os.Error {
		lastVia = via
		return checkErr
	}}
	res, err := c.Get(ts.URL)
	finalUrl := res.Request.URL.String()
	if e, g := "<nil>", fmt.Sprintf("%v", err); e != g {
		t.Errorf("with custom client, expected error %q, got %q", e, g)
	}
	if !strings.HasSuffix(finalUrl, "/?n=15") {
		t.Errorf("expected final url to end in /?n=15; got url %q", finalUrl)
	}
	if e, g := 15, len(lastVia); e != g {
		t.Errorf("expected lastVia to have contained %d elements; got %d", e, g)
	}
	// When CheckRedirect errors, the very first redirect must fail.
	checkErr = os.NewError("no redirects allowed")
	res, err = c.Get(ts.URL)
	finalUrl = res.Request.URL.String()
	if e, g := "Get /?n=1: no redirects allowed", fmt.Sprintf("%v", err); e != g {
		t.Errorf("with redirects forbidden, expected error %q, got %q", e, g)
	}
}
// TestStreamingGet checks that a flushed response body streams to the
// client chunk by chunk, and that closing the feeding channel ends the
// body with EOF.
func TestStreamingGet(t *testing.T) {
	say := make(chan string)
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		w.(Flusher).Flush()
		for str := range say {
			w.Write([]byte(str))
			w.(Flusher).Flush()
		}
	}))
	defer ts.Close()
	c := &Client{}
	res, err := c.Get(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	var buf [10]byte
	for _, str := range []string{"i", "am", "also", "known", "as", "comet"} {
		say <- str
		n, err := io.ReadFull(res.Body, buf[0:len(str)])
		if err != nil {
			t.Fatalf("ReadFull on %q: %v", str, err)
		}
		if n != len(str) {
			t.Fatalf("Receiving %q, only read %d bytes", str, n)
		}
		got := string(buf[0:n])
		if got != str {
			t.Fatalf("Expected %q, got %q", str, got)
		}
	}
	// Handler loop exits once say is closed, terminating the body.
	close(say)
	_, err = io.ReadFull(res.Body, buf[0:1])
	if err != os.EOF {
		t.Fatalf("at end expected EOF, got %v", err)
	}
}
// writeCountingConn wraps a net.Conn and counts calls to Write so the
// tests can verify how many TCP writes a request generated.
type writeCountingConn struct {
	net.Conn
	count *int
}

// Write increments the shared counter, then delegates to the wrapped Conn.
func (c *writeCountingConn) Write(p []byte) (int, os.Error) {
	*c.count++
	return c.Conn.Write(p)
}
// TestClientWrites verifies that client requests are buffered and we
// don't send a TCP packet per line of the http request + body.
func TestClientWrites(t *testing.T) {
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
	}))
	defer ts.Close()
	writes := 0
	// Custom dialer wraps each connection so Write calls are counted.
	dialer := func(netz string, addr string) (net.Conn, os.Error) {
		c, err := net.Dial(netz, addr)
		if err == nil {
			c = &writeCountingConn{c, &writes}
		}
		return c, err
	}
	c := &Client{Transport: &Transport{Dial: dialer}}
	_, err := c.Get(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	// Buffered requests must reach the wire in one Write call.
	if writes != 1 {
		t.Errorf("Get request did %d Write calls, want 1", writes)
	}
	writes = 0
	_, err = c.PostForm(ts.URL, url.Values{"foo": {"bar"}})
	if err != nil {
		t.Fatal(err)
	}
	if writes != 1 {
		t.Errorf("Post request did %d Write calls, want 1", writes)
	}
}
|
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements sysSocket and accept for platforms that
// provide a fast path for setting SetNonblock and CloseOnExec.
// +build freebsd linux
package net
import "syscall"
// Wrapper around the socket system call that marks the returned file
// descriptor as nonblocking and close-on-exec.
// sysSocket creates a socket that is nonblocking and close-on-exec,
// using SOCK_NONBLOCK|SOCK_CLOEXEC when the kernel supports it and
// falling back to a plain socket plus fcntl-style fixups otherwise.
func sysSocket(family, sotype, proto int) (int, error) {
	s, err := syscall.Socket(family, sotype|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, proto)
	// On Linux the SOCK_NONBLOCK and SOCK_CLOEXEC flags were
	// introduced in 2.6.27 kernel and on FreeBSD both flags were
	// introduced in 10 kernel. If we get an EINVAL error on Linux
	// or EPROTONOSUPPORT error on FreeBSD, fall back to using
	// socket without them.
	if err == nil || (err != syscall.EPROTONOSUPPORT && err != syscall.EINVAL) {
		return s, err
	}
	// See ../syscall/exec_unix.go for description of ForkLock.
	// The lock closes the window between socket creation and
	// CloseOnExec during which a fork could leak the fd.
	syscall.ForkLock.RLock()
	s, err = syscall.Socket(family, sotype, proto)
	if err == nil {
		syscall.CloseOnExec(s)
	}
	syscall.ForkLock.RUnlock()
	if err != nil {
		return -1, err
	}
	if err = syscall.SetNonblock(s, true); err != nil {
		syscall.Close(s)
		return -1, err
	}
	return s, nil
}
// Wrapper around the accept system call that marks the returned file
// descriptor as nonblocking and close-on-exec.
// accept wraps the accept system call and marks the returned file
// descriptor as nonblocking and close-on-exec, preferring accept4 and
// falling back to plain accept when accept4 is unavailable.
func accept(s int) (int, syscall.Sockaddr, error) {
	ns, sa, err := syscall.Accept4(s, syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC)
	// On Linux the accept4 system call was introduced in 2.6.28
	// kernel and on FreeBSD it was introduced in 10 kernel. A
	// missing accept4 can surface as more than just ENOSYS/EINVAL:
	// some kernels (and seccomp-style filters) report EACCES or
	// EFAULT instead, so treat all of those as "fall back to accept".
	switch err {
	default: // nil and errors other than the ones listed
		return ns, sa, err
	case syscall.ENOSYS: // syscall missing
	case syscall.EINVAL: // some Linux use this instead of ENOSYS
	case syscall.EACCES: // some Linux use this instead of ENOSYS
	case syscall.EFAULT: // some Linux use this instead of ENOSYS
	}
	// See ../syscall/exec_unix.go for description of ForkLock.
	// It is probably okay to hold the lock across syscall.Accept
	// because we have put fd.sysfd into non-blocking mode.
	// However, a call to the File method will put it back into
	// blocking mode. We can't take that risk, so no use of ForkLock here.
	ns, sa, err = syscall.Accept(s)
	if err == nil {
		syscall.CloseOnExec(ns)
	}
	if err != nil {
		return -1, nil, err
	}
	if err = syscall.SetNonblock(ns, true); err != nil {
		syscall.Close(ns)
		return -1, nil, err
	}
	return ns, sa, nil
}
net: accept a few more errors in Accept4 wrapper
Fixes #7271.
LGTM=bradfitz
R=golang-codereviews, bradfitz
CC=golang-codereviews
https://golang.org/cl/84170043
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements sysSocket and accept for platforms that
// provide a fast path for setting SetNonblock and CloseOnExec.
// +build freebsd linux
package net
import "syscall"
// Wrapper around the socket system call that marks the returned file
// descriptor as nonblocking and close-on-exec.
// sysSocket returns a socket file descriptor that is both nonblocking
// and close-on-exec, using the fast SOCK_NONBLOCK|SOCK_CLOEXEC path
// when available and falling back to manual fixups otherwise.
func sysSocket(family, sotype, proto int) (int, error) {
	s, err := syscall.Socket(family, sotype|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, proto)
	// On Linux the SOCK_NONBLOCK and SOCK_CLOEXEC flags were
	// introduced in 2.6.27 kernel and on FreeBSD both flags were
	// introduced in 10 kernel. If we get an EINVAL error on Linux
	// or EPROTONOSUPPORT error on FreeBSD, fall back to using
	// socket without them.
	if err == nil || (err != syscall.EPROTONOSUPPORT && err != syscall.EINVAL) {
		return s, err
	}
	// See ../syscall/exec_unix.go for description of ForkLock.
	// Holding the lock prevents a concurrent fork from inheriting
	// the fd before CloseOnExec is applied.
	syscall.ForkLock.RLock()
	s, err = syscall.Socket(family, sotype, proto)
	if err == nil {
		syscall.CloseOnExec(s)
	}
	syscall.ForkLock.RUnlock()
	if err != nil {
		return -1, err
	}
	if err = syscall.SetNonblock(s, true); err != nil {
		syscall.Close(s)
		return -1, err
	}
	return s, nil
}
// Wrapper around the accept system call that marks the returned file
// descriptor as nonblocking and close-on-exec.
// accept wraps accept4 (nonblocking + close-on-exec in one call) and
// falls back to plain accept plus manual fixups when accept4 is not
// available on this kernel.
func accept(s int) (int, syscall.Sockaddr, error) {
	ns, sa, err := syscall.Accept4(s, syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC)
	// On Linux the accept4 system call was introduced in 2.6.28
	// kernel and on FreeBSD it was introduced in 10 kernel. If we
	// get an ENOSYS error on both Linux and FreeBSD, or EINVAL
	// error on Linux, fall back to using accept.
	switch err {
	default: // nil and errors other than the ones listed
		return ns, sa, err
	case syscall.ENOSYS: // syscall missing
	case syscall.EINVAL: // some Linux use this instead of ENOSYS
	case syscall.EACCES: // some Linux use this instead of ENOSYS
	case syscall.EFAULT: // some Linux use this instead of ENOSYS
	}
	// See ../syscall/exec_unix.go for description of ForkLock.
	// It is probably okay to hold the lock across syscall.Accept
	// because we have put fd.sysfd into non-blocking mode.
	// However, a call to the File method will put it back into
	// blocking mode. We can't take that risk, so no use of ForkLock here.
	ns, sa, err = syscall.Accept(s)
	if err == nil {
		syscall.CloseOnExec(ns)
	}
	if err != nil {
		return -1, nil, err
	}
	if err = syscall.SetNonblock(ns, true); err != nil {
		syscall.Close(ns)
		return -1, nil, err
	}
	return ns, sa, nil
}
|
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package signal implements access to incoming signals.
package signal
// BUG(rsc): This package is not yet implemented on Plan 9.
import (
"os"
"sync"
)
// handlers holds the registered signal handlers, guarded by its
// embedded mutex.
var handlers struct {
	sync.Mutex
	list []handler
}

// handler is one Notify registration: the destination channel plus
// either a single signal of interest or the wildcard flag all.
type handler struct {
	c   chan<- os.Signal
	sig os.Signal
	all bool
}
// Notify causes package signal to relay incoming signals to c.
// If no signals are listed, all incoming signals will be relayed to c.
// Otherwise, just the listed signals will.
//
// Package signal will not block sending to c: the caller must ensure
// that c has sufficient buffer space to keep up with the expected
// signal rate. For a channel used for notification of just one signal value,
// a buffer of size 1 is sufficient.
//
func Notify(c chan<- os.Signal, sig ...os.Signal) {
	if c == nil {
		panic("os/signal: Notify using nil channel")
	}
	handlers.Lock()
	defer handlers.Unlock()
	if len(sig) == 0 {
		// No signals listed: register a wildcard handler for all signals.
		enableSignal(nil)
		handlers.list = append(handlers.list, handler{c: c, all: true})
	} else {
		for _, s := range sig {
			// We use nil as a special wildcard value for enableSignal,
			// so filter it out of the list of arguments. This is safe because
			// we will never get an incoming nil signal, so discarding the
			// registration cannot affect the observed behavior.
			if s != nil {
				enableSignal(s)
				handlers.list = append(handlers.list, handler{c: c, sig: s})
			}
		}
	}
}
// process delivers sig to every matching registered handler without
// blocking: if a handler's channel is full, the signal is dropped for
// that handler.
func process(sig os.Signal) {
	handlers.Lock()
	defer handlers.Unlock()
	for _, h := range handlers.list {
		if h.all || h.sig == sig {
			// send but do not block for it
			select {
			case h.c <- sig:
			default:
			}
		}
	}
}
os/signal: windows is supported, update comment
Fixes issue 5035.
R=golang-dev, r
CC=golang-dev
https://codereview.appspot.com/7519045
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package signal implements access to incoming signals.
package signal
// BUG(rsc): This package is not yet implemented on Plan 9.
import (
"os"
"sync"
)
// handlers is the package-level registry of Notify registrations,
// protected by the embedded mutex.
var handlers struct {
	sync.Mutex
	list []handler
}

// handler pairs a destination channel with the signal it wants, or
// all == true for every signal.
type handler struct {
	c   chan<- os.Signal
	sig os.Signal
	all bool
}
// Notify causes package signal to relay incoming signals to c.
// If no signals are listed, all incoming signals will be relayed to c.
// Otherwise, just the listed signals will.
//
// Package signal will not block sending to c: the caller must ensure
// that c has sufficient buffer space to keep up with the expected
// signal rate. For a channel used for notification of just one signal value,
// a buffer of size 1 is sufficient.
//
func Notify(c chan<- os.Signal, sig ...os.Signal) {
	if c == nil {
		panic("os/signal: Notify using nil channel")
	}
	handlers.Lock()
	defer handlers.Unlock()
	if len(sig) == 0 {
		// No signals listed: register a single wildcard handler.
		enableSignal(nil)
		handlers.list = append(handlers.list, handler{c: c, all: true})
		return
	}
	for _, s := range sig {
		// nil is reserved as the wildcard argument to enableSignal, and
		// no incoming signal can ever equal nil, so skipping it here
		// cannot change observable behavior.
		if s == nil {
			continue
		}
		enableSignal(s)
		handlers.list = append(handlers.list, handler{c: c, sig: s})
	}
}
// process relays sig to each registered handler that matches it,
// dropping the signal for any handler whose channel is not ready.
func process(sig os.Signal) {
	handlers.Lock()
	defer handlers.Unlock()
	for _, h := range handlers.list {
		if !h.all && h.sig != sig {
			continue
		}
		// Non-blocking send: never stall signal delivery on a full channel.
		select {
		case h.c <- sig:
		default:
		}
	}
}
|
package system
import (
"fmt"
"log"
"os/exec"
"os/user"
"strings"
)
// User describes a user account parsed from cloud-config YAML; most
// fields map directly onto useradd(8) options.
type User struct {
	Name                string   `yaml:"name"`
	PasswordHash        string   `yaml:"passwd"`
	SSHAuthorizedKeys   []string `yaml:"ssh-authorized-keys"`
	SSHImportGithubUser string   `yaml:"coreos-ssh-import-github"`
	SSHImportURL        string   `yaml:"coreos-ssh-import-url"`
	GECOS               string   `yaml:"gecos"`
	Homedir             string   `yaml:"homedir"`
	NoCreateHome        bool     `yaml:"no-create-home"`
	PrimaryGroup        string   `yaml:"primary-group"`
	Groups              []string `yaml:"groups"`
	NoUserGroup         bool     `yaml:"no-user-group"`
	System              bool     `yaml:"system"`
	NoLogInit           bool     `yaml:"no-log-init"`
}
// UserExists reports whether a local account with u.Name exists,
// based on a successful user database lookup.
func UserExists(u *User) bool {
	_, err := user.Lookup(u.Name)
	return err == nil
}
// CreateUser creates the account described by u by invoking
// useradd(8), translating each populated field into the matching
// command-line option. The combined output is logged on failure and
// the exec error is returned.
func CreateUser(u *User) error {
	args := []string{}
	if u.PasswordHash != "" {
		args = append(args, "--password", u.PasswordHash)
	} else {
		// "*" is an invalid hash, so password login stays disabled.
		args = append(args, "--password", "*")
	}
	if u.GECOS != "" {
		args = append(args, "--comment", fmt.Sprintf("%q", u.GECOS))
	}
	if u.Homedir != "" {
		args = append(args, "--home-dir", u.Homedir)
	}
	if u.NoCreateHome {
		args = append(args, "--no-create-home")
	} else {
		args = append(args, "--create-home")
	}
	if u.PrimaryGroup != "" {
		// useradd has no --primary-group option; the primary group is
		// set with --gid (which also accepts a group name).
		args = append(args, "--gid", u.PrimaryGroup)
	}
	if len(u.Groups) > 0 {
		args = append(args, "--groups", strings.Join(u.Groups, ","))
	}
	if u.NoUserGroup {
		args = append(args, "--no-user-group")
	}
	if u.System {
		args = append(args, "--system")
	}
	if u.NoLogInit {
		args = append(args, "--no-log-init")
	}
	args = append(args, u.Name)
	output, err := exec.Command("useradd", args...).CombinedOutput()
	if err != nil {
		log.Printf("Command 'useradd %s' failed: %v\n%s", strings.Join(args, " "), err, output)
	}
	return err
}
// SetUserPassword sets the password of user to the pre-hashed value
// hash by piping "user:hash" into chpasswd -e. All failures are
// returned as errors; a library helper must never log.Fatal, which
// would terminate the whole process.
func SetUserPassword(user, hash string) error {
	cmd := exec.Command("/usr/sbin/chpasswd", "-e")
	stdin, err := cmd.StdinPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		// Previously log.Fatal(err): return instead so the caller decides.
		return err
	}
	arg := fmt.Sprintf("%s:%s", user, hash)
	if _, err := stdin.Write([]byte(arg)); err != nil {
		// Close the pipe so the child sees EOF and can exit.
		stdin.Close()
		return err
	}
	stdin.Close()
	return cmd.Wait()
}
fix(user): use correct primary group flag for useradd
package system
import (
"fmt"
"log"
"os/exec"
"os/user"
"strings"
)
// User is a user account definition from cloud-config YAML; fields
// correspond to useradd(8) options.
type User struct {
	Name                string   `yaml:"name"`
	PasswordHash        string   `yaml:"passwd"`
	SSHAuthorizedKeys   []string `yaml:"ssh-authorized-keys"`
	SSHImportGithubUser string   `yaml:"coreos-ssh-import-github"`
	SSHImportURL        string   `yaml:"coreos-ssh-import-url"`
	GECOS               string   `yaml:"gecos"`
	Homedir             string   `yaml:"homedir"`
	NoCreateHome        bool     `yaml:"no-create-home"`
	PrimaryGroup        string   `yaml:"primary-group"`
	Groups              []string `yaml:"groups"`
	NoUserGroup         bool     `yaml:"no-user-group"`
	System              bool     `yaml:"system"`
	NoLogInit           bool     `yaml:"no-log-init"`
}
// UserExists reports whether an account named u.Name exists in the
// local user database.
func UserExists(u *User) bool {
	// A successful lookup means the account is present.
	if _, lookupErr := user.Lookup(u.Name); lookupErr != nil {
		return false
	}
	return true
}
// CreateUser creates the account described by u via useradd(8),
// mapping each populated field to the corresponding flag. On failure
// the command's combined output is logged and the error returned.
func CreateUser(u *User) error {
	args := []string{}
	if u.PasswordHash != "" {
		args = append(args, "--password", u.PasswordHash)
	} else {
		// "*" is not a valid hash, so password login stays disabled.
		args = append(args, "--password", "*")
	}
	if u.GECOS != "" {
		args = append(args, "--comment", fmt.Sprintf("%q", u.GECOS))
	}
	if u.Homedir != "" {
		args = append(args, "--home-dir", u.Homedir)
	}
	if u.NoCreateHome {
		args = append(args, "--no-create-home")
	} else {
		args = append(args, "--create-home")
	}
	if u.PrimaryGroup != "" {
		// --gid sets the primary group (accepts a name or numeric id).
		args = append(args, "--gid", u.PrimaryGroup)
	}
	if len(u.Groups) > 0 {
		args = append(args, "--groups", strings.Join(u.Groups, ","))
	}
	if u.NoUserGroup {
		args = append(args, "--no-user-group")
	}
	if u.System {
		args = append(args, "--system")
	}
	if u.NoLogInit {
		args = append(args, "--no-log-init")
	}
	args = append(args, u.Name)
	output, err := exec.Command("useradd", args...).CombinedOutput()
	if err != nil {
		log.Printf("Command 'useradd %s' failed: %v\n%s", strings.Join(args, " "), err, output)
	}
	return err
}
// SetUserPassword sets user's password to the pre-hashed value hash
// by writing "user:hash" to chpasswd -e. Every failure is returned to
// the caller; log.Fatal is avoided because a helper must not kill the
// whole process.
func SetUserPassword(user, hash string) error {
	cmd := exec.Command("/usr/sbin/chpasswd", "-e")
	stdin, err := cmd.StdinPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		// Previously log.Fatal(err): return instead so the caller decides.
		return err
	}
	arg := fmt.Sprintf("%s:%s", user, hash)
	if _, err := stdin.Write([]byte(arg)); err != nil {
		// Close the pipe so the child sees EOF and can exit.
		stdin.Close()
		return err
	}
	stdin.Close()
	return cmd.Wait()
}
|
package gautomator
import (
"fmt"
"github.com/gonum/matrix/mat64" // Matrix
"io"
"log"
"time"
)
const (
	// Task.Status values (non-negative statuses are exit codes).
	TASKQUEUED     = -3
	TASKADVERTIZED = -2
	TASKRUNNING    = -1
	// Task.Father sentinel values. NOTE(review): ORPHAN (-2) and FATHER (-1)
	// numerically overlap TASKADVERTIZED and TASKRUNNING above; they are
	// used in a different field, but confirm the overlap is intentional.
	ORPHAN = -2
	FATHER = -1
)
// A task is an action executed by a module
// A task is an action executed by a module
type Task struct {
	Id       int `json:"id"`
	Father   int // parent task id, or the ORPHAN/FATHER sentinels
	OriginId int `json:"originId"`
	Origin   string `json:"origin"`
	Name     string `json:"name"` //the task name
	Node     string `json:"node"` // The node name
	Module   string `json:"module"`
	Args     []string `json:"args"`
	Status   int `json:"status"` //-3: queued
	// -2 Advertized (infored that the dependencies are done)
	// -1: running
	// >=0 : return code
	StartTime      time.Time `json:"startTime"`
	EndTime        time.Time `json:"endTime"`
	TaskCanRunChan chan bool // true: run, false: wait
}
// This is the structure corresponding to the "dot-graph" of a task list
// We store the nodes in a map
// The index is the source node
// TaskGraphStructure holds a task dependency graph: the tasks keyed by
// id plus their degree and adjacency matrices (ids double as matrix
// row/column indices).
type TaskGraphStructure struct {
	Tasks           map[int]*Task
	DegreeMatrix    *mat64.Dense
	AdjacencyMatrix *mat64.Dense // Row id is the map id of the source task
	// Col id is the map id of the destination task
}
// PrintAdjacencyMatrix writes the adjacency matrix to stdout with task
// names labelling both the columns and the rows.
func (this *TaskGraphStructure) PrintAdjacencyMatrix() {
	rows, cols := this.AdjacencyMatrix.Dims()
	// Header row: names of the destination (column) tasks.
	fmt.Printf(" ")
	for col := 0; col < cols; col++ {
		fmt.Printf("%v ", this.Tasks[col].Name)
	}
	fmt.Printf("\n")
	for row := 0; row < rows; row++ {
		// Each data row starts with the source task's name.
		fmt.Printf("%v ", this.Tasks[row].Name)
		for col := 0; col < cols; col++ {
			fmt.Printf("%v ", this.AdjacencyMatrix.At(row, col))
		}
		fmt.Printf("\n")
	}
}
// PrintDegreeMatrix writes the degree matrix to stdout, one row per line.
func (this *TaskGraphStructure) PrintDegreeMatrix() {
	rows, cols := this.DegreeMatrix.Dims()
	for row := 0; row < rows; row++ {
		for col := 0; col < cols; col++ {
			fmt.Printf("%v ", this.DegreeMatrix.At(row, col))
		}
		fmt.Printf("\n")
	}
}
// NewTask returns a queued placeholder Task with sentinel ids, "null"
// naming fields, a single empty argument slot, and zero-value times.
func NewTask() *Task {
	// The zero instant used for both start and end times.
	epoch := time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)
	// Named fields make the former positional defaults explicit.
	return &Task{
		Id:             -1,
		Father:         ORPHAN,
		OriginId:       -1,
		Origin:         "null",
		Name:           "null",
		Node:           "null",
		Module:         "dummy",
		Args:           make([]string, 1),
		Status:         TASKQUEUED,
		StartTime:      epoch,
		EndTime:        epoch,
		TaskCanRunChan: make(chan bool),
	}
}
// NewTaskGraphStructure returns an empty graph: no tasks and 0x0 matrices.
func NewTaskGraphStructure() *TaskGraphStructure {
	// Keyed fields guard against any future reordering of the struct.
	return &TaskGraphStructure{
		Tasks:           make(map[int]*Task),
		DegreeMatrix:    mat64.NewDense(0, 0, nil),
		AdjacencyMatrix: mat64.NewDense(0, 0, nil),
	}
}
// AugmentTaskStructure merges taskStructure into the receiver and returns
// the receiver: both matrices are grown and the argument's matrices are
// copied into the new bottom-right block, and the argument's tasks are
// appended with ids re-based after the existing ones.
func (this *TaskGraphStructure) AugmentTaskStructure(taskStructure *TaskGraphStructure) *TaskGraphStructure {
	// Merge the adjacency matrix. Grow zero-fills the added rows/columns,
	// so only the bottom-right block needs to be written. (The previous
	// switch-based copy used mismatched >/>= bounds and silently skipped
	// the r==initialRowLen / c==initialColLen border cells, relying on
	// Grow's zero fill anyway.)
	initialRowLen, initialColLen := this.AdjacencyMatrix.Dims()
	addedRowLen, addedColLen := taskStructure.AdjacencyMatrix.Dims()
	this.AdjacencyMatrix = mat64.DenseCopyOf(this.AdjacencyMatrix.Grow(addedRowLen, addedColLen))
	for r := 0; r < addedRowLen; r++ {
		for c := 0; c < addedColLen; c++ {
			this.AdjacencyMatrix.Set(initialRowLen+r, initialColLen+c, taskStructure.AdjacencyMatrix.At(r, c))
		}
	}
	// Merge the degree matrix the same way.
	initialRowLen, initialColLen = this.DegreeMatrix.Dims()
	addedRowLen, addedColLen = taskStructure.DegreeMatrix.Dims()
	this.DegreeMatrix = mat64.DenseCopyOf(this.DegreeMatrix.Grow(addedRowLen, addedColLen))
	for r := 0; r < addedRowLen; r++ {
		for c := 0; c < addedColLen; c++ {
			this.DegreeMatrix.Set(initialRowLen+r, initialColLen+c, taskStructure.DegreeMatrix.At(r, c))
		}
	}
	// Append the new tasks, re-basing their ids after the existing ones.
	// NOTE(review): this assumes taskStructure.Tasks is keyed 0..n-1 so the
	// map key doubles as an offset — confirm with callers.
	actualSize := len(this.Tasks)
	for i, task := range taskStructure.Tasks {
		task.Id = actualSize + i
		this.Tasks[actualSize+i] = task
	}
	return this
}
// getTaskFromName returns the ids of every task whose Name equals name.
// When nothing matches it returns a single-element slice holding -1.
func (this *TaskGraphStructure) getTaskFromName(name string) []int {
	matches := make([]int, 0)
	for _, task := range this.Tasks {
		if task.Name == name {
			matches = append(matches, task.Id)
		}
	}
	if len(matches) == 0 {
		return []int{-1}
	}
	return matches
}
// colSum returns the sum of column colId across every row of matrix.
func colSum(matrix *mat64.Dense, colId int) float64 {
	nRows, _ := matrix.Dims()
	var total float64
	for i := 0; i < nRows; i++ {
		total += matrix.At(i, colId)
	}
	return total
}
// rowSum returns the sum of row rowId across every column of matrix.
func rowSum(matrix *mat64.Dense, rowId int) float64 {
	_, nCols := matrix.Dims()
	var total float64
	for i := 0; i < nCols; i++ {
		total += matrix.At(rowId, i)
	}
	return total
}
// Relink resolves sub-graph membership: when a task's Origin names another
// task of this graph, that named task is turned into a "meta" task and
// edges are added so the sub-graph hangs below it.
//
// Example: given the graphs
//	digraph bla { a -> b; b -> c; }
//	digraph b   { alpha -> gamma; }
// alpha and gamma will have "b" as Origin, so links are added in the
// AdjacencyMatrix (and eventually the DegreeMatrix) between them and "b".
func (this *TaskGraphStructure) Relink() *TaskGraphStructure {
	// backup records, per origin name, the (row, col) pairs set to 1 below,
	// so the fan-out pass does not propagate those same edges again.
	backup := make(map[string][]int, 0)
	_, col := this.AdjacencyMatrix.Dims()
	for _, task := range this.Tasks {
		// Resolve OriginId from the Origin name if not already done.
		id := this.getTaskFromName(task.Origin)
		if id[0] != -1 && task.OriginId == -1 {
			task.OriginId = id[0]
		}
		// No incoming edge: this task is an entry point of its sub-graph;
		// attach it under its origin task, which becomes a meta task.
		if colSum(this.AdjacencyMatrix, task.Id) == 0 {
			// TODO There should be only one task, otherwise display an error
			if task.OriginId != -1 {
				// Task is a meta task
				this.Tasks[task.OriginId].Module = "meta"
				this.AdjacencyMatrix.Set(task.OriginId, task.Id, float64(1))
				backup[task.Origin] = append(backup[task.Origin], task.OriginId, task.Id)
			}
		}
		// No outgoing edge: this task is a leaf of its sub-graph; it
		// inherits the successors of its origin task, skipping edges
		// recorded in backup and siblings sharing the same origin.
		if rowSum(this.AdjacencyMatrix, task.Id) == 0 {
			// TODO There should be only one task, otherwise display an error
			if task.OriginId != -1 {
				for c := 0; c < col; c++ {
					add := true
					// backup holds flat (row, col) pairs — walk it two by two.
					for counter := 0; counter < len(backup[task.Origin])-1; counter += 2 {
						if backup[task.Origin][counter] == task.OriginId && backup[task.Origin][counter+1] == c {
							add = false
						}
					}
					if add == true && this.Tasks[c].Origin != task.Origin {
						this.AdjacencyMatrix.Set(task.Id, c, this.AdjacencyMatrix.At(task.Id, c)+this.AdjacencyMatrix.At(task.OriginId, c))
					}
				}
			}
		}
	}
	//TODO: complete the degreematrix
	return this
}
// PrintDot writes the graph to w in GraphViz dot syntax: one node per task
// (meta tasks drawn as diamonds, the rest as records), then one edge per
// 1-entry of the adjacency matrix.
func (this *TaskGraphStructure) PrintDot(w io.Writer) {
	fmt.Fprintln(w, "digraph G {")
	// Node definitions.
	for _, task := range this.Tasks {
		fmt.Fprintf(w, "\t\"%v\" [\n", task.Id)
		fmt.Fprintf(w, "\t\tid = \"%v\"\n", task.Id)
		if task.Module == "meta" {
			fmt.Fprintln(w, "\t\tshape=diamond")
			fmt.Fprintf(w, "\t\tlabel=\"%v\"", task.Name)
		} else {
			fmt.Fprintf(w, "\t\tlabel = \"<name>%v|<node>%v|<module>%v\"\n", task.Name, task.Node, task.Module)
			fmt.Fprintf(w, "\t\tshape = \"record\"\n")
		}
		fmt.Fprintf(w, "\t];\n")
	}
	// Edge definitions.
	nRows, nCols := this.AdjacencyMatrix.Dims()
	for src := 0; src < nRows; src++ {
		for dst := 0; dst < nCols; dst++ {
			if this.AdjacencyMatrix.At(src, dst) == 1 {
				fmt.Fprintf(w, "\t%v -> %v\n", this.Tasks[src].Id, this.Tasks[dst].Id)
			}
		}
	}
	fmt.Fprintln(w, "}")
}
// getTasksWithOrigin returns copies of every task whose Origin equals
// origin (an empty, non-nil slice when nothing matches).
func (this *TaskGraphStructure) getTasksWithOrigin(origin string) []Task {
	matching := make([]Task, 0)
	for _, candidate := range this.Tasks {
		if candidate.Origin != origin {
			continue
		}
		matching = append(matching, *candidate)
	}
	return matching
}
// instanciate duplicates (or adapts in place) every task whose Name equals
// instance.Taskname, once per host in instance.Hosts, and returns the
// newly created tasks.
func (this *TaskGraphStructure) instanciate(instance TaskInstance) []*Task {
	returnTasks := make([]*Task, 0)
	// First duplicate the tasks with same name
	for _, task := range this.Tasks {
		if task.Name == instance.Taskname {
			for _, node := range instance.Hosts {
				switch {
				case task.Father == FATHER:
					// Already instanciated once: clone the template for this node.
					log.Printf("Duplicating %v on node %v", task.Name, node)
					row, col := this.AdjacencyMatrix.Dims()
					newId := row
					newTask := NewTask()
					newTask.Father = task.Id
					newTask.OriginId = task.Id
					newTask.Id = newId
					newTask.Name = task.Name
					newTask.Module = instance.Module
					newTask.Origin = task.Origin
					newTask.Node = node // Set the node to the new one
					newTask.Args = instance.Args
					this.Tasks[newId] = newTask
					returnTasks = append(returnTasks, newTask)
					// Grow the adjacency matrix by one row/column and copy
					// the template's edges onto the clone, skipping tasks
					// that belong to the instanciated sub-graph itself.
					this.AdjacencyMatrix = mat64.DenseCopyOf(this.AdjacencyMatrix.Grow(1, 1))
					for r := 0; r < row; r++ {
						for c := 0; c < col; c++ {
							if this.Tasks[r].Origin != instance.Taskname {
								this.AdjacencyMatrix.Set(r, newId, this.AdjacencyMatrix.At(r, task.Id))
							}
							if this.Tasks[c].Origin != instance.Taskname {
								this.AdjacencyMatrix.Set(newId, c, this.AdjacencyMatrix.At(task.Id, c))
							}
						}
					}
					// NOTE(review): the duplicated sub-structure is computed
					// and then discarded — verify it should not be merged
					// back via AugmentTaskStructure.
					_ = this.duplicateSubtasks(newTask, node, instance)
				case task.Father == ORPHAN:
					// First instanciation: adapt the template in place
					// instead of duplicating it.
					task.Node = node
					task.Module = instance.Module
					task.Args = instance.Args
					this.adaptSubtask(task, node, instance)
					task.Father = FATHER
				}
				// Then duplicate the tasks with same Father
			}
		}
	}
	return returnTasks
}
// adaptSubtask re-homes every direct successor of father whose Origin is
// father's name: it becomes a child of father and inherits node plus the
// instance's module and args.
func (this *TaskGraphStructure) adaptSubtask(father *Task, node string, instance TaskInstance) {
	_, nCols := this.AdjacencyMatrix.Dims()
	for col := 0; col < nCols; col++ {
		if this.AdjacencyMatrix.At(father.Id, col) != 1 {
			continue
		}
		child := this.Tasks[col]
		if child.Origin != father.Name {
			continue
		}
		child.Father = father.Id
		child.Node = node
		child.Module = instance.Module
		child.Args = instance.Args
	}
}
// duplicateSubtasks builds a new TaskGraphStructure containing a copy of
// every task whose OriginId matches father.Father, re-homed under father,
// assigned to node and given the instance's module/args. The sub-graph's
// adjacency matrix is extracted from the parent graph.
func (this *TaskGraphStructure) duplicateSubtasks(father *Task, node string, instance TaskInstance) *TaskGraphStructure {
	taskStructure := NewTaskGraphStructure()
	myindex := 0
	for _, task := range this.Tasks {
		if father.Father == task.OriginId {
			// Task matches: create a copy re-homed under father.
			newTask := NewTask()
			newTask.Id = myindex
			// BUG FIX: the copy must keep the source task's name. NewTask()
			// leaves Name as "null" and nothing else set it, so every
			// duplicated subtask was anonymous and later lookups by name
			// (getTaskFromName, Relink) could never resolve it.
			newTask.Name = task.Name
			newTask.Node = node
			newTask.Father = task.Id
			newTask.OriginId = father.Id
			newTask.Module = instance.Module
			newTask.Args = instance.Args
			// Add it to the structure and grow both matrices with it.
			taskStructure.Tasks[myindex] = newTask
			taskStructure.AdjacencyMatrix = mat64.DenseCopyOf(taskStructure.AdjacencyMatrix.Grow(1, 1))
			taskStructure.DegreeMatrix = mat64.DenseCopyOf(taskStructure.DegreeMatrix.Grow(1, 1))
			myindex += 1
		}
	}
	// Copy the dependency edges between the duplicated tasks out of the
	// parent graph, addressed through each copy's Father (source task) id.
	row, col := taskStructure.AdjacencyMatrix.Dims()
	for r := 0; r < row; r++ {
		for c := 0; c < col; c++ {
			taskStructure.AdjacencyMatrix.Set(r, c, this.AdjacencyMatrix.At(taskStructure.Tasks[r].Father, taskStructure.Tasks[c].Father))
		}
	}
	return taskStructure
}
// InstanciateTaskStructure walks the task definition, resolves each
// instance's module path (or falls back to "dummy"), instanciates it and
// relinks the graph after each instance.
func (this *TaskGraphStructure) InstanciateTaskStructure(taskDefinition TaskDefinition) {
	for _, instance := range taskDefinition {
		log.Printf("Instance %v %v %v", instance.Taskname, instance.Hosts, instance.Module)
		if instance.Module == "" {
			instance.Module = "dummy"
		} else {
			instance.Module = "../examples/modules/" + instance.Module
		}
		this.instanciate(instance)
		this.Relink()
	}
}
Added debug information to the dot output
package gautomator
import (
"fmt"
"github.com/gonum/matrix/mat64" // Matrix
"io"
"log"
"time"
)
// Task lifecycle states (used in Task.Status) and parenthood markers
// (used in Task.Father).
// NOTE(review): TASKADVERTIZED/ORPHAN share -2 and TASKRUNNING/FATHER
// share -1; they apply to different fields — confirm the overlap is
// intentional.
const (
	TASKQUEUED     = -3 // queued, waiting for its dependencies
	TASKADVERTIZED = -2 // informed that its dependencies are done
	TASKRUNNING    = -1 // currently executing
	ORPHAN         = -2 // Father marker: task not attached to a parent yet
	FATHER         = -1 // Father marker: task acts as a template/parent
)

// A Task is an action executed by a module.
type Task struct {
	Id       int      `json:"id"`       // key of this task in TaskGraphStructure.Tasks
	Father   int      // parent task id, or the ORPHAN/FATHER marker
	OriginId int      `json:"originId"` // id of the task named by Origin; -1 while unresolved
	Origin   string   `json:"origin"`   // name of the (sub-)graph this task was defined in
	Name     string   `json:"name"`     // the task name
	Node     string   `json:"node"`     // the node (host) name the task runs on
	Module   string   `json:"module"`   // module implementing the task ("meta" for grouping tasks)
	Args     []string `json:"args"`     // arguments handed to the module
	Status   int      `json:"status"`   //-3: queued
	// -2: advertized (informed that the dependencies are done)
	// -1: running
	// >=0: return code
	StartTime      time.Time `json:"startTime"`
	EndTime        time.Time `json:"endTime"`
	TaskCanRunChan chan bool // true: run, false: wait
}

// TaskGraphStructure is the structure corresponding to the "dot-graph" of
// a task list. Nodes are stored in a map keyed by task id; the same ids
// index the rows/columns of both matrices.
type TaskGraphStructure struct {
	Tasks           map[int]*Task
	DegreeMatrix    *mat64.Dense
	AdjacencyMatrix *mat64.Dense // row id is the map id of the source task,
	// col id is the map id of the destination task
}
// PrintAdjacencyMatrix dumps the adjacency matrix to stdout, labelling the
// columns and rows with the corresponding task names.
func (this *TaskGraphStructure) PrintAdjacencyMatrix() {
	rowSize, colSize := this.AdjacencyMatrix.Dims()
	fmt.Printf(" ")
	for c := 0; c < colSize; c++ {
		fmt.Printf("%v ", this.Tasks[c].Name)
	}
	fmt.Printf("\n")
	for r := 0; r < rowSize; r++ {
		fmt.Printf("%v ", this.Tasks[r].Name)
		for c := 0; c < colSize; c++ {
			fmt.Printf("%v ", this.AdjacencyMatrix.At(r, c))
		}
		fmt.Printf("\n")
	}
}

// PrintDegreeMatrix dumps the raw degree matrix to stdout, one row per line.
func (this *TaskGraphStructure) PrintDegreeMatrix() {
	rowSize, colSize := this.DegreeMatrix.Dims()
	for r := 0; r < rowSize; r++ {
		for c := 0; c < colSize; c++ {
			fmt.Printf("%v ", this.DegreeMatrix.At(r, c))
		}
		fmt.Printf("\n")
	}
}
// NewTask returns a fresh Task in the queued state, with placeholder
// identity fields ("null"/"dummy"), zero-era timestamps and an unbuffered
// run-gate channel.
func NewTask() *Task {
	// Keyed fields instead of a positional literal: a positional literal
	// silently misassigns every value if the field order of Task changes.
	return &Task{
		Id:             -1,
		Father:         ORPHAN,
		OriginId:       -1,
		Origin:         "null",
		Name:           "null",
		Node:           "null",
		Module:         "dummy",
		Args:           make([]string, 1),
		Status:         TASKQUEUED,
		StartTime:      time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),
		EndTime:        time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),
		TaskCanRunChan: make(chan bool),
	}
}

// NewTaskGraphStructure returns an empty graph: no tasks and 0x0 matrices.
func NewTaskGraphStructure() *TaskGraphStructure {
	return &TaskGraphStructure{
		Tasks:           make(map[int]*Task),
		DegreeMatrix:    mat64.NewDense(0, 0, nil),
		AdjacencyMatrix: mat64.NewDense(0, 0, nil),
	}
}
// AugmentTaskStructure merges taskStructure into the receiver and returns
// the receiver: both matrices are grown, the argument's matrices are
// copied into the bottom-right block, and the argument's tasks are
// appended with ids re-based after the existing ones.
func (this *TaskGraphStructure) AugmentTaskStructure(taskStructure *TaskGraphStructure) *TaskGraphStructure {
	// Merge the adjacency matrix.
	// NOTE(review): the switch mixes > and >= bounds, so the border cells
	// at r==initialRowLen / c==initialColLen match no case; this only works
	// because Grow zero-fills the added area — confirm and tidy.
	initialRowLen, initialColLen := this.AdjacencyMatrix.Dims()
	addedRowLen, addedColLen := taskStructure.AdjacencyMatrix.Dims()
	this.AdjacencyMatrix = mat64.DenseCopyOf(this.AdjacencyMatrix.Grow(addedRowLen, addedColLen))
	//a, b := this.AdjacencyMatrix.Dims()
	for r := 0; r < initialRowLen+addedRowLen; r++ {
		for c := 0; c < initialColLen+addedColLen; c++ {
			switch {
			case r < initialRowLen && c < initialColLen:
				// If we are in the original matrix: do nothing
			case r < initialRowLen && c > initialColLen:
				// If outside, put some zero
				this.AdjacencyMatrix.Set(r, c, float64(0))
			case r > initialRowLen && c < initialColLen:
				// If outside, put some zero
				this.AdjacencyMatrix.Set(r, c, float64(0))
			case r >= initialRowLen && c >= initialColLen:
				// Add the new matrix
				this.AdjacencyMatrix.Set(r, c, taskStructure.AdjacencyMatrix.At(r-initialRowLen, c-initialColLen))
			}
		}
	}
	// Merge the degree matrix the same way.
	initialRowLen, initialColLen = this.DegreeMatrix.Dims()
	addedRowLen, addedColLen = taskStructure.DegreeMatrix.Dims()
	this.DegreeMatrix = mat64.DenseCopyOf(this.DegreeMatrix.Grow(addedRowLen, addedColLen))
	for r := 0; r < initialRowLen+addedRowLen; r++ {
		for c := 0; c < initialColLen+addedColLen; c++ {
			switch {
			case r < initialRowLen && c < initialColLen:
				// If we are in the original matrix: do nothing
			case r < initialRowLen && c > initialColLen:
				// If outside, set zero
				this.DegreeMatrix.Set(r, c, float64(0))
			case r > initialRowLen && c < initialColLen:
				// If outside, set zero
				this.DegreeMatrix.Set(r, c, float64(0))
			case r >= initialRowLen && c >= initialColLen:
				// Add the new matrix
				this.DegreeMatrix.Set(r, c, taskStructure.DegreeMatrix.At(r-initialRowLen, c-initialColLen))
			}
		}
	}
	// Append the new tasks, re-basing their ids after the existing ones.
	// NOTE(review): assumes taskStructure.Tasks is keyed 0..n-1 so the map
	// key doubles as an offset — confirm with callers.
	actualSize := len(this.Tasks)
	for i, task := range taskStructure.Tasks {
		task.Id = actualSize + i
		this.Tasks[actualSize+i] = task
	}
	return this
}

// getTaskFromName returns the ids of every task whose Name equals name,
// or a single-element slice holding -1 when nothing matches.
func (this *TaskGraphStructure) getTaskFromName(name string) []int {
	indexA := make([]int, 1)
	indexA[0] = -1
	for _, task := range this.Tasks {
		if task.Name == name {
			if indexA[0] == -1 {
				// First match replaces the -1 sentinel.
				indexA = append(indexA[1:], task.Id)
			} else {
				indexA = append(indexA, task.Id)
			}
		}
	}
	return indexA
}

// colSum returns the sum of column colId across every row of matrix.
func colSum(matrix *mat64.Dense, colId int) float64 {
	row, _ := matrix.Dims()
	sum := float64(0)
	for r := 0; r < row; r++ {
		sum += matrix.At(r, colId)
	}
	return sum
}

// rowSum returns the sum of row rowId across every column of matrix.
func rowSum(matrix *mat64.Dense, rowId int) float64 {
	_, col := matrix.Dims()
	sum := float64(0)
	for c := 0; c < col; c++ {
		sum += matrix.At(rowId, c)
	}
	return sum
}
// Relink resolves sub-graph membership: when a task's Origin names another
// task of this graph, that named task becomes a "meta" task and edges are
// added so the sub-graph hangs below it.
//
// Example: given the graphs
//	digraph bla { a -> b; b -> c; }
//	digraph b   { alpha -> gamma; }
// alpha and gamma will have "b" as Origin, so links are added in the
// AdjacencyMatrix (and eventually the DegreeMatrix) between them and "b".
func (this *TaskGraphStructure) Relink() *TaskGraphStructure {
	// backup records, per origin name, the (row, col) pairs set to 1 below,
	// so the fan-out pass does not propagate those same edges again.
	backup := make(map[string][]int, 0)
	_, col := this.AdjacencyMatrix.Dims()
	for _, task := range this.Tasks {
		// Resolve OriginId from the Origin name if not already done.
		id := this.getTaskFromName(task.Origin)
		if id[0] != -1 && task.OriginId == -1 {
			task.OriginId = id[0]
		}
		// No incoming edge: this task is an entry point of its sub-graph;
		// attach it under its origin task, which becomes a meta task.
		if colSum(this.AdjacencyMatrix, task.Id) == 0 {
			// TODO There should be only one task, otherwise display an error
			if task.OriginId != -1 {
				// Task is a meta task
				this.Tasks[task.OriginId].Module = "meta"
				this.AdjacencyMatrix.Set(task.OriginId, task.Id, float64(1))
				backup[task.Origin] = append(backup[task.Origin], task.OriginId, task.Id)
			}
		}
		// No outgoing edge: this task is a leaf of its sub-graph; it
		// inherits the successors of its origin task, skipping edges
		// recorded in backup and siblings sharing the same origin.
		if rowSum(this.AdjacencyMatrix, task.Id) == 0 {
			// TODO There should be only one task, otherwise display an error
			if task.OriginId != -1 {
				for c := 0; c < col; c++ {
					add := true
					// backup holds flat (row, col) pairs — walk it two by two.
					for counter := 0; counter < len(backup[task.Origin])-1; counter += 2 {
						if backup[task.Origin][counter] == task.OriginId && backup[task.Origin][counter+1] == c {
							add = false
						}
					}
					if add == true && this.Tasks[c].Origin != task.Origin {
						this.AdjacencyMatrix.Set(task.Id, c, this.AdjacencyMatrix.At(task.Id, c)+this.AdjacencyMatrix.At(task.OriginId, c))
					}
				}
			}
		}
	}
	//TODO: complete the degreematrix
	return this
}
// PrintDot writes the graph to w in GraphViz dot syntax: one node per task
// (meta tasks as diamonds, the rest as records including the task id),
// then one edge per 1-entry of the adjacency matrix.
func (this *TaskGraphStructure) PrintDot(w io.Writer) {
	fmt.Fprintln(w, "digraph G {")
	// Writing node definition
	for _, task := range this.Tasks {
		fmt.Fprintf(w, "\t\"%v\" [\n", task.Id)
		fmt.Fprintf(w, "\t\tid = \"%v\"\n", task.Id)
		if task.Module == "meta" {
			fmt.Fprintln(w, "\t\tshape=diamond")
			fmt.Fprintf(w, "\t\tlabel=\"%v\"", task.Name)
		} else {
			// Record label carries the id for debugging.
			fmt.Fprintf(w, "\t\tlabel = \"<name>%v|<id>%v|<node>%v|<module>%v\"\n", task.Name, task.Id, task.Node, task.Module)
			fmt.Fprintf(w, "\t\tshape = \"record\"\n")
		}
		fmt.Fprintf(w, "\t];\n")
	}
	// Edge definitions.
	row, col := this.AdjacencyMatrix.Dims()
	for r := 0; r < row; r++ {
		for c := 0; c < col; c++ {
			if this.AdjacencyMatrix.At(r, c) == 1 {
				fmt.Fprintf(w, "\t%v -> %v\n", this.Tasks[r].Id, this.Tasks[c].Id)
			}
		}
	}
	fmt.Fprintln(w, "}")
}

// getTasksWithOrigin returns copies of every task whose Origin equals
// origin (an empty, non-nil slice when nothing matches).
func (this *TaskGraphStructure) getTasksWithOrigin(origin string) []Task {
	returnTasks := make([]Task, 0)
	for _, task := range this.Tasks {
		if task.Origin == origin {
			returnTasks = append(returnTasks, *task)
		}
	}
	return returnTasks
}
// instanciate duplicates (or adapts in place) every task whose Name equals
// instance.Taskname, once per host in instance.Hosts, and returns the
// newly created tasks.
func (this *TaskGraphStructure) instanciate(instance TaskInstance) []*Task {
	returnTasks := make([]*Task, 0)
	// First duplicate the tasks with same name
	for _, task := range this.Tasks {
		if task.Name == instance.Taskname {
			for _, node := range instance.Hosts {
				switch {
				case task.Father == FATHER:
					// Already instanciated once: clone the template for this node.
					log.Printf("Duplicating %v on node %v", task.Name, node)
					row, col := this.AdjacencyMatrix.Dims()
					newId := row
					newTask := NewTask()
					newTask.Father = task.Id
					newTask.OriginId = task.Id
					newTask.Id = newId
					newTask.Name = task.Name
					newTask.Module = instance.Module
					newTask.Origin = task.Origin
					newTask.Node = node // Set the node to the new one
					newTask.Args = instance.Args
					this.Tasks[newId] = newTask
					returnTasks = append(returnTasks, newTask)
					// Grow the adjacency matrix by one row/column and copy
					// the template's edges onto the clone, skipping tasks
					// that belong to the instanciated sub-graph itself.
					this.AdjacencyMatrix = mat64.DenseCopyOf(this.AdjacencyMatrix.Grow(1, 1))
					for r := 0; r < row; r++ {
						for c := 0; c < col; c++ {
							if this.Tasks[r].Origin != instance.Taskname {
								this.AdjacencyMatrix.Set(r, newId, this.AdjacencyMatrix.At(r, task.Id))
							}
							if this.Tasks[c].Origin != instance.Taskname {
								this.AdjacencyMatrix.Set(newId, c, this.AdjacencyMatrix.At(task.Id, c))
							}
						}
					}
					// Merge the duplicated sub-graph back into this graph.
					// NOTE(review): AugmentTaskStructure mutates and returns
					// the same receiver, so reassigning the local `this` is a
					// no-op for the caller — confirm this is intended.
					this = this.AugmentTaskStructure(this.duplicateSubtasks(newTask, node, instance))
				case task.Father == ORPHAN:
					// First instanciation: adapt the template in place
					// instead of duplicating it.
					task.Node = node
					task.Module = instance.Module
					task.Args = instance.Args
					this.adaptSubtask(task, node, instance)
					task.Father = FATHER
				}
				// Then duplicate the tasks with same Father
			}
		}
	}
	return returnTasks
}

// adaptSubtask re-homes every direct successor of father whose Origin is
// father's name: it becomes a child of father and inherits node plus the
// instance's module and args.
func (this *TaskGraphStructure) adaptSubtask(father *Task, node string, instance TaskInstance) {
	_, col := this.AdjacencyMatrix.Dims()
	for c := 0; c < col; c++ {
		if this.AdjacencyMatrix.At(father.Id, c) == 1 && this.Tasks[c].Origin == father.Name {
			this.Tasks[c].Father = father.Id
			this.Tasks[c].Node = node
			this.Tasks[c].Module = instance.Module
			this.Tasks[c].Args = instance.Args
		}
	}
}

// duplicateSubtasks builds a new TaskGraphStructure containing a copy of
// every task whose OriginId matches father.Father, re-homed under father,
// assigned to node and given the instance's module/args. The sub-graph's
// adjacency matrix is extracted from the parent graph.
func (this *TaskGraphStructure) duplicateSubtasks(father *Task, node string, instance TaskInstance) *TaskGraphStructure {
	taskStructure := NewTaskGraphStructure()
	// Get all the tasks with origin father.Name
	myindex := 0
	// Define a new origin composed of the Id
	for _, task := range this.Tasks {
		if father.Father == task.OriginId {
			// task match, create a new task with the same informations...
			newTask := NewTask()
			newTask.Id = myindex
			newTask.Name = task.Name
			newTask.Node = node
			newTask.Father = task.Id
			newTask.OriginId = father.Id
			newTask.Module = instance.Module
			newTask.Args = instance.Args
			// ... Add it to the structure...
			taskStructure.Tasks[myindex] = newTask
			// ... Extract the matrix associated
			taskStructure.AdjacencyMatrix = mat64.DenseCopyOf(taskStructure.AdjacencyMatrix.Grow(1, 1))
			taskStructure.DegreeMatrix = mat64.DenseCopyOf(taskStructure.DegreeMatrix.Grow(1, 1))
			myindex += 1
			// And add it to the structure as well
		}
	}
	// Copy the dependency edges between the duplicated tasks out of the
	// parent graph, addressed through each copy's Father (source task) id.
	row, col := taskStructure.AdjacencyMatrix.Dims()
	for r := 0; r < row; r++ {
		for c := 0; c < col; c++ {
			taskStructure.AdjacencyMatrix.Set(r, c, this.AdjacencyMatrix.At(taskStructure.Tasks[r].Father, taskStructure.Tasks[c].Father))
		}
	}
	return taskStructure
}

// InstanciateTaskStructure walks the task definition, resolves each
// instance's module path (or falls back to "dummy"), instanciates it and
// relinks the graph after each instance.
func (this *TaskGraphStructure) InstanciateTaskStructure(taskDefinition TaskDefinition) {
	for _, instance := range taskDefinition {
		//newTasks := this.duplicateTasks(instance.Taskname, host)
		log.Printf("Instance %v %v %v", instance.Taskname, instance.Hosts, instance.Module)
		if instance.Module != "" {
			instance.Module = fmt.Sprintf("%v%v", "../examples/modules/", instance.Module)
		} else {
			instance.Module = "dummy"
		}
		this.instanciate(instance)
		this.Relink()
	}
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package tcpip provides the interfaces and related types that users of the
// tcpip stack will use in order to create endpoints used to send and receive
// data over the network stack.
//
// The starting point is the creation and configuration of a stack. A stack can
// be created by calling the New() function of the tcpip/stack/stack package;
// configuring a stack involves creating NICs (via calls to Stack.CreateNIC()),
// adding network addresses (via calls to Stack.AddAddress()), and
// setting a route table (via a call to Stack.SetRouteTable()).
//
// Once a stack is configured, endpoints can be created by calling
// Stack.NewEndpoint(). Such endpoints can be used to send/receive data, connect
// to peers, listen for connections, accept connections, etc., depending on the
// transport protocol selected.
package tcpip
import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/google/netstack/tcpip/buffer"
"github.com/google/netstack/waiter"
)
// Error represents an error in the netstack error space. Using a dedicated
// type ensures that errors outside of this space are not accidentally
// introduced.
//
// Note: to support save / restore, it is important that all tcpip errors
// have distinct error messages.
type Error struct {
	msg         string // human-readable description, unique per error
	ignoreStats bool   // excluded from failure counts when true
}

// String implements fmt.Stringer.String.
func (err *Error) String() string {
	return err.msg
}

// IgnoreStats indicates whether this error type should be included in
// failure counts in tcpip.Stats structs.
func (err *Error) IgnoreStats() bool {
	return err.ignoreStats
}
// Errors that can be returned by the network stack. Entries constructed
// with ignoreStats: true are excluded from failure counts (see
// Error.IgnoreStats); each message must stay distinct to support
// save / restore.
var (
	ErrUnknownProtocol       = &Error{msg: "unknown protocol"}
	ErrUnknownNICID          = &Error{msg: "unknown nic id"}
	ErrUnknownDevice         = &Error{msg: "unknown device"}
	ErrUnknownProtocolOption = &Error{msg: "unknown option for protocol"}
	ErrDuplicateNICID        = &Error{msg: "duplicate nic id"}
	ErrDuplicateAddress      = &Error{msg: "duplicate address"}
	ErrNoRoute               = &Error{msg: "no route"}
	ErrBadLinkEndpoint       = &Error{msg: "bad link layer endpoint"}
	ErrAlreadyBound          = &Error{msg: "endpoint already bound", ignoreStats: true}
	ErrInvalidEndpointState  = &Error{msg: "endpoint is in invalid state"}
	ErrAlreadyConnecting     = &Error{msg: "endpoint is already connecting", ignoreStats: true}
	ErrAlreadyConnected      = &Error{msg: "endpoint is already connected", ignoreStats: true}
	ErrNoPortAvailable       = &Error{msg: "no ports are available"}
	ErrPortInUse             = &Error{msg: "port is in use"}
	ErrBadLocalAddress       = &Error{msg: "bad local address"}
	ErrClosedForSend         = &Error{msg: "endpoint is closed for send"}
	ErrClosedForReceive      = &Error{msg: "endpoint is closed for receive"}
	ErrWouldBlock            = &Error{msg: "operation would block", ignoreStats: true}
	ErrConnectionRefused     = &Error{msg: "connection was refused"}
	ErrTimeout               = &Error{msg: "operation timed out"}
	ErrAborted               = &Error{msg: "operation aborted"}
	ErrConnectStarted        = &Error{msg: "connection attempt started", ignoreStats: true}
	ErrDestinationRequired   = &Error{msg: "destination address is required"}
	ErrNotSupported          = &Error{msg: "operation not supported"}
	ErrQueueSizeNotSupported = &Error{msg: "queue size querying not supported"}
	ErrNotConnected          = &Error{msg: "endpoint not connected"}
	ErrConnectionReset       = &Error{msg: "connection reset by peer"}
	ErrConnectionAborted     = &Error{msg: "connection aborted"}
	ErrNoSuchFile            = &Error{msg: "no such file"}
	ErrInvalidOptionValue    = &Error{msg: "invalid option value specified"}
	ErrNoLinkAddress         = &Error{msg: "no remote link address"}
	ErrBadAddress            = &Error{msg: "bad address"}
	ErrNetworkUnreachable    = &Error{msg: "network is unreachable"}
	ErrMessageTooLong        = &Error{msg: "message too long"}
	ErrNoBufferSpace         = &Error{msg: "no buffer space available"}
	ErrBroadcastDisabled     = &Error{msg: "broadcast socket option disabled"}
	ErrNotPermitted          = &Error{msg: "operation not permitted"}
)
// Errors related to Subnet validation.
var (
	errSubnetLengthMismatch = errors.New("subnet length of address and mask differ")
	errSubnetAddressMasked  = errors.New("subnet address has bits set outside the mask")
)

// ErrSaveRejection indicates a failed save due to unsupported networking
// state. This kind of error is only produced by save logic.
type ErrSaveRejection struct {
	Err error // the underlying reason for rejecting the save
}

// Error implements error; it prefixes the wrapped error's description with
// the save-rejection context.
func (e ErrSaveRejection) Error() string {
	return "save rejected due to unsupported networking state: " + e.Err.Error()
}
// A Clock provides the current time.
//
// Times returned by a Clock should always be used for application-visible
// time, but never for netstack internal timekeeping.
type Clock interface {
	// NowNanoseconds returns the current real time as a number of
	// nanoseconds since the Unix epoch.
	NowNanoseconds() int64

	// NowMonotonic returns a monotonic time value. Only differences
	// between values are meaningful, not the values themselves.
	NowMonotonic() int64
}
// Address is a byte slice cast as a string that represents the address of
// a network node. Or, in the case of unix endpoints, it may represent a
// path. The string is raw bytes, not a printable form.
type Address string

// AddressMask is a bitmask for an address.
type AddressMask string

// String implements Stringer. It formats the mask the same way as the
// corresponding Address (Address.String is defined elsewhere in this
// package).
func (a AddressMask) String() string {
	return Address(a).String()
}
// Subnet is a subnet defined by its address and mask.
type Subnet struct {
	address Address
	mask    AddressMask
}

// NewSubnet creates a new Subnet, checking that the address and mask are
// the same length and that the address has no bits set outside the mask.
func NewSubnet(a Address, m AddressMask) (Subnet, error) {
	if len(a) != len(m) {
		return Subnet{}, errSubnetLengthMismatch
	}
	// Index the strings byte-wise on purpose: addresses are raw bytes, so a
	// rune-decoding range loop would be incorrect here.
	for i := 0; i < len(a); i++ {
		if a[i]&^m[i] != 0 {
			return Subnet{}, errSubnetAddressMasked
		}
	}
	return Subnet{address: a, mask: m}, nil
}

// Contains returns true iff the address is of the same length and matches
// the subnet address and mask.
func (s *Subnet) Contains(a Address) bool {
	if len(a) != len(s.address) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if a[i]&s.mask[i] != s.address[i] {
			return false
		}
	}
	return true
}

// ID returns the subnet ID.
func (s *Subnet) ID() Address {
	return s.address
}

// Bits returns the number of ones (network bits) and zeros (host bits) in
// the subnet mask.
func (s *Subnet) Bits() (ones int, zeros int) {
	for _, b := range []byte(s.mask) {
		for bit := uint(0); bit < 8; bit++ {
			if b&(1<<bit) != 0 {
				ones++
			} else {
				zeros++
			}
		}
	}
	return
}

// Prefix returns the number of bits before the first host bit, scanning
// the mask from the most significant bit down.
func (s *Subnet) Prefix() int {
	for byteIdx, b := range []byte(s.mask) {
		for bit := 7; bit >= 0; bit-- {
			if b&(1<<uint(bit)) == 0 {
				return byteIdx*8 + 7 - bit
			}
		}
	}
	return len(s.mask) * 8
}

// Mask returns the subnet mask.
func (s *Subnet) Mask() AddressMask {
	return s.mask
}
// NICID is a number that uniquely identifies a NIC.
type NICID int32

// ShutdownFlags represents flags that can be passed to the Shutdown()
// method of the Endpoint interface.
type ShutdownFlags int

// Values of the flags that can be passed to the Shutdown() method. They
// can be OR'ed together to shut down both directions at once.
const (
	ShutdownRead ShutdownFlags = 1 << iota
	ShutdownWrite
)
// FullAddress represents a full transport node address, as required by the
// Connect() and Bind() methods.
//
// +stateify savable
type FullAddress struct {
	// NIC is the ID of the NIC this address refers to.
	//
	// This may not be used by all endpoint types.
	NIC NICID

	// Addr is the network address.
	Addr Address

	// Port is the transport port.
	//
	// This may not be used by all endpoint types.
	Port uint16
}
// Payload provides an interface around data that is being sent to an
// endpoint. This allows the endpoint to request the amount of data it
// needs based on internal buffers without exposing them.
// 'p.Get(p.Size())' reads all the data.
type Payload interface {
	// Get returns a slice containing exactly 'min(size, p.Size())' bytes.
	Get(size int) ([]byte, *Error)

	// Size returns the payload size.
	Size() int
}
// SlicePayload implements Payload on top of slices for convenience.
type SlicePayload []byte

// Get implements Payload. It returns at most size bytes from the front of
// the slice (the entire slice when size exceeds its length); it never
// fails.
func (s SlicePayload) Get(size int) ([]byte, *Error) {
	if n := len(s); size > n {
		size = n
	}
	return s[:size], nil
}

// Size implements Payload. It reports the total payload length in bytes.
func (s SlicePayload) Size() int {
	return len(s)
}
// A ControlMessages contains socket control messages for IP sockets.
//
// +stateify savable
type ControlMessages struct {
	// HasTimestamp indicates whether Timestamp is valid/set.
	HasTimestamp bool

	// Timestamp is the time (in ns) that the last packet used to create
	// the read data was received. Only meaningful when HasTimestamp is
	// true.
	Timestamp int64
}
// Endpoint is the interface implemented by transport protocols (e.g., tcp,
// udp) that exposes functionality like read, write, connect, etc. to users
// of the networking stack. All methods report failures through the
// netstack *Error type rather than the standard error interface.
type Endpoint interface {
	// Close puts the endpoint in a closed state and frees all resources
	// associated with it.
	Close()

	// Read reads data from the endpoint and optionally returns the sender.
	//
	// This method does not block if there is no data pending. It will also
	// either return an error or data, never both.
	Read(*FullAddress) (buffer.View, ControlMessages, *Error)

	// Write writes data to the endpoint's peer. This method does not block if
	// the data cannot be written.
	//
	// Unlike io.Writer.Write, Endpoint.Write transfers ownership of any bytes
	// successfully written to the Endpoint. That is, if a call to
	// Write(SlicePayload{data}) returns (n, err), it may retain data[:n], and
	// the caller should not use data[:n] after Write returns.
	//
	// Note that unlike io.Writer.Write, it is not an error for Write to
	// perform a partial write (if n > 0, no error may be returned). Only
	// stream (TCP) Endpoints may return partial writes, and even then only
	// in the case where writing additional data would block. Other Endpoints
	// will either write the entire message or return an error.
	//
	// For UDP and Ping sockets if address resolution is required,
	// ErrNoLinkAddress and a notification channel is returned for the caller to
	// block. Channel is closed once address resolution is complete (success or
	// not). The channel is only non-nil in this case.
	Write(Payload, WriteOptions) (uintptr, <-chan struct{}, *Error)

	// Peek reads data without consuming it from the endpoint.
	//
	// This method does not block if there is no data pending.
	Peek([][]byte) (uintptr, ControlMessages, *Error)

	// Connect connects the endpoint to its peer. Specifying a NIC is
	// optional.
	//
	// There are three classes of return values:
	//	nil -- the attempt to connect succeeded.
	//	ErrConnectStarted/ErrAlreadyConnecting -- the connect attempt started
	//		but hasn't completed yet. In this case, the caller must call Connect
	//		or GetSockOpt(ErrorOption) when the endpoint becomes writable to
	//		get the actual result. The first call to Connect after the socket has
	//		connected returns nil. Calling connect again results in ErrAlreadyConnected.
	//	Anything else -- the attempt to connect failed.
	Connect(address FullAddress) *Error

	// Shutdown closes the read and/or write end of the endpoint connection
	// to its peer.
	Shutdown(flags ShutdownFlags) *Error

	// Listen puts the endpoint in "listen" mode, which allows it to accept
	// new connections.
	Listen(backlog int) *Error

	// Accept returns a new endpoint if a peer has established a connection
	// to an endpoint previously set to listen mode. This method does not
	// block if no new connections are available.
	//
	// The returned Queue is the wait queue for the newly created endpoint.
	Accept() (Endpoint, *waiter.Queue, *Error)

	// Bind binds the endpoint to a specific local address and port.
	// Specifying a NIC is optional.
	Bind(address FullAddress) *Error

	// GetLocalAddress returns the address to which the endpoint is bound.
	GetLocalAddress() (FullAddress, *Error)

	// GetRemoteAddress returns the address to which the endpoint is
	// connected.
	GetRemoteAddress() (FullAddress, *Error)

	// Readiness returns the current readiness of the endpoint. For example,
	// if waiter.EventIn is set, the endpoint is immediately readable.
	Readiness(mask waiter.EventMask) waiter.EventMask

	// SetSockOpt sets a socket option. opt should be one of the *Option types.
	SetSockOpt(opt interface{}) *Error

	// GetSockOpt gets a socket option. opt should be a pointer to one of the
	// *Option types.
	GetSockOpt(opt interface{}) *Error
}
// WriteOptions contains options for Endpoint.Write.
type WriteOptions struct {
	// If To is not nil, write to the given address instead of the endpoint's
	// peer. This is what implements sendto() semantics for datagram sockets.
	To *FullAddress

	// More has the same semantics as Linux's MSG_MORE: the caller indicates
	// that more data will follow shortly.
	More bool

	// EndOfRecord has the same semantics as Linux's MSG_EOR: it marks the
	// end of a logical record.
	EndOfRecord bool
}
// The types below are socket options passed to SetSockOpt/GetSockOpt. Each
// one identifies a distinct option; its underlying type carries the value.

// ErrorOption is used in GetSockOpt to specify that the last error reported by
// the endpoint should be cleared and returned.
type ErrorOption struct{}

// SendBufferSizeOption is used by SetSockOpt/GetSockOpt to specify the send
// buffer size option.
type SendBufferSizeOption int

// ReceiveBufferSizeOption is used by SetSockOpt/GetSockOpt to specify the
// receive buffer size option.
type ReceiveBufferSizeOption int

// SendQueueSizeOption is used in GetSockOpt to specify that the number of
// unread bytes in the output buffer should be returned.
type SendQueueSizeOption int

// ReceiveQueueSizeOption is used in GetSockOpt to specify that the number of
// unread bytes in the input buffer should be returned.
type ReceiveQueueSizeOption int

// V6OnlyOption is used by SetSockOpt/GetSockOpt to specify whether an IPv6
// socket is to be restricted to sending and receiving IPv6 packets only.
type V6OnlyOption int

// DelayOption is used by SetSockOpt/GetSockOpt to specify if data should be
// sent out immediately by the transport protocol. For TCP, it determines if the
// Nagle algorithm is on or off.
type DelayOption int

// CorkOption is used by SetSockOpt/GetSockOpt to specify if data should be
// held until segments are full by the TCP transport protocol.
type CorkOption int

// ReuseAddressOption is used by SetSockOpt/GetSockOpt to specify whether Bind()
// should allow reuse of local address.
type ReuseAddressOption int

// ReusePortOption is used by SetSockOpt/GetSockOpt to permit multiple sockets
// to be bound to an identical socket address.
type ReusePortOption int

// QuickAckOption is stubbed out in SetSockOpt/GetSockOpt.
type QuickAckOption int

// PasscredOption is used by SetSockOpt/GetSockOpt to specify whether
// SCM_CREDENTIALS socket control messages are enabled.
//
// Only supported on Unix sockets.
type PasscredOption int

// TCPInfoOption is used by GetSockOpt to expose TCP statistics.
//
// TODO(b/64800844): Add and populate stat fields.
type TCPInfoOption struct {
	// RTT is the round-trip time.
	RTT time.Duration

	// RTTVar is the round-trip time variation.
	RTTVar time.Duration
}

// KeepaliveEnabledOption is used by SetSockOpt/GetSockOpt to specify whether
// TCP keepalive is enabled for this socket.
type KeepaliveEnabledOption int

// KeepaliveIdleOption is used by SetSockOpt/GetSockOpt to specify the time a
// connection must remain idle before the first TCP keepalive packet is sent.
// Once this time is reached, KeepaliveIntervalOption is used instead.
type KeepaliveIdleOption time.Duration

// KeepaliveIntervalOption is used by SetSockOpt/GetSockOpt to specify the
// interval between sending TCP keepalive packets.
type KeepaliveIntervalOption time.Duration

// KeepaliveCountOption is used by SetSockOpt/GetSockOpt to specify the number
// of un-ACKed TCP keepalives that will be sent before the connection is
// closed.
type KeepaliveCountOption int

// MulticastTTLOption is used by SetSockOpt/GetSockOpt to control the default
// TTL value for multicast messages. The default is 1.
type MulticastTTLOption uint8

// MulticastInterfaceOption is used by SetSockOpt/GetSockOpt to specify a
// default interface for multicast.
type MulticastInterfaceOption struct {
	// NIC identifies the interface by its ID.
	NIC NICID

	// InterfaceAddr identifies the interface by one of its addresses.
	InterfaceAddr Address
}

// MulticastLoopOption is used by SetSockOpt/GetSockOpt to specify whether
// multicast packets sent over a non-loopback interface will be looped back.
type MulticastLoopOption bool

// MembershipOption is used by SetSockOpt/GetSockOpt as an argument to
// AddMembershipOption and RemoveMembershipOption.
type MembershipOption struct {
	// NIC identifies the interface to join/leave on by its ID.
	NIC NICID

	// InterfaceAddr identifies the interface by one of its addresses.
	InterfaceAddr Address

	// MulticastAddr is the address of the multicast group.
	MulticastAddr Address
}

// AddMembershipOption is used by SetSockOpt/GetSockOpt to join a multicast
// group identified by the given multicast address, on the interface matching
// the given interface address.
type AddMembershipOption MembershipOption

// RemoveMembershipOption is used by SetSockOpt/GetSockOpt to leave a multicast
// group identified by the given multicast address, on the interface matching
// the given interface address.
type RemoveMembershipOption MembershipOption

// OutOfBandInlineOption is used by SetSockOpt/GetSockOpt to specify whether
// TCP out-of-band data is delivered along with the normal in-band data.
type OutOfBandInlineOption int

// BroadcastOption is used by SetSockOpt/GetSockOpt to specify whether
// datagram sockets are allowed to send packets to a broadcast address.
type BroadcastOption int
// Route is a row in the routing table. It specifies through which NIC (and
// gateway) sets of packets should be routed. A row is considered viable if the
// masked target address matches the destination address in the row.
type Route struct {
	// Destination is the address that must be matched against the masked
	// target address to check if this row is viable.
	Destination Address

	// Mask specifies which bits of the Destination and the target address
	// must match for this row to be viable.
	Mask AddressMask

	// Gateway is the gateway to be used if this row is viable.
	Gateway Address

	// NIC is the id of the nic to be used if this row is viable.
	NIC NICID
}
// Match determines if r is viable for the given destination address.
func (r *Route) Match(addr Address) bool {
	if len(addr) != len(r.Destination) {
		return false
	}

	// The IPv4 limited broadcast address (255.255.255.255) matches every
	// route of the same length. Using header.Ipv4Broadcast would introduce
	// an import cycle, so a literal is used instead.
	if addr == "\xff\xff\xff\xff" {
		return true
	}

	// Otherwise every masked byte of addr must equal the destination.
	for i := range r.Destination {
		if addr[i]&r.Mask[i] != r.Destination[i] {
			return false
		}
	}
	return true
}
// LinkEndpointID represents a data link layer endpoint.
type LinkEndpointID uint64

// TransportProtocolNumber is the number of a transport protocol (e.g. TCP,
// UDP).
type TransportProtocolNumber uint32

// NetworkProtocolNumber is the number of a network protocol (e.g. IPv4,
// IPv6).
type NetworkProtocolNumber uint32
// A StatCounter keeps track of a statistic. It is safe for concurrent use:
// all accesses to the underlying count go through sync/atomic.
type StatCounter struct {
	count uint64
}

// IncrementBy increments the counter by v.
func (c *StatCounter) IncrementBy(v uint64) {
	atomic.AddUint64(&c.count, v)
}

// Increment adds one to the counter.
func (c *StatCounter) Increment() {
	c.IncrementBy(1)
}

// Value returns the current value of the counter.
func (c *StatCounter) Value() uint64 {
	return atomic.LoadUint64(&c.count)
}

// String implements fmt.Stringer by formatting the current value in base 10.
func (c *StatCounter) String() string {
	return strconv.FormatUint(c.Value(), 10)
}
// ICMPv4PacketStats enumerates counts for all ICMPv4 packet types, one
// counter per type. It is embedded by the sent/received stat structs below.
type ICMPv4PacketStats struct {
	// Echo is the total number of ICMPv4 echo packets counted.
	Echo *StatCounter

	// EchoReply is the total number of ICMPv4 echo reply packets counted.
	EchoReply *StatCounter

	// DstUnreachable is the total number of ICMPv4 destination unreachable
	// packets counted.
	DstUnreachable *StatCounter

	// SrcQuench is the total number of ICMPv4 source quench packets
	// counted.
	SrcQuench *StatCounter

	// Redirect is the total number of ICMPv4 redirect packets counted.
	Redirect *StatCounter

	// TimeExceeded is the total number of ICMPv4 time exceeded packets
	// counted.
	TimeExceeded *StatCounter

	// ParamProblem is the total number of ICMPv4 parameter problem packets
	// counted.
	ParamProblem *StatCounter

	// Timestamp is the total number of ICMPv4 timestamp packets counted.
	Timestamp *StatCounter

	// TimestampReply is the total number of ICMPv4 timestamp reply packets
	// counted.
	TimestampReply *StatCounter

	// InfoRequest is the total number of ICMPv4 information request
	// packets counted.
	InfoRequest *StatCounter

	// InfoReply is the total number of ICMPv4 information reply packets
	// counted.
	InfoReply *StatCounter
}
// ICMPv6PacketStats enumerates counts for all ICMPv6 packet types, one
// counter per type. It is embedded by the sent/received stat structs below.
type ICMPv6PacketStats struct {
	// EchoRequest is the total number of ICMPv6 echo request packets
	// counted.
	EchoRequest *StatCounter

	// EchoReply is the total number of ICMPv6 echo reply packets counted.
	EchoReply *StatCounter

	// DstUnreachable is the total number of ICMPv6 destination unreachable
	// packets counted.
	DstUnreachable *StatCounter

	// PacketTooBig is the total number of ICMPv6 packet too big packets
	// counted.
	PacketTooBig *StatCounter

	// TimeExceeded is the total number of ICMPv6 time exceeded packets
	// counted.
	TimeExceeded *StatCounter

	// ParamProblem is the total number of ICMPv6 parameter problem packets
	// counted.
	ParamProblem *StatCounter

	// RouterSolicit is the total number of ICMPv6 router solicit packets
	// counted.
	RouterSolicit *StatCounter

	// RouterAdvert is the total number of ICMPv6 router advert packets
	// counted.
	RouterAdvert *StatCounter

	// NeighborSolicit is the total number of ICMPv6 neighbor solicit
	// packets counted.
	NeighborSolicit *StatCounter

	// NeighborAdvert is the total number of ICMPv6 neighbor advert packets
	// counted.
	NeighborAdvert *StatCounter

	// RedirectMsg is the total number of ICMPv6 redirect message packets
	// counted.
	RedirectMsg *StatCounter
}
// ICMPv4SentPacketStats collects outbound ICMPv4-specific stats.
type ICMPv4SentPacketStats struct {
	ICMPv4PacketStats

	// Dropped is the total number of ICMPv4 packets dropped due to link
	// layer errors.
	Dropped *StatCounter
}

// ICMPv4ReceivedPacketStats collects inbound ICMPv4-specific stats.
type ICMPv4ReceivedPacketStats struct {
	ICMPv4PacketStats

	// Invalid is the total number of ICMPv4 packets received that the
	// transport layer could not parse.
	Invalid *StatCounter
}

// ICMPv6SentPacketStats collects outbound ICMPv6-specific stats.
type ICMPv6SentPacketStats struct {
	ICMPv6PacketStats

	// Dropped is the total number of ICMPv6 packets dropped due to link
	// layer errors.
	Dropped *StatCounter
}

// ICMPv6ReceivedPacketStats collects inbound ICMPv6-specific stats.
type ICMPv6ReceivedPacketStats struct {
	ICMPv6PacketStats

	// Invalid is the total number of ICMPv6 packets received that the
	// transport layer could not parse.
	Invalid *StatCounter
}

// ICMPStats collects ICMP-specific stats (both v4 and v6).
type ICMPStats struct {
	// V4PacketsSent contains counts of sent packets by ICMPv4 packet type
	// and a single count of packets which failed to write to the link
	// layer.
	V4PacketsSent ICMPv4SentPacketStats

	// V4PacketsReceived contains counts of received packets by ICMPv4
	// packet type and a single count of invalid packets received.
	V4PacketsReceived ICMPv4ReceivedPacketStats

	// V6PacketsSent contains counts of sent packets by ICMPv6 packet type
	// and a single count of packets which failed to write to the link
	// layer.
	V6PacketsSent ICMPv6SentPacketStats

	// V6PacketsReceived contains counts of received packets by ICMPv6
	// packet type and a single count of invalid packets received.
	V6PacketsReceived ICMPv6ReceivedPacketStats
}
// IPStats collects IP-specific stats (both v4 and v6).
type IPStats struct {
	// PacketsReceived is the total number of IP packets received from the
	// link layer in nic.DeliverNetworkPacket.
	PacketsReceived *StatCounter

	// InvalidAddressesReceived is the total number of IP packets received
	// with an unknown or invalid destination address.
	InvalidAddressesReceived *StatCounter

	// PacketsDelivered is the total number of incoming IP packets that
	// are successfully delivered to the transport layer via HandlePacket.
	PacketsDelivered *StatCounter

	// PacketsSent is the total number of IP packets sent via WritePacket.
	PacketsSent *StatCounter

	// OutgoingPacketErrors is the total number of IP packets which failed
	// to write to a link-layer endpoint.
	OutgoingPacketErrors *StatCounter
}
// TCPStats collects TCP-specific stats.
type TCPStats struct {
	// ActiveConnectionOpenings is the number of connections opened
	// successfully via Connect.
	ActiveConnectionOpenings *StatCounter

	// PassiveConnectionOpenings is the number of connections opened
	// successfully via Listen.
	PassiveConnectionOpenings *StatCounter

	// FailedConnectionAttempts is the number of calls to Connect or Listen
	// (active and passive openings, respectively) that end in an error.
	FailedConnectionAttempts *StatCounter

	// ValidSegmentsReceived is the number of TCP segments received that
	// the transport layer successfully parsed.
	ValidSegmentsReceived *StatCounter

	// InvalidSegmentsReceived is the number of TCP segments received that
	// the transport layer could not parse.
	InvalidSegmentsReceived *StatCounter

	// SegmentsSent is the number of TCP segments sent.
	SegmentsSent *StatCounter

	// ResetsSent is the number of TCP resets sent.
	ResetsSent *StatCounter

	// ResetsReceived is the number of TCP resets received.
	ResetsReceived *StatCounter

	// Retransmits is the number of TCP segments retransmitted.
	Retransmits *StatCounter

	// FastRecovery is the number of times Fast Recovery was used to
	// recover from packet loss.
	FastRecovery *StatCounter

	// SACKRecovery is the number of times SACK Recovery was used to
	// recover from packet loss.
	SACKRecovery *StatCounter

	// SlowStartRetransmits is the number of segments retransmitted in slow
	// start.
	SlowStartRetransmits *StatCounter

	// FastRetransmit is the number of segments retransmitted in fast
	// recovery.
	FastRetransmit *StatCounter

	// Timeouts is the number of times the RTO expired.
	Timeouts *StatCounter

	// ChecksumErrors is the number of segments dropped due to bad checksums.
	ChecksumErrors *StatCounter
}
// UDPStats collects UDP-specific stats.
type UDPStats struct {
	// PacketsReceived is the number of UDP datagrams received via
	// HandlePacket.
	PacketsReceived *StatCounter

	// UnknownPortErrors is the number of incoming UDP datagrams dropped
	// because they did not have a known destination port.
	UnknownPortErrors *StatCounter

	// ReceiveBufferErrors is the number of incoming UDP datagrams dropped
	// due to the receiving buffer being in an invalid state.
	ReceiveBufferErrors *StatCounter

	// MalformedPacketsReceived is the number of incoming UDP datagrams
	// dropped due to the UDP header being in a malformed state.
	MalformedPacketsReceived *StatCounter

	// PacketsSent is the number of UDP datagrams sent via sendUDP.
	PacketsSent *StatCounter
}
// Stats holds statistics about the networking stack.
//
// All fields are optional; use FillIn to replace nil counters with fresh
// StatCounters before use.
type Stats struct {
	// UnknownProtocolRcvdPackets is the number of packets received by the
	// stack that were for an unknown or unsupported protocol.
	UnknownProtocolRcvdPackets *StatCounter

	// MalformedRcvdPackets is the number of packets received by the stack
	// that were deemed malformed.
	MalformedRcvdPackets *StatCounter

	// DroppedPackets is the number of packets dropped due to full queues.
	DroppedPackets *StatCounter

	// ICMP breaks out ICMP-specific stats (both v4 and v6).
	ICMP ICMPStats

	// IP breaks out IP-specific stats (both v4 and v6).
	IP IPStats

	// TCP breaks out TCP-specific stats.
	TCP TCPStats

	// UDP breaks out UDP-specific stats.
	UDP UDPStats
}
// fillIn recursively initializes every nil *StatCounter field reachable from
// v. v must be an addressable struct value whose fields are either
// *StatCounter or nested structs of the same shape; any other field kind is
// a programming error and panics.
func fillIn(v reflect.Value) {
	for i := 0; i < v.NumField(); i++ {
		f := v.Field(i)
		switch f.Kind() {
		case reflect.Struct:
			fillIn(f)
		case reflect.Ptr:
			// The type assertion panics for pointer fields that are
			// not *StatCounter, surfacing mistakes in the stat types.
			p := f.Addr().Interface().(**StatCounter)
			if *p == nil {
				*p = new(StatCounter)
			}
		default:
			panic(fmt.Sprintf("unexpected type %s", f.Type()))
		}
	}
}
// FillIn returns a copy of s with nil fields initialized to new StatCounters.
// The receiver is passed by value, so nil counters are only set on the
// returned copy; counters that were already non-nil are shared with the
// original.
func (s Stats) FillIn() Stats {
	fillIn(reflect.ValueOf(&s).Elem())
	return s
}
// String implements the fmt.Stringer interface.
//
// 4-byte addresses are formatted as IPv4 dotted quads, 16-byte addresses as
// IPv6 text (with the longest run of two or more zero groups compressed to
// "::"), and anything else as raw hex bytes.
func (a Address) String() string {
	switch len(a) {
	case 4:
		return fmt.Sprintf("%d.%d.%d.%d", int(a[0]), int(a[1]), int(a[2]), int(a[3]))
	case 16:
		// Find the longest subsequence of hexadecimal zeros.
		start, end := -1, -1
		for i := 0; i < len(a); i += 2 {
			j := i
			for j < len(a) && a[j] == 0 && a[j+1] == 0 {
				j += 2
			}
			// j > i+2 requires at least two consecutive zero groups,
			// so a single zero group is never compressed.
			if j > i+2 && j-i > end-start {
				start, end = i, j
			}
		}
		var b strings.Builder
		for i := 0; i < len(a); i += 2 {
			if i == start {
				b.WriteString("::")
				i = end
				if end >= len(a) {
					break
				}
			} else if i > 0 {
				b.WriteByte(':')
			}
			v := uint16(a[i+0])<<8 | uint16(a[i+1])
			if v == 0 {
				b.WriteByte('0')
			} else {
				const digits = "0123456789abcdef"
				// Emit nibbles most-significant first, skipping
				// leading zeros. The loop ends via uint wraparound:
				// after i == 0, i-- wraps far above 4.
				for i := uint(3); i < 4; i-- {
					if v := v >> (i * 4); v != 0 {
						b.WriteByte(digits[v&0xf])
					}
				}
			}
		}
		return b.String()
	default:
		return fmt.Sprintf("%x", []byte(a))
	}
}
// To4 converts the IPv4 address to a 4-byte representation.
// If the address is not an IPv4 address, To4 returns "".
func (a Address) To4() Address {
	const (
		ipv4len = 4
		ipv6len = 16
	)
	switch len(a) {
	case ipv4len:
		return a
	case ipv6len:
		// An IPv4-mapped IPv6 address: ten zero bytes, then 0xff 0xff,
		// then the embedded IPv4 address.
		if isZeros(a[:10]) && a[10] == 0xff && a[11] == 0xff {
			return a[12:16]
		}
	}
	return ""
}

// isZeros reports whether a is all zeros.
func isZeros(a Address) bool {
	for _, b := range []byte(a) {
		if b != 0 {
			return false
		}
	}
	return true
}
// LinkAddress is a byte slice cast as a string that represents a link address.
// It is typically a 6-byte MAC address.
type LinkAddress string

// String implements the fmt.Stringer interface. 6-byte addresses are
// rendered in the familiar colon-separated MAC form; other lengths fall back
// to raw hex.
func (a LinkAddress) String() string {
	if len(a) == 6 {
		return fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x", a[0], a[1], a[2], a[3], a[4], a[5])
	}
	return fmt.Sprintf("%x", []byte(a))
}

// ParseMACAddress parses an IEEE 802 address.
//
// It must be in the format aa:bb:cc:dd:ee:ff or aa-bb-cc-dd-ee-ff.
func ParseMACAddress(s string) (LinkAddress, error) {
	isSep := func(c rune) bool { return c == ':' || c == '-' }
	parts := strings.FieldsFunc(s, isSep)
	if len(parts) != 6 {
		return "", fmt.Errorf("inconsistent parts: %s", s)
	}
	var addr [6]byte
	for i, part := range parts {
		// Each part is one octet: at most two hex digits (bitSize 8).
		u, err := strconv.ParseUint(part, 16, 8)
		if err != nil {
			return "", fmt.Errorf("invalid hex digits: %s", s)
		}
		addr[i] = byte(u)
	}
	return LinkAddress(addr[:]), nil
}
// ProtocolAddress is an address and the network protocol it is associated
// with.
type ProtocolAddress struct {
	// Protocol is the protocol of the address.
	Protocol NetworkProtocolNumber

	// Address is a network address.
	Address Address
}
// danglingEndpointsMu protects access to danglingEndpoints. All reads and
// writes of the map (including its length) must hold this lock.
var danglingEndpointsMu sync.Mutex

// danglingEndpoints tracks all dangling endpoints no longer owned by the app.
var danglingEndpoints = make(map[Endpoint]struct{})
// GetDanglingEndpoints returns all dangling endpoints.
func GetDanglingEndpoints() []Endpoint {
	// The map length must be read under the lock as well: reading it while
	// another goroutine concurrently adds or removes an endpoint is a data
	// race. It is only used as a capacity hint, but the race is still
	// undefined behavior under the Go memory model.
	danglingEndpointsMu.Lock()
	es := make([]Endpoint, 0, len(danglingEndpoints))
	for e := range danglingEndpoints {
		es = append(es, e)
	}
	danglingEndpointsMu.Unlock()
	return es
}
// AddDanglingEndpoint adds a dangling endpoint.
func AddDanglingEndpoint(e Endpoint) {
	danglingEndpointsMu.Lock()
	defer danglingEndpointsMu.Unlock()
	danglingEndpoints[e] = struct{}{}
}

// DeleteDanglingEndpoint removes a dangling endpoint.
func DeleteDanglingEndpoint(e Endpoint) {
	danglingEndpointsMu.Lock()
	defer danglingEndpointsMu.Unlock()
	delete(danglingEndpoints, e)
}
// AsyncLoading is the global barrier for asynchronous endpoint loading
// activities. Loaders Add/Done on it; callers Wait on it to ensure all
// asynchronous loading has completed.
var AsyncLoading sync.WaitGroup
Update tcpip Clock description.
The tcpip.Clock comment stated that times provided by it should not be used for
netstack internal timekeeping. This comment was from before the interface
supported monotonic times. The monotonic times that it provides are now the
preferred time source for netstack internal timekeeping.
PiperOrigin-RevId: 246618772
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package tcpip provides the interfaces and related types that users of the
// tcpip stack will use in order to create endpoints used to send and receive
// data over the network stack.
//
// The starting point is the creation and configuration of a stack. A stack can
// be created by calling the New() function of the tcpip/stack/stack package;
// configuring a stack involves creating NICs (via calls to Stack.CreateNIC()),
// adding network addresses (via calls to Stack.AddAddress()), and
// setting a route table (via a call to Stack.SetRouteTable()).
//
// Once a stack is configured, endpoints can be created by calling
// Stack.NewEndpoint(). Such endpoints can be used to send/receive data, connect
// to peers, listen for connections, accept connections, etc., depending on the
// transport protocol selected.
package tcpip
import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/google/netstack/tcpip/buffer"
"github.com/google/netstack/waiter"
)
// Error represents an error in the netstack error space. Using a special type
// ensures that errors outside of this space are not accidentally introduced.
//
// Note: to support save / restore, it is important that all tcpip errors have
// distinct error messages.
type Error struct {
	msg string

	ignoreStats bool
}

// IgnoreStats indicates whether this error type should be included in failure
// counts in tcpip.Stats structs.
func (err *Error) IgnoreStats() bool {
	return err.ignoreStats
}

// String implements fmt.Stringer.String.
func (err *Error) String() string {
	return err.msg
}
// Errors that can be returned by the network stack.
//
// Errors constructed with ignoreStats set are excluded from failure counts
// in tcpip.Stats (see Error.IgnoreStats). Per the Error type's contract,
// every message here must be distinct to support save/restore.
var (
	ErrUnknownProtocol       = &Error{msg: "unknown protocol"}
	ErrUnknownNICID          = &Error{msg: "unknown nic id"}
	ErrUnknownDevice         = &Error{msg: "unknown device"}
	ErrUnknownProtocolOption = &Error{msg: "unknown option for protocol"}
	ErrDuplicateNICID        = &Error{msg: "duplicate nic id"}
	ErrDuplicateAddress      = &Error{msg: "duplicate address"}
	ErrNoRoute               = &Error{msg: "no route"}
	ErrBadLinkEndpoint       = &Error{msg: "bad link layer endpoint"}
	ErrAlreadyBound          = &Error{msg: "endpoint already bound", ignoreStats: true}
	ErrInvalidEndpointState  = &Error{msg: "endpoint is in invalid state"}
	ErrAlreadyConnecting     = &Error{msg: "endpoint is already connecting", ignoreStats: true}
	ErrAlreadyConnected      = &Error{msg: "endpoint is already connected", ignoreStats: true}
	ErrNoPortAvailable       = &Error{msg: "no ports are available"}
	ErrPortInUse             = &Error{msg: "port is in use"}
	ErrBadLocalAddress       = &Error{msg: "bad local address"}
	ErrClosedForSend         = &Error{msg: "endpoint is closed for send"}
	ErrClosedForReceive      = &Error{msg: "endpoint is closed for receive"}
	ErrWouldBlock            = &Error{msg: "operation would block", ignoreStats: true}
	ErrConnectionRefused     = &Error{msg: "connection was refused"}
	ErrTimeout               = &Error{msg: "operation timed out"}
	ErrAborted               = &Error{msg: "operation aborted"}
	ErrConnectStarted        = &Error{msg: "connection attempt started", ignoreStats: true}
	ErrDestinationRequired   = &Error{msg: "destination address is required"}
	ErrNotSupported          = &Error{msg: "operation not supported"}
	ErrQueueSizeNotSupported = &Error{msg: "queue size querying not supported"}
	ErrNotConnected          = &Error{msg: "endpoint not connected"}
	ErrConnectionReset       = &Error{msg: "connection reset by peer"}
	ErrConnectionAborted     = &Error{msg: "connection aborted"}
	ErrNoSuchFile            = &Error{msg: "no such file"}
	ErrInvalidOptionValue    = &Error{msg: "invalid option value specified"}
	ErrNoLinkAddress         = &Error{msg: "no remote link address"}
	ErrBadAddress            = &Error{msg: "bad address"}
	ErrNetworkUnreachable    = &Error{msg: "network is unreachable"}
	ErrMessageTooLong        = &Error{msg: "message too long"}
	ErrNoBufferSpace         = &Error{msg: "no buffer space available"}
	ErrBroadcastDisabled     = &Error{msg: "broadcast socket option disabled"}
	ErrNotPermitted          = &Error{msg: "operation not permitted"}
)
// Errors related to Subnet. These are plain Go errors (not *Error) because
// they are returned by NewSubnet, which reports construction problems rather
// than netstack runtime conditions.
var (
	errSubnetLengthMismatch = errors.New("subnet length of address and mask differ")
	errSubnetAddressMasked  = errors.New("subnet address has bits set outside the mask")
)
// ErrSaveRejection indicates a failed save due to unsupported networking state.
// This type of errors is only used for save logic.
type ErrSaveRejection struct {
	// Err is the underlying cause of the rejection.
	Err error
}

// Error implements the error interface by prefixing the wrapped error's
// message with a description of the save rejection.
func (e ErrSaveRejection) Error() string {
	const prefix = "save rejected due to unsupported networking state: "
	return prefix + e.Err.Error()
}
// A Clock provides the current time.
//
// Times returned by a Clock should always be used for application-visible
// time. Only monotonic times should be used for netstack internal
// timekeeping.
type Clock interface {
	// NowNanoseconds returns the current real time as a number of
	// nanoseconds since the Unix epoch.
	NowNanoseconds() int64

	// NowMonotonic returns a monotonic time value.
	//
	// NOTE(review): the epoch of the monotonic clock is presumably
	// arbitrary, so only differences between values are meaningful —
	// confirm with implementations.
	NowMonotonic() int64
}
// Address is a byte slice cast as a string that represents the address of a
// network node. Or, in the case of unix endpoints, it may represent a path.
type Address string

// AddressMask is a bitmask for an address.
type AddressMask string

// String implements Stringer.
func (a AddressMask) String() string {
	// Masks are rendered exactly like addresses of the same length.
	return Address(a).String()
}
// Subnet is a subnet defined by its address and mask.
type Subnet struct {
	address Address
	mask    AddressMask
}

// NewSubnet creates a new Subnet, checking that the address and mask are the same length.
func NewSubnet(a Address, m AddressMask) (Subnet, error) {
	if len(a) != len(m) {
		return Subnet{}, errSubnetLengthMismatch
	}
	// Every address bit outside the mask must be zero.
	for i := 0; i < len(a); i++ {
		if a[i]&^m[i] != 0 {
			return Subnet{}, errSubnetAddressMasked
		}
	}
	return Subnet{address: a, mask: m}, nil
}

// Contains returns true iff the address is of the same length and matches the
// subnet address and mask.
func (s *Subnet) Contains(a Address) bool {
	n := len(s.address)
	if len(a) != n {
		return false
	}
	for i := 0; i < n; i++ {
		if a[i]&s.mask[i] != s.address[i] {
			return false
		}
	}
	return true
}

// ID returns the subnet ID.
func (s *Subnet) ID() Address {
	return s.address
}

// Bits returns the number of ones (network bits) and zeros (host bits) in the
// subnet mask.
func (s *Subnet) Bits() (ones int, zeros int) {
	for _, b := range []byte(s.mask) {
		for bit := uint(0); bit < 8; bit++ {
			if b&(1<<bit) != 0 {
				ones++
			} else {
				zeros++
			}
		}
	}
	return
}

// Prefix returns the number of bits before the first host bit.
func (s *Subnet) Prefix() int {
	for i, b := range []byte(s.mask) {
		if b == 0xff {
			continue
		}
		// Count the leading one bits of the first byte that is not
		// all ones; the prefix stops at the first zero bit.
		n := i * 8
		for mask := byte(0x80); mask != 0 && b&mask != 0; mask >>= 1 {
			n++
		}
		return n
	}
	return len(s.mask) * 8
}

// Mask returns the subnet mask.
func (s *Subnet) Mask() AddressMask {
	return s.mask
}
// NICID is a number that uniquely identifies a NIC.
type NICID int32

// ShutdownFlags represents flags that can be passed to the Shutdown() method
// of the Endpoint interface.
type ShutdownFlags int

// Values of the flags that can be passed to the Shutdown() method. They can
// be OR'ed together.
const (
	ShutdownRead ShutdownFlags = 1 << iota
	ShutdownWrite
)
// FullAddress represents a full transport node address, as required by the
// Connect() and Bind() methods.
//
// +stateify savable
type FullAddress struct {
	// NIC is the ID of the NIC this address refers to.
	//
	// This may not be used by all endpoint types.
	NIC NICID

	// Addr is the network address.
	Addr Address

	// Port is the transport port.
	//
	// This may not be used by all endpoint types.
	Port uint16
}
// Payload provides an interface around data that is being sent to an endpoint.
// This allows the endpoint to request the amount of data it needs based on
// internal buffers without exposing them. 'p.Get(p.Size())' reads all the data.
type Payload interface {
	// Get returns a slice containing exactly 'min(size, p.Size())' bytes.
	Get(size int) ([]byte, *Error)

	// Size returns the payload size.
	Size() int
}
// SlicePayload implements Payload on top of slices for convenience.
type SlicePayload []byte

// Get implements Payload.Get: it returns at most size bytes of s and never
// fails.
func (s SlicePayload) Get(size int) ([]byte, *Error) {
	if limit := s.Size(); size > limit {
		size = limit
	}
	return s[:size], nil
}

// Size implements Payload.Size: the payload is the entire slice.
func (s SlicePayload) Size() int {
	return len(s)
}
// A ControlMessages contains socket control messages for IP sockets.
//
// +stateify savable
type ControlMessages struct {
	// HasTimestamp indicates whether Timestamp is valid/set.
	HasTimestamp bool

	// Timestamp is the time (in ns) that the last packet used to create
	// the read data was received.
	Timestamp int64
}
// Endpoint is the interface implemented by transport protocols (e.g., tcp, udp)
// that exposes functionality like read, write, connect, etc. to users of the
// networking stack.
type Endpoint interface {
	// Close puts the endpoint in a closed state and frees all resources
	// associated with it.
	Close()

	// Read reads data from the endpoint and optionally returns the sender.
	//
	// This method does not block if there is no data pending. It will also
	// either return an error or data, never both.
	Read(*FullAddress) (buffer.View, ControlMessages, *Error)

	// Write writes data to the endpoint's peer. This method does not block if
	// the data cannot be written.
	//
	// Unlike io.Writer.Write, Endpoint.Write transfers ownership of any bytes
	// successfully written to the Endpoint. That is, if a call to
	// Write(SlicePayload{data}) returns (n, err), it may retain data[:n], and
	// the caller should not use data[:n] after Write returns.
	//
	// Note that unlike io.Writer.Write, it is not an error for Write to
	// perform a partial write (if n > 0, no error may be returned). Only
	// stream (TCP) Endpoints may return partial writes, and even then only
	// in the case where writing additional data would block. Other Endpoints
	// will either write the entire message or return an error.
	//
	// For UDP and Ping sockets if address resolution is required,
	// ErrNoLinkAddress and a notification channel is returned for the caller to
	// block on. The channel is closed once address resolution is complete
	// (success or not). The channel is only non-nil in this case.
	Write(Payload, WriteOptions) (uintptr, <-chan struct{}, *Error)

	// Peek reads data without consuming it from the endpoint.
	//
	// This method does not block if there is no data pending.
	Peek([][]byte) (uintptr, ControlMessages, *Error)

	// Connect connects the endpoint to its peer. Specifying a NIC is
	// optional.
	//
	// There are three classes of return values:
	//	nil -- the attempt to connect succeeded.
	//	ErrConnectStarted/ErrAlreadyConnecting -- the connect attempt started
	//		but hasn't completed yet. In this case, the caller must call Connect
	//		or GetSockOpt(ErrorOption) when the endpoint becomes writable to
	//		get the actual result. The first call to Connect after the socket has
	//		connected returns nil. Calling Connect again results in
	//		ErrAlreadyConnected.
	//	Anything else -- the attempt to connect failed.
	Connect(address FullAddress) *Error

	// Shutdown closes the read and/or write end of the endpoint connection
	// to its peer.
	Shutdown(flags ShutdownFlags) *Error

	// Listen puts the endpoint in "listen" mode, which allows it to accept
	// new connections.
	Listen(backlog int) *Error

	// Accept returns a new endpoint if a peer has established a connection
	// to an endpoint previously set to listen mode. This method does not
	// block if no new connections are available.
	//
	// The returned Queue is the wait queue for the newly created endpoint.
	Accept() (Endpoint, *waiter.Queue, *Error)

	// Bind binds the endpoint to a specific local address and port.
	// Specifying a NIC is optional.
	Bind(address FullAddress) *Error

	// GetLocalAddress returns the address to which the endpoint is bound.
	GetLocalAddress() (FullAddress, *Error)

	// GetRemoteAddress returns the address to which the endpoint is
	// connected.
	GetRemoteAddress() (FullAddress, *Error)

	// Readiness returns the current readiness of the endpoint. For example,
	// if waiter.EventIn is set, the endpoint is immediately readable.
	Readiness(mask waiter.EventMask) waiter.EventMask

	// SetSockOpt sets a socket option. opt should be one of the *Option types.
	SetSockOpt(opt interface{}) *Error

	// GetSockOpt gets a socket option. opt should be a pointer to one of the
	// *Option types.
	GetSockOpt(opt interface{}) *Error
}
// WriteOptions contains options for Endpoint.Write.
type WriteOptions struct {
// If To is not nil, write to the given address instead of the endpoint's
// peer. Used by connectionless (datagram) endpoints.
To *FullAddress

// More has the same semantics as Linux's MSG_MORE.
More bool

// EndOfRecord has the same semantics as Linux's MSG_EOR.
EndOfRecord bool
}
// ErrorOption is used in GetSockOpt to specify that the last error reported by
// the endpoint should be cleared and returned.
type ErrorOption struct{}

// SendBufferSizeOption is used by SetSockOpt/GetSockOpt to specify the send
// buffer size option.
type SendBufferSizeOption int

// ReceiveBufferSizeOption is used by SetSockOpt/GetSockOpt to specify the
// receive buffer size option.
type ReceiveBufferSizeOption int

// SendQueueSizeOption is used in GetSockOpt to specify that the number of
// unread bytes in the output buffer should be returned.
type SendQueueSizeOption int

// ReceiveQueueSizeOption is used in GetSockOpt to specify that the number of
// unread bytes in the input buffer should be returned.
type ReceiveQueueSizeOption int

// V6OnlyOption is used by SetSockOpt/GetSockOpt to specify whether an IPv6
// socket is to be restricted to sending and receiving IPv6 packets only.
type V6OnlyOption int

// DelayOption is used by SetSockOpt/GetSockOpt to specify if data should be
// sent out immediately by the transport protocol. For TCP, it determines if the
// Nagle algorithm is on or off.
//
// NOTE(review): a non-zero value presumably enables delaying (Nagle on, i.e.
// the inverse of TCP_NODELAY) — confirm against the TCP endpoint
// implementation.
type DelayOption int

// CorkOption is used by SetSockOpt/GetSockOpt to specify if data should be
// held until segments are full by the TCP transport protocol.
type CorkOption int

// ReuseAddressOption is used by SetSockOpt/GetSockOpt to specify whether Bind()
// should allow reuse of local address.
type ReuseAddressOption int

// ReusePortOption is used by SetSockOpt/GetSockOpt to permit multiple sockets
// to be bound to an identical socket address.
type ReusePortOption int

// QuickAckOption is stubbed out in SetSockOpt/GetSockOpt.
type QuickAckOption int

// PasscredOption is used by SetSockOpt/GetSockOpt to specify whether
// SCM_CREDENTIALS socket control messages are enabled.
//
// Only supported on Unix sockets.
type PasscredOption int

// TCPInfoOption is used by GetSockOpt to expose TCP statistics.
//
// TODO(b/64800844): Add and populate stat fields.
type TCPInfoOption struct {
RTT time.Duration
RTTVar time.Duration
}

// KeepaliveEnabledOption is used by SetSockOpt/GetSockOpt to specify whether
// TCP keepalive is enabled for this socket.
type KeepaliveEnabledOption int

// KeepaliveIdleOption is used by SetSockOpt/GetSockOpt to specify the time a
// connection must remain idle before the first TCP keepalive packet is sent.
// Once this time is reached, KeepaliveIntervalOption is used instead.
type KeepaliveIdleOption time.Duration

// KeepaliveIntervalOption is used by SetSockOpt/GetSockOpt to specify the
// interval between sending TCP keepalive packets.
type KeepaliveIntervalOption time.Duration

// KeepaliveCountOption is used by SetSockOpt/GetSockOpt to specify the number
// of un-ACKed TCP keepalives that will be sent before the connection is
// closed.
type KeepaliveCountOption int

// MulticastTTLOption is used by SetSockOpt/GetSockOpt to control the default
// TTL value for multicast messages. The default is 1.
type MulticastTTLOption uint8

// MulticastInterfaceOption is used by SetSockOpt/GetSockOpt to specify a
// default interface for multicast.
type MulticastInterfaceOption struct {
NIC NICID
InterfaceAddr Address
}

// MulticastLoopOption is used by SetSockOpt/GetSockOpt to specify whether
// multicast packets sent over a non-loopback interface will be looped back.
type MulticastLoopOption bool

// MembershipOption is used by SetSockOpt/GetSockOpt as an argument to
// AddMembershipOption and RemoveMembershipOption.
type MembershipOption struct {
NIC NICID
InterfaceAddr Address
MulticastAddr Address
}

// AddMembershipOption is used by SetSockOpt/GetSockOpt to join a multicast
// group identified by the given multicast address, on the interface matching
// the given interface address.
type AddMembershipOption MembershipOption

// RemoveMembershipOption is used by SetSockOpt/GetSockOpt to leave a multicast
// group identified by the given multicast address, on the interface matching
// the given interface address.
type RemoveMembershipOption MembershipOption

// OutOfBandInlineOption is used by SetSockOpt/GetSockOpt to specify whether
// TCP out-of-band data is delivered along with the normal in-band data.
type OutOfBandInlineOption int

// BroadcastOption is used by SetSockOpt/GetSockOpt to specify whether
// datagram sockets are allowed to send packets to a broadcast address.
type BroadcastOption int
// Route is a row in the routing table. It specifies through which NIC (and
// gateway) sets of packets should be routed. A row is considered viable if the
// masked target address matches the destination address in the row.
type Route struct {
// Destination is the address that must be matched against the masked
// target address to check if this row is viable.
Destination Address

// Mask specifies which bits of the Destination and the target address
// must match for this row to be viable.
Mask AddressMask

// Gateway is the gateway to be used if this row is viable.
Gateway Address

// NIC is the id of the nic to be used if this row is viable.
NIC NICID
}
// Match determines if r is viable for the given destination address.
func (r *Route) Match(addr Address) bool {
	if len(addr) != len(r.Destination) {
		return false
	}

	// The IPv4 broadcast address always matches. Using
	// header.Ipv4Broadcast here would introduce an import cycle, so the
	// literal is spelled out instead.
	if addr == "\xff\xff\xff\xff" {
		return true
	}

	// Every masked byte of the target must equal the route's destination.
	for i := range r.Destination {
		if addr[i]&r.Mask[i] != r.Destination[i] {
			return false
		}
	}
	return true
}
// LinkEndpointID represents a data link layer endpoint.
type LinkEndpointID uint64

// TransportProtocolNumber is the number of a transport protocol.
type TransportProtocolNumber uint32

// NetworkProtocolNumber is the number of a network protocol.
type NetworkProtocolNumber uint32
// A StatCounter keeps track of a statistic. All accesses go through
// sync/atomic, so a StatCounter is safe for concurrent use.
type StatCounter struct {
	count uint64
}

// IncrementBy increments the counter by v.
func (s *StatCounter) IncrementBy(v uint64) {
	atomic.AddUint64(&s.count, v)
}

// Increment adds one to the counter.
func (s *StatCounter) Increment() {
	s.IncrementBy(1)
}

// Value returns the current value of the counter.
func (s *StatCounter) Value() uint64 {
	return atomic.LoadUint64(&s.count)
}

// String implements the fmt.Stringer interface by rendering the current
// value in decimal.
func (s *StatCounter) String() string {
	return strconv.FormatUint(s.Value(), 10)
}
// ICMPv4PacketStats enumerates counts for all ICMPv4 packet types.
// Counter fields are normally allocated via Stats.FillIn.
type ICMPv4PacketStats struct {
// Echo is the total number of ICMPv4 echo packets counted.
Echo *StatCounter

// EchoReply is the total number of ICMPv4 echo reply packets counted.
EchoReply *StatCounter

// DstUnreachable is the total number of ICMPv4 destination unreachable
// packets counted.
DstUnreachable *StatCounter

// SrcQuench is the total number of ICMPv4 source quench packets
// counted.
SrcQuench *StatCounter

// Redirect is the total number of ICMPv4 redirect packets counted.
Redirect *StatCounter

// TimeExceeded is the total number of ICMPv4 time exceeded packets
// counted.
TimeExceeded *StatCounter

// ParamProblem is the total number of ICMPv4 parameter problem packets
// counted.
ParamProblem *StatCounter

// Timestamp is the total number of ICMPv4 timestamp packets counted.
Timestamp *StatCounter

// TimestampReply is the total number of ICMPv4 timestamp reply packets
// counted.
TimestampReply *StatCounter

// InfoRequest is the total number of ICMPv4 information request
// packets counted.
InfoRequest *StatCounter

// InfoReply is the total number of ICMPv4 information reply packets
// counted.
InfoReply *StatCounter
}

// ICMPv6PacketStats enumerates counts for all ICMPv6 packet types.
// Counter fields are normally allocated via Stats.FillIn.
type ICMPv6PacketStats struct {
// EchoRequest is the total number of ICMPv6 echo request packets
// counted.
EchoRequest *StatCounter

// EchoReply is the total number of ICMPv6 echo reply packets counted.
EchoReply *StatCounter

// DstUnreachable is the total number of ICMPv6 destination unreachable
// packets counted.
DstUnreachable *StatCounter

// PacketTooBig is the total number of ICMPv6 packet too big packets
// counted.
PacketTooBig *StatCounter

// TimeExceeded is the total number of ICMPv6 time exceeded packets
// counted.
TimeExceeded *StatCounter

// ParamProblem is the total number of ICMPv6 parameter problem packets
// counted.
ParamProblem *StatCounter

// RouterSolicit is the total number of ICMPv6 router solicit packets
// counted.
RouterSolicit *StatCounter

// RouterAdvert is the total number of ICMPv6 router advert packets
// counted.
RouterAdvert *StatCounter

// NeighborSolicit is the total number of ICMPv6 neighbor solicit
// packets counted.
NeighborSolicit *StatCounter

// NeighborAdvert is the total number of ICMPv6 neighbor advert packets
// counted.
NeighborAdvert *StatCounter

// RedirectMsg is the total number of ICMPv6 redirect message packets
// counted.
RedirectMsg *StatCounter
}

// ICMPv4SentPacketStats collects outbound ICMPv4-specific stats.
type ICMPv4SentPacketStats struct {
ICMPv4PacketStats

// Dropped is the total number of ICMPv4 packets dropped due to link
// layer errors.
Dropped *StatCounter
}

// ICMPv4ReceivedPacketStats collects inbound ICMPv4-specific stats.
type ICMPv4ReceivedPacketStats struct {
ICMPv4PacketStats

// Invalid is the total number of ICMPv4 packets received that the
// transport layer could not parse.
Invalid *StatCounter
}

// ICMPv6SentPacketStats collects outbound ICMPv6-specific stats.
type ICMPv6SentPacketStats struct {
ICMPv6PacketStats

// Dropped is the total number of ICMPv6 packets dropped due to link
// layer errors.
Dropped *StatCounter
}

// ICMPv6ReceivedPacketStats collects inbound ICMPv6-specific stats.
type ICMPv6ReceivedPacketStats struct {
ICMPv6PacketStats

// Invalid is the total number of ICMPv6 packets received that the
// transport layer could not parse.
Invalid *StatCounter
}

// ICMPStats collects ICMP-specific stats (both v4 and v6).
type ICMPStats struct {
// V4PacketsSent contains counts of sent packets by ICMPv4 packet type
// and a single count of packets which failed to write to the link
// layer.
V4PacketsSent ICMPv4SentPacketStats

// V4PacketsReceived contains counts of received packets by ICMPv4
// packet type and a single count of invalid packets received.
V4PacketsReceived ICMPv4ReceivedPacketStats

// V6PacketsSent contains counts of sent packets by ICMPv6 packet type
// and a single count of packets which failed to write to the link
// layer.
V6PacketsSent ICMPv6SentPacketStats

// V6PacketsReceived contains counts of received packets by ICMPv6
// packet type and a single count of invalid packets received.
V6PacketsReceived ICMPv6ReceivedPacketStats
}
// IPStats collects IP-specific stats (both v4 and v6).
type IPStats struct {
// PacketsReceived is the total number of IP packets received from the
// link layer in nic.DeliverNetworkPacket.
PacketsReceived *StatCounter

// InvalidAddressesReceived is the total number of IP packets received
// with an unknown or invalid destination address.
InvalidAddressesReceived *StatCounter

// PacketsDelivered is the total number of incoming IP packets that
// are successfully delivered to the transport layer via HandlePacket.
PacketsDelivered *StatCounter

// PacketsSent is the total number of IP packets sent via WritePacket.
PacketsSent *StatCounter

// OutgoingPacketErrors is the total number of IP packets which failed
// to write to a link-layer endpoint.
OutgoingPacketErrors *StatCounter
}

// TCPStats collects TCP-specific stats.
type TCPStats struct {
// ActiveConnectionOpenings is the number of connections opened
// successfully via Connect.
ActiveConnectionOpenings *StatCounter

// PassiveConnectionOpenings is the number of connections opened
// successfully via Listen.
PassiveConnectionOpenings *StatCounter

// FailedConnectionAttempts is the number of calls to Connect or Listen
// (active and passive openings, respectively) that end in an error.
FailedConnectionAttempts *StatCounter

// ValidSegmentsReceived is the number of TCP segments received that
// the transport layer successfully parsed.
ValidSegmentsReceived *StatCounter

// InvalidSegmentsReceived is the number of TCP segments received that
// the transport layer could not parse.
InvalidSegmentsReceived *StatCounter

// SegmentsSent is the number of TCP segments sent.
SegmentsSent *StatCounter

// ResetsSent is the number of TCP resets sent.
ResetsSent *StatCounter

// ResetsReceived is the number of TCP resets received.
ResetsReceived *StatCounter

// Retransmits is the number of TCP segments retransmitted.
Retransmits *StatCounter

// FastRecovery is the number of times Fast Recovery was used to
// recover from packet loss.
FastRecovery *StatCounter

// SACKRecovery is the number of times SACK Recovery was used to
// recover from packet loss.
SACKRecovery *StatCounter

// SlowStartRetransmits is the number of segments retransmitted in slow
// start.
SlowStartRetransmits *StatCounter

// FastRetransmit is the number of segments retransmitted in fast
// recovery.
FastRetransmit *StatCounter

// Timeouts is the number of times the RTO expired.
Timeouts *StatCounter

// ChecksumErrors is the number of segments dropped due to bad checksums.
ChecksumErrors *StatCounter
}

// UDPStats collects UDP-specific stats.
type UDPStats struct {
// PacketsReceived is the number of UDP datagrams received via
// HandlePacket.
PacketsReceived *StatCounter

// UnknownPortErrors is the number of incoming UDP datagrams dropped
// because they did not have a known destination port.
UnknownPortErrors *StatCounter

// ReceiveBufferErrors is the number of incoming UDP datagrams dropped
// due to the receiving buffer being in an invalid state.
ReceiveBufferErrors *StatCounter

// MalformedPacketsReceived is the number of incoming UDP datagrams
// dropped due to the UDP header being in a malformed state.
MalformedPacketsReceived *StatCounter

// PacketsSent is the number of UDP datagrams sent via sendUDP.
PacketsSent *StatCounter
}

// Stats holds statistics about the networking stack.
//
// All fields are optional; use FillIn to allocate any counters left nil.
type Stats struct {
// UnknownProtocolRcvdPackets is the number of packets received by the
// stack that were for an unknown or unsupported protocol.
UnknownProtocolRcvdPackets *StatCounter

// MalformedRcvdPackets is the number of packets received by the stack
// that were deemed malformed.
MalformedRcvdPackets *StatCounter

// DroppedPackets is the number of packets dropped due to full queues.
DroppedPackets *StatCounter

// ICMP breaks out ICMP-specific stats (both v4 and v6).
ICMP ICMPStats

// IP breaks out IP-specific stats (both v4 and v6).
IP IPStats

// TCP breaks out TCP-specific stats.
TCP TCPStats

// UDP breaks out UDP-specific stats.
UDP UDPStats
}
// fillIn walks the fields of a stats struct, allocating a fresh StatCounter
// for every *StatCounter field that is still nil and recursing into nested
// structs. It panics on any field of another kind, since that indicates a
// malformed stats type.
func fillIn(v reflect.Value) {
	for i := 0; i < v.NumField(); i++ {
		field := v.Field(i)
		switch field.Kind() {
		case reflect.Ptr:
			counter := field.Addr().Interface().(**StatCounter)
			if *counter == nil {
				*counter = &StatCounter{}
			}
		case reflect.Struct:
			fillIn(field)
		default:
			panic(fmt.Sprintf("unexpected type %s", field.Type()))
		}
	}
}
// FillIn returns a copy of s with nil fields initialized to new StatCounters.
func (s Stats) FillIn() Stats {
// s is already a copy (value receiver), so mutating it through
// reflection does not affect the caller's Stats.
fillIn(reflect.ValueOf(&s).Elem())
return s
}
// String implements the fmt.Stringer interface.
//
// 4-byte addresses render in IPv4 dotted-decimal form, 16-byte addresses in
// IPv6 form with the longest run of zero groups compressed to "::", and any
// other length as raw hex.
func (a Address) String() string {
switch len(a) {
case 4:
return fmt.Sprintf("%d.%d.%d.%d", int(a[0]), int(a[1]), int(a[2]), int(a[3]))
case 16:
// Find the longest subsequence of hexadecimal zeros.
// start/end are byte offsets of the run; -1 means no run found.
start, end := -1, -1
for i := 0; i < len(a); i += 2 {
j := i
for j < len(a) && a[j] == 0 && a[j+1] == 0 {
j += 2
}
// Only compress runs of at least two zero groups (j > i+2), and
// prefer the longest run seen so far.
if j > i+2 && j-i > end-start {
start, end = i, j
}
}
var b strings.Builder
for i := 0; i < len(a); i += 2 {
if i == start {
// Emit "::" in place of the compressed run and skip it.
b.WriteString("::")
i = end
if end >= len(a) {
break
}
} else if i > 0 {
b.WriteByte(':')
}
// Each group is two big-endian bytes.
v := uint16(a[i+0])<<8 | uint16(a[i+1])
if v == 0 {
b.WriteByte('0')
} else {
const digits = "0123456789abcdef"
// Emit nibbles most-significant first, skipping leading zeros.
// The counter runs 3,2,1,0 and terminates when the unsigned i
// wraps around past 0, making i < 4 false.
for i := uint(3); i < 4; i-- {
if v := v >> (i * 4); v != 0 {
b.WriteByte(digits[v&0xf])
}
}
}
}
return b.String()
default:
return fmt.Sprintf("%x", []byte(a))
}
}
// To4 converts the IPv4 address to a 4-byte representation.
// If the address is not an IPv4 address, To4 returns "".
func (a Address) To4() Address {
	const (
		ipv4len = 4
		ipv6len = 16
	)
	switch len(a) {
	case ipv4len:
		return a
	case ipv6len:
		// An IPv4-mapped IPv6 address: ten zero bytes, then 0xffff,
		// then the embedded IPv4 address.
		if isZeros(a[0:10]) && a[10] == 0xff && a[11] == 0xff {
			return a[12:16]
		}
	}
	return ""
}

// isZeros reports whether a is all zeros.
//
// The index loop is deliberate: Address is a string type, and ranging over a
// string decodes runes, which would skip bytes of multi-byte sequences.
func isZeros(a Address) bool {
	for i := 0; i < len(a); i++ {
		if a[i] != 0 {
			return false
		}
	}
	return true
}
// LinkAddress is a byte slice cast as a string that represents a link address.
// It is typically a 6-byte MAC address.
type LinkAddress string

// String implements the fmt.Stringer interface. A 6-byte address renders in
// colon-separated MAC form; any other length renders as raw hex.
func (a LinkAddress) String() string {
	if len(a) == 6 {
		return fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x", a[0], a[1], a[2], a[3], a[4], a[5])
	}
	return fmt.Sprintf("%x", []byte(a))
}

// ParseMACAddress parses an IEEE 802 address.
//
// It must be in the format aa:bb:cc:dd:ee:ff or aa-bb-cc-dd-ee-ff.
func ParseMACAddress(s string) (LinkAddress, error) {
	isSeparator := func(c rune) bool {
		return c == ':' || c == '-'
	}
	parts := strings.FieldsFunc(s, isSeparator)
	if len(parts) != 6 {
		return "", fmt.Errorf("inconsistent parts: %s", s)
	}
	addr := make([]byte, 0, len(parts))
	for _, part := range parts {
		// Each part must be a hex byte (fits in 8 bits).
		u, err := strconv.ParseUint(part, 16, 8)
		if err != nil {
			return "", fmt.Errorf("invalid hex digits: %s", s)
		}
		addr = append(addr, byte(u))
	}
	return LinkAddress(addr), nil
}
// ProtocolAddress is an address and the network protocol it is associated
// with.
type ProtocolAddress struct {
// Protocol is the protocol of the address.
Protocol NetworkProtocolNumber

// Address is a network address.
Address Address
}
// danglingEndpointsMu protects access to danglingEndpoints.
var danglingEndpointsMu sync.Mutex

// danglingEndpoints tracks all dangling endpoints no longer owned by the app.
// All reads and writes must hold danglingEndpointsMu.
var danglingEndpoints = make(map[Endpoint]struct{})
// GetDanglingEndpoints returns a snapshot of all dangling endpoints.
func GetDanglingEndpoints() []Endpoint {
	danglingEndpointsMu.Lock()
	// The length read must also happen under the lock: reading
	// len(danglingEndpoints) before acquiring it (as the previous version
	// did) races with concurrent AddDanglingEndpoint /
	// DeleteDanglingEndpoint calls.
	es := make([]Endpoint, 0, len(danglingEndpoints))
	for e := range danglingEndpoints {
		es = append(es, e)
	}
	danglingEndpointsMu.Unlock()
	return es
}
// AddDanglingEndpoint adds a dangling endpoint.
func AddDanglingEndpoint(e Endpoint) {
	danglingEndpointsMu.Lock()
	defer danglingEndpointsMu.Unlock()
	danglingEndpoints[e] = struct{}{}
}
// DeleteDanglingEndpoint removes a dangling endpoint.
func DeleteDanglingEndpoint(e Endpoint) {
	danglingEndpointsMu.Lock()
	defer danglingEndpointsMu.Unlock()
	delete(danglingEndpoints, e)
}
// AsyncLoading is the global barrier for asynchronous endpoint loading
// activities.
var AsyncLoading sync.WaitGroup
|
// Example channel-based high-level Apache Kafka consumer
package main
/**
* Copyright 2016 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import (
"fmt"
"github.com/confluentinc/confluent-kafka-go/kafka"
"os"
"os/signal"
"syscall"
)
// main runs a channel-based high-level Kafka consumer: it subscribes to the
// topics given on the command line and prints incoming messages until a
// SIGINT/SIGTERM is received or a fatal error event occurs.
func main() {
	if len(os.Args) < 4 {
		fmt.Fprintf(os.Stderr, "Usage: %s <broker> <group> <topics..>\n",
			os.Args[0])
		os.Exit(1)
	}

	broker := os.Args[1]
	group := os.Args[2]
	topics := os.Args[3:]

	// The channel handed to signal.Notify must be buffered: Notify sends
	// without blocking, so a signal arriving while main is busy elsewhere
	// would be dropped on an unbuffered channel.
	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

	c, err := kafka.NewConsumer(&kafka.ConfigMap{
		"bootstrap.servers":               broker,
		"group.id":                        group,
		"session.timeout.ms":              6000,
		"go.events.channel.enable":        true,
		"go.application.rebalance.enable": true,
		"default.topic.config":            kafka.ConfigMap{"auto.offset.reset": "earliest"}})
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to create consumer: %s\n", err)
		os.Exit(1)
	}

	fmt.Printf("Created Consumer %v\n", c)

	// The previous version ignored this error, silently consuming nothing
	// on a bad subscription.
	if err = c.SubscribeTopics(topics, nil); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to subscribe to topics: %s\n", err)
		os.Exit(1)
	}

	run := true
	for run {
		select {
		case sig := <-sigchan:
			fmt.Printf("Caught signal %v: terminating\n", sig)
			run = false
		case ev := <-c.Events():
			switch e := ev.(type) {
			case kafka.AssignedPartitions:
				// With go.application.rebalance.enable, the
				// application must Assign/Unassign partitions
				// itself on rebalance events.
				fmt.Fprintf(os.Stderr, "%% %v\n", e)
				c.Assign(e.Partitions)
			case kafka.RevokedPartitions:
				fmt.Fprintf(os.Stderr, "%% %v\n", e)
				c.Unassign()
			case *kafka.Message:
				fmt.Printf("%% Message on %s:\n%s\n",
					e.TopicPartition, string(e.Value))
			case kafka.PartitionEOF:
				fmt.Printf("%% Reached %v\n", e)
			case kafka.Error:
				fmt.Fprintf(os.Stderr, "%% Error: %v\n", e)
				run = false
			}
		}
	}

	fmt.Printf("Closing consumer\n")
	c.Close()
}
buffer signal channel (#66)
// Example channel-based high-level Apache Kafka consumer
package main
/**
* Copyright 2016 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import (
"fmt"
"github.com/confluentinc/confluent-kafka-go/kafka"
"os"
"os/signal"
"syscall"
)
// main runs a channel-based high-level Kafka consumer: it subscribes to the
// topics given on the command line and prints incoming messages until a
// SIGINT/SIGTERM is received or a fatal error event occurs.
func main() {
	if len(os.Args) < 4 {
		fmt.Fprintf(os.Stderr, "Usage: %s <broker> <group> <topics..>\n",
			os.Args[0])
		os.Exit(1)
	}

	broker := os.Args[1]
	group := os.Args[2]
	topics := os.Args[3:]

	// Buffered so a signal delivered while main is busy is not dropped
	// (signal.Notify does not block on send).
	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

	c, err := kafka.NewConsumer(&kafka.ConfigMap{
		"bootstrap.servers":               broker,
		"group.id":                        group,
		"session.timeout.ms":              6000,
		"go.events.channel.enable":        true,
		"go.application.rebalance.enable": true,
		"default.topic.config":            kafka.ConfigMap{"auto.offset.reset": "earliest"}})
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to create consumer: %s\n", err)
		os.Exit(1)
	}

	fmt.Printf("Created Consumer %v\n", c)

	// The previous version ignored this error, silently consuming nothing
	// on a bad subscription.
	if err = c.SubscribeTopics(topics, nil); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to subscribe to topics: %s\n", err)
		os.Exit(1)
	}

	run := true
	for run {
		select {
		case sig := <-sigchan:
			fmt.Printf("Caught signal %v: terminating\n", sig)
			run = false
		case ev := <-c.Events():
			switch e := ev.(type) {
			case kafka.AssignedPartitions:
				// With go.application.rebalance.enable, the
				// application must Assign/Unassign partitions
				// itself on rebalance events.
				fmt.Fprintf(os.Stderr, "%% %v\n", e)
				c.Assign(e.Partitions)
			case kafka.RevokedPartitions:
				fmt.Fprintf(os.Stderr, "%% %v\n", e)
				c.Unassign()
			case *kafka.Message:
				fmt.Printf("%% Message on %s:\n%s\n",
					e.TopicPartition, string(e.Value))
			case kafka.PartitionEOF:
				fmt.Printf("%% Reached %v\n", e)
			case kafka.Error:
				fmt.Fprintf(os.Stderr, "%% Error: %v\n", e)
				run = false
			}
		}
	}

	fmt.Printf("Closing consumer\n")
	c.Close()
}
|
package netlink
import (
"bytes"
"net"
"syscall"
"testing"
"time"
"github.com/vishvananda/netns"
)
const (
// testTxQLen is the explicit transmit queue length used by tests.
testTxQLen int = 100
// defaultTxQLen is the kernel's default transmit queue length, expected
// when no TxQLen is set explicitly.
defaultTxQLen int = 1000
)
// testLinkAddDel adds link, verifies that the attributes the kernel reports
// for the created link match the requested ones (including type-specific
// fields for Vlan, Veth, Vxlan, IPVlan and Macvlan links), then deletes it
// and checks that the total link count returns to its previous value.
func testLinkAddDel(t *testing.T, link Link) {
links, err := LinkList()
if err != nil {
t.Fatal(err)
}
// Remember the link count so deletion can be verified at the end.
num := len(links)
if err := LinkAdd(link); err != nil {
t.Fatal(err)
}
// Re-query the link by name so we compare against what the kernel
// actually created, not what we asked for.
base := link.Attrs()
result, err := LinkByName(base.Name)
if err != nil {
t.Fatal(err)
}
rBase := result.Attrs()
if vlan, ok := link.(*Vlan); ok {
other, ok := result.(*Vlan)
if !ok {
t.Fatal("Result of create is not a vlan")
}
if vlan.VlanId != other.VlanId {
t.Fatal("Link.VlanId id doesn't match")
}
}
// The parent index must round-trip: present iff it was requested.
if rBase.ParentIndex == 0 && base.ParentIndex != 0 {
t.Fatal("Created link doesn't have a Parent but it should")
} else if rBase.ParentIndex != 0 && base.ParentIndex == 0 {
t.Fatal("Created link has a Parent but it shouldn't")
} else if rBase.ParentIndex != 0 && base.ParentIndex != 0 {
if rBase.ParentIndex != base.ParentIndex {
t.Fatal("Link.ParentIndex doesn't match")
}
}
if veth, ok := result.(*Veth); ok {
if rBase.TxQLen != base.TxQLen {
t.Fatalf("qlen is %d, should be %d", rBase.TxQLen, base.TxQLen)
}
if rBase.MTU != base.MTU {
t.Fatalf("MTU is %d, should be %d", rBase.MTU, base.MTU)
}
// A veth is created as a pair; verify the peer end too.
if veth.PeerName != "" {
var peer *Veth
other, err := LinkByName(veth.PeerName)
if err != nil {
t.Fatalf("Peer %s not created", veth.PeerName)
}
if peer, ok = other.(*Veth); !ok {
t.Fatalf("Peer %s is incorrect type", veth.PeerName)
}
if peer.TxQLen != testTxQLen {
t.Fatalf("TxQLen of peer is %d, should be %d", peer.TxQLen, testTxQLen)
}
}
}
if vxlan, ok := link.(*Vxlan); ok {
other, ok := result.(*Vxlan)
if !ok {
t.Fatal("Result of create is not a vxlan")
}
compareVxlan(t, vxlan, other)
}
if ipv, ok := link.(*IPVlan); ok {
other, ok := result.(*IPVlan)
if !ok {
t.Fatal("Result of create is not a ipvlan")
}
if ipv.Mode != other.Mode {
t.Fatalf("Got unexpected mode: %d, expected: %d", other.Mode, ipv.Mode)
}
}
if macv, ok := link.(*Macvlan); ok {
other, ok := result.(*Macvlan)
if !ok {
t.Fatal("Result of create is not a macvlan")
}
if macv.Mode != other.Mode {
t.Fatalf("Got unexpected mode: %d, expected: %d", other.Mode, macv.Mode)
}
}
if err = LinkDel(link); err != nil {
t.Fatal(err)
}
// Deleting the link must restore the original link count.
links, err = LinkList()
if err != nil {
t.Fatal(err)
}
if len(links) != num {
t.Fatal("Link not removed properly")
}
}
// compareVxlan fails the test if actual's attributes differ from expected.
// Several fields are only checked conditionally: SrcAddr/Group when non-nil,
// TTL/TOS unless set to the -1 "don't check" sentinel, Age/Limit/Port when
// positive, and the port range only when either bound was requested.
func compareVxlan(t *testing.T, expected, actual *Vxlan) {
if actual.VxlanId != expected.VxlanId {
t.Fatal("Vxlan.VxlanId doesn't match")
}
if expected.SrcAddr != nil && !actual.SrcAddr.Equal(expected.SrcAddr) {
t.Fatal("Vxlan.SrcAddr doesn't match")
}
if expected.Group != nil && !actual.Group.Equal(expected.Group) {
t.Fatal("Vxlan.Group doesn't match")
}
if expected.TTL != -1 && actual.TTL != expected.TTL {
t.Fatal("Vxlan.TTL doesn't match")
}
if expected.TOS != -1 && actual.TOS != expected.TOS {
t.Fatal("Vxlan.TOS doesn't match")
}
if actual.Learning != expected.Learning {
t.Fatal("Vxlan.Learning doesn't match")
}
if actual.Proxy != expected.Proxy {
t.Fatal("Vxlan.Proxy doesn't match")
}
if actual.RSC != expected.RSC {
t.Fatal("Vxlan.RSC doesn't match")
}
if actual.L2miss != expected.L2miss {
t.Fatal("Vxlan.L2miss doesn't match")
}
if actual.L3miss != expected.L3miss {
t.Fatal("Vxlan.L3miss doesn't match")
}
if actual.GBP != expected.GBP {
t.Fatal("Vxlan.GBP doesn't match")
}
// NoAge disables ageing entirely; Age is only meaningful without it.
if expected.NoAge {
if !actual.NoAge {
t.Fatal("Vxlan.NoAge doesn't match")
}
} else if expected.Age > 0 && actual.Age != expected.Age {
t.Fatal("Vxlan.Age doesn't match")
}
if expected.Limit > 0 && actual.Limit != expected.Limit {
t.Fatal("Vxlan.Limit doesn't match")
}
if expected.Port > 0 && actual.Port != expected.Port {
t.Fatal("Vxlan.Port doesn't match")
}
if expected.PortLow > 0 || expected.PortHigh > 0 {
if actual.PortLow != expected.PortLow {
t.Fatal("Vxlan.PortLow doesn't match")
}
if actual.PortHigh != expected.PortHigh {
t.Fatal("Vxlan.PortHigh doesn't match")
}
}
}
// TestLinkAddDelDummy exercises add/delete of a plain dummy link.
func TestLinkAddDelDummy(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
testLinkAddDel(t, &Dummy{LinkAttrs{Name: "foo"}})
}

// TestLinkAddDelIfb exercises add/delete of an ifb link.
func TestLinkAddDelIfb(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
testLinkAddDel(t, &Ifb{LinkAttrs{Name: "foo"}})
}

// TestLinkAddDelBridge exercises add/delete of a bridge with a custom MTU.
func TestLinkAddDelBridge(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
testLinkAddDel(t, &Bridge{LinkAttrs{Name: "foo", MTU: 1400}})
}
// TestLinkAddDelVlan exercises add/delete of a vlan on top of a dummy parent.
func TestLinkAddDelVlan(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()

// A vlan requires a parent link to exist first.
parent := &Dummy{LinkAttrs{Name: "foo"}}
if err := LinkAdd(parent); err != nil {
t.Fatal(err)
}

testLinkAddDel(t, &Vlan{LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index}, 900})

if err := LinkDel(parent); err != nil {
t.Fatal(err)
}
}
// TestLinkAddDelMacvlan exercises add/delete of a macvlan in private mode on
// top of a dummy parent.
func TestLinkAddDelMacvlan(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()

// A macvlan requires a parent link to exist first.
parent := &Dummy{LinkAttrs{Name: "foo"}}
if err := LinkAdd(parent); err != nil {
t.Fatal(err)
}

testLinkAddDel(t, &Macvlan{
LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index},
Mode:      MACVLAN_MODE_PRIVATE,
})

if err := LinkDel(parent); err != nil {
t.Fatal(err)
}
}

// TestLinkAddDelMacvtap exercises add/delete of a macvtap in private mode on
// top of a dummy parent.
func TestLinkAddDelMacvtap(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()

// A macvtap requires a parent link to exist first.
parent := &Dummy{LinkAttrs{Name: "foo"}}
if err := LinkAdd(parent); err != nil {
t.Fatal(err)
}

testLinkAddDel(t, &Macvtap{
Macvlan: Macvlan{
LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index},
Mode:      MACVLAN_MODE_PRIVATE,
},
})

if err := LinkDel(parent); err != nil {
t.Fatal(err)
}
}
// TestLinkAddDelVeth exercises add/delete of a veth pair with an explicit
// TxQLen and MTU (testLinkAddDel also verifies the peer end).
func TestLinkAddDelVeth(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
testLinkAddDel(t, &Veth{LinkAttrs{Name: "foo", TxQLen: testTxQLen, MTU: 1400}, "bar"})
}
// TestLinkAddVethWithDefaultTxQLen verifies that a veth pair created without
// an explicit TxQLen gets the default queue length on both ends.
func TestLinkAddVethWithDefaultTxQLen(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()

	la := NewLinkAttrs()
	la.Name = "foo"
	veth := &Veth{LinkAttrs: la, PeerName: "bar"}
	if err := LinkAdd(veth); err != nil {
		t.Fatal(err)
	}

	link, err := LinkByName("foo")
	if err != nil {
		t.Fatal(err)
	}
	if veth, ok := link.(*Veth); !ok {
		t.Fatalf("unexpected link type: %T", link)
	} else if veth.TxQLen != defaultTxQLen {
		t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, defaultTxQLen)
	}

	peer, err := LinkByName("bar")
	if err != nil {
		t.Fatal(err)
	}
	// Bug fix: the failure message previously printed link's type instead
	// of peer's, hiding which end of the pair had the wrong type.
	if veth, ok := peer.(*Veth); !ok {
		t.Fatalf("unexpected link type: %T", peer)
	} else if veth.TxQLen != defaultTxQLen {
		t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, defaultTxQLen)
	}
}
// TestLinkAddVethWithZeroTxQLen verifies that an explicitly requested TxQLen
// of zero is honored on both ends of a veth pair.
func TestLinkAddVethWithZeroTxQLen(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()

	la := NewLinkAttrs()
	la.Name = "foo"
	la.TxQLen = 0
	veth := &Veth{LinkAttrs: la, PeerName: "bar"}
	if err := LinkAdd(veth); err != nil {
		t.Fatal(err)
	}

	link, err := LinkByName("foo")
	if err != nil {
		t.Fatal(err)
	}
	if veth, ok := link.(*Veth); !ok {
		t.Fatalf("unexpected link type: %T", link)
	} else if veth.TxQLen != 0 {
		t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, 0)
	}

	peer, err := LinkByName("bar")
	if err != nil {
		t.Fatal(err)
	}
	// Bug fix: the failure message previously printed link's type instead
	// of peer's, hiding which end of the pair had the wrong type.
	if veth, ok := peer.(*Veth); !ok {
		t.Fatalf("unexpected link type: %T", peer)
	} else if veth.TxQLen != 0 {
		t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, 0)
	}
}
// TestLinkAddDummyWithTxQLen verifies that an explicit TxQLen is honored for
// a dummy link.
func TestLinkAddDummyWithTxQLen(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()

la := NewLinkAttrs()
la.Name = "foo"
la.TxQLen = 1500
dummy := &Dummy{LinkAttrs: la}
if err := LinkAdd(dummy); err != nil {
t.Fatal(err)
}

// Re-query the link so the check runs against kernel-reported state.
link, err := LinkByName("foo")
if err != nil {
t.Fatal(err)
}
if dummy, ok := link.(*Dummy); !ok {
t.Fatalf("unexpected link type: %T", link)
} else {
if dummy.TxQLen != 1500 {
t.Fatalf("TxQLen is %d, should be %d", dummy.TxQLen, 1500)
}
}
}
// TestLinkAddDelBridgeMaster exercises add/delete of a dummy link enslaved to
// a bridge via MasterIndex.
func TestLinkAddDelBridgeMaster(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()

// The bridge master must exist before a slave can reference it.
master := &Bridge{LinkAttrs{Name: "foo"}}
if err := LinkAdd(master); err != nil {
t.Fatal(err)
}
testLinkAddDel(t, &Dummy{LinkAttrs{Name: "bar", MasterIndex: master.Attrs().Index}})

if err := LinkDel(master); err != nil {
t.Fatal(err)
}
}
// TestLinkSetUnsetResetMaster verifies that a slave link's master can be
// set, changed to a different bridge, and finally cleared again.
func TestLinkSetUnsetResetMaster(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	master := &Bridge{LinkAttrs{Name: "foo"}}
	newmaster := &Bridge{LinkAttrs{Name: "bar"}}
	slave := &Dummy{LinkAttrs{Name: "baz"}}
	for _, l := range []Link{master, newmaster, slave} {
		if err := LinkAdd(l); err != nil {
			t.Fatal(err)
		}
	}
	// checkMaster re-reads the slave and compares its MasterIndex.
	checkMaster := func(want int, msg string) {
		link, err := LinkByName("baz")
		if err != nil {
			t.Fatal(err)
		}
		if link.Attrs().MasterIndex != want {
			t.Fatal(msg)
		}
	}
	if err := LinkSetMaster(slave, master); err != nil {
		t.Fatal(err)
	}
	checkMaster(master.Attrs().Index, "Master not set properly")
	if err := LinkSetMaster(slave, newmaster); err != nil {
		t.Fatal(err)
	}
	checkMaster(newmaster.Attrs().Index, "Master not reset properly")
	if err := LinkSetMaster(slave, nil); err != nil {
		t.Fatal(err)
	}
	checkMaster(0, "Master not unset properly")
	for _, l := range []Link{slave, newmaster, master} {
		if err := LinkDel(l); err != nil {
			t.Fatal(err)
		}
	}
}
// TestLinkSetNs moves the "bar" end of a veth pair into the base network
// namespace and verifies each end is only visible from its own namespace.
func TestLinkSetNs(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	basens, err := netns.Get()
	if err != nil {
		t.Fatal("Failed to get basens")
	}
	defer basens.Close()
	newns, err := netns.New()
	if err != nil {
		t.Fatal("Failed to create newns")
	}
	defer newns.Close()
	link := &Veth{LinkAttrs{Name: "foo"}, "bar"}
	if err := LinkAdd(link); err != nil {
		t.Fatal(err)
	}
	peer, err := LinkByName("bar")
	if err != nil {
		t.Fatal(err)
	}
	// Bug fix: the error returned by LinkSetNsFd was discarded and the
	// stale err from LinkByName was checked instead, so a failed
	// namespace move went undetected.
	if err := LinkSetNsFd(peer, int(basens)); err != nil {
		t.Fatal("Failed to set newns for link")
	}
	_, err = LinkByName("bar")
	if err == nil {
		t.Fatal("Link bar is still in newns")
	}
	err = netns.Set(basens)
	if err != nil {
		t.Fatal("Failed to set basens")
	}
	peer, err = LinkByName("bar")
	if err != nil {
		t.Fatal("Link is not in basens")
	}
	if err := LinkDel(peer); err != nil {
		t.Fatal(err)
	}
	err = netns.Set(newns)
	if err != nil {
		t.Fatal("Failed to set newns")
	}
	// Deleting one end of a veth pair must remove the other end too.
	_, err = LinkByName("foo")
	if err == nil {
		t.Fatal("Other half of veth pair not deleted")
	}
}
// TestLinkAddDelVxlan creates a vxlan device on top of a dummy parent
// and runs it through the generic add/delete round-trip checks.
func TestLinkAddDelVxlan(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	parent := &Dummy{LinkAttrs{Name: "foo"}}
	if err := LinkAdd(parent); err != nil {
		t.Fatal(err)
	}
	testLinkAddDel(t, &Vxlan{
		LinkAttrs:    LinkAttrs{Name: "bar"},
		VxlanId:      10,
		VtepDevIndex: parent.Index,
		Learning:     true,
		L2miss:       true,
		L3miss:       true,
	})
	if err := LinkDel(parent); err != nil {
		t.Fatal(err)
	}
}
// TestLinkAddDelIPVlanL2 round-trips an L2-mode ipvlan on a dummy parent.
func TestLinkAddDelIPVlanL2(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	parent := &Dummy{LinkAttrs{Name: "foo"}}
	if err := LinkAdd(parent); err != nil {
		t.Fatal(err)
	}
	ipv := IPVlan{
		LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Index},
		Mode:      IPVLAN_MODE_L2,
	}
	testLinkAddDel(t, &ipv)
}
// TestLinkAddDelIPVlanL3 round-trips an L3-mode ipvlan on a dummy parent.
func TestLinkAddDelIPVlanL3(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	parent := &Dummy{LinkAttrs{Name: "foo"}}
	if err := LinkAdd(parent); err != nil {
		t.Fatal(err)
	}
	ipv := IPVlan{
		LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Index},
		Mode:      IPVLAN_MODE_L3,
	}
	testLinkAddDel(t, &ipv)
}
// TestLinkAddDelIPVlanNoParent asserts that creating an ipvlan without a
// parent device is rejected with the expected error message.
func TestLinkAddDelIPVlanNoParent(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	ipv := &IPVlan{
		LinkAttrs: LinkAttrs{Name: "bar"},
		Mode:      IPVLAN_MODE_L3,
	}
	err := LinkAdd(ipv)
	if err == nil {
		t.Fatal("Add should fail if ipvlan creating without ParentIndex")
	}
	if err.Error() != "Can't create ipvlan link without ParentIndex" {
		t.Fatalf("Error should be about missing ParentIndex, got %q", err)
	}
}
// TestLinkByIndex verifies lookup by interface index, and that the lookup
// fails once the link has been deleted.
func TestLinkByIndex(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	dummy := &Dummy{LinkAttrs{Name: "dummy"}}
	if err := LinkAdd(dummy); err != nil {
		t.Fatal(err)
	}
	found, err := LinkByIndex(dummy.Index)
	if err != nil {
		t.Fatal(err)
	}
	if found.Attrs().Index != dummy.Attrs().Index {
		t.Fatalf("Indices don't match: %v != %v", found.Attrs().Index, dummy.Attrs().Index)
	}
	// Bug fix: the LinkDel error was silently ignored; if deletion fails,
	// the "not found" check below is meaningless.
	if err := LinkDel(dummy); err != nil {
		t.Fatal(err)
	}
	// test not found
	_, err = LinkByIndex(dummy.Attrs().Index)
	if err == nil {
		// Bug fix: the message formatted the (nil) error instead of the index.
		t.Fatalf("LinkByIndex(%v) found deleted link", dummy.Attrs().Index)
	}
}
// TestLinkSet exercises renaming a link, changing its MTU, and setting
// its hardware address, re-reading the link after each mutation.
func TestLinkSet(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	if err := LinkAdd(&Dummy{LinkAttrs{Name: "foo"}}); err != nil {
		t.Fatal(err)
	}
	link, err := LinkByName("foo")
	if err != nil {
		t.Fatal(err)
	}
	if err = LinkSetName(link, "bar"); err != nil {
		t.Fatalf("Could not change interface name: %v", err)
	}
	if link, err = LinkByName("bar"); err != nil {
		t.Fatalf("Interface name not changed: %v", err)
	}
	if err = LinkSetMTU(link, 1400); err != nil {
		t.Fatalf("Could not set MTU: %v", err)
	}
	if link, err = LinkByName("bar"); err != nil {
		t.Fatal(err)
	}
	if link.Attrs().MTU != 1400 {
		t.Fatal("MTU not changed!")
	}
	hwAddr, err := net.ParseMAC("00:12:34:56:78:AB")
	if err != nil {
		t.Fatal(err)
	}
	if err = LinkSetHardwareAddr(link, hwAddr); err != nil {
		t.Fatal(err)
	}
	if link, err = LinkByName("bar"); err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(link.Attrs().HardwareAddr, hwAddr) {
		t.Fatalf("hardware address not changed!")
	}
}
// expectLinkUpdate drains updates from ch until it sees one for ifaceName
// whose IFF_UP flag matches up, or until a minute has elapsed. It reports
// whether the expected update was observed.
func expectLinkUpdate(ch <-chan LinkUpdate, ifaceName string, up bool) bool {
	// Bug fix: the timeout was created inside the loop, so every
	// unrelated update restarted the one-minute clock and a steady
	// stream of irrelevant events could make this spin indefinitely.
	// Create the deadline once, before the loop.
	timeout := time.After(time.Minute)
	for {
		select {
		case update := <-ch:
			if ifaceName == update.Link.Attrs().Name && (update.IfInfomsg.Flags&syscall.IFF_UP != 0) == up {
				return true
			}
		case <-timeout:
			return false
		}
	}
}
// TestLinkSubscribe subscribes to link updates and verifies that add,
// set-up, and delete events for a veth pair are all delivered.
func TestLinkSubscribe(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	updates := make(chan LinkUpdate)
	done := make(chan struct{})
	defer close(done)
	if err := LinkSubscribe(updates, done); err != nil {
		t.Fatal(err)
	}
	veth := &Veth{LinkAttrs{Name: "foo", TxQLen: testTxQLen, MTU: 1400}, "bar"}
	if err := LinkAdd(veth); err != nil {
		t.Fatal(err)
	}
	if !expectLinkUpdate(updates, "foo", false) {
		t.Fatal("Add update not received as expected")
	}
	if err := LinkSetUp(veth); err != nil {
		t.Fatal(err)
	}
	if !expectLinkUpdate(updates, "foo", true) {
		t.Fatal("Link Up update not received as expected")
	}
	if err := LinkDel(veth); err != nil {
		t.Fatal(err)
	}
	if !expectLinkUpdate(updates, "foo", false) {
		t.Fatal("Del update not received as expected")
	}
}
Add a test for enslaving a link to a non-existent master, and use the new API, LinkSetNoMaster
package netlink
import (
"bytes"
"net"
"syscall"
"testing"
"time"
"github.com/vishvananda/netns"
)
// Queue lengths used by the TxQLen round-trip tests.
const (
	testTxQLen int = 100 // explicit value requested by tests
	defaultTxQLen int = 1000 // value expected when no TxQLen is requested
)
// testLinkAddDel is the shared round-trip helper: it records the current
// link count, adds the given link, re-reads it by name, compares the
// type-specific attributes that should survive the kernel round trip,
// then deletes the link and checks the link count is back to the start.
func testLinkAddDel(t *testing.T, link Link) {
	links, err := LinkList()
	if err != nil {
		t.Fatal(err)
	}
	// Baseline link count, used after deletion to detect leaks.
	num := len(links)
	if err := LinkAdd(link); err != nil {
		t.Fatal(err)
	}
	base := link.Attrs()
	result, err := LinkByName(base.Name)
	if err != nil {
		t.Fatal(err)
	}
	rBase := result.Attrs()
	// Vlan: the VLAN id must round-trip.
	if vlan, ok := link.(*Vlan); ok {
		other, ok := result.(*Vlan)
		if !ok {
			t.Fatal("Result of create is not a vlan")
		}
		if vlan.VlanId != other.VlanId {
			t.Fatal("Link.VlanId id doesn't match")
		}
	}
	// ParentIndex must agree in both presence and value on both sides.
	if rBase.ParentIndex == 0 && base.ParentIndex != 0 {
		t.Fatal("Created link doesn't have a Parent but it should")
	} else if rBase.ParentIndex != 0 && base.ParentIndex == 0 {
		t.Fatal("Created link has a Parent but it shouldn't")
	} else if rBase.ParentIndex != 0 && base.ParentIndex != 0 {
		if rBase.ParentIndex != base.ParentIndex {
			t.Fatal("Link.ParentIndex doesn't match")
		}
	}
	// Veth: queue length and MTU must round-trip; if a peer name was
	// requested the peer must exist and carry the test queue length.
	if veth, ok := result.(*Veth); ok {
		if rBase.TxQLen != base.TxQLen {
			t.Fatalf("qlen is %d, should be %d", rBase.TxQLen, base.TxQLen)
		}
		if rBase.MTU != base.MTU {
			t.Fatalf("MTU is %d, should be %d", rBase.MTU, base.MTU)
		}
		if veth.PeerName != "" {
			var peer *Veth
			other, err := LinkByName(veth.PeerName)
			if err != nil {
				t.Fatalf("Peer %s not created", veth.PeerName)
			}
			if peer, ok = other.(*Veth); !ok {
				t.Fatalf("Peer %s is incorrect type", veth.PeerName)
			}
			if peer.TxQLen != testTxQLen {
				t.Fatalf("TxQLen of peer is %d, should be %d", peer.TxQLen, testTxQLen)
			}
		}
	}
	// Vxlan: detailed attribute comparison is delegated to compareVxlan.
	if vxlan, ok := link.(*Vxlan); ok {
		other, ok := result.(*Vxlan)
		if !ok {
			t.Fatal("Result of create is not a vxlan")
		}
		compareVxlan(t, vxlan, other)
	}
	// IPVlan: the operating mode must round-trip.
	if ipv, ok := link.(*IPVlan); ok {
		other, ok := result.(*IPVlan)
		if !ok {
			t.Fatal("Result of create is not a ipvlan")
		}
		if ipv.Mode != other.Mode {
			t.Fatalf("Got unexpected mode: %d, expected: %d", other.Mode, ipv.Mode)
		}
	}
	// Macvlan: the operating mode must round-trip.
	if macv, ok := link.(*Macvlan); ok {
		other, ok := result.(*Macvlan)
		if !ok {
			t.Fatal("Result of create is not a macvlan")
		}
		if macv.Mode != other.Mode {
			t.Fatalf("Got unexpected mode: %d, expected: %d", other.Mode, macv.Mode)
		}
	}
	if err = LinkDel(link); err != nil {
		t.Fatal(err)
	}
	// The link count must return to its pre-add value.
	links, err = LinkList()
	if err != nil {
		t.Fatal(err)
	}
	if len(links) != num {
		t.Fatal("Link not removed properly")
	}
}
// compareVxlan fails the test when any vxlan attribute of actual differs
// from expected. Optional fields are only checked when set on expected:
// SrcAddr/Group when non-nil, TTL/TOS when not -1, Age/Limit/Port when
// positive, and the port range when either bound is positive.
func compareVxlan(t *testing.T, expected, actual *Vxlan) {
	if actual.VxlanId != expected.VxlanId {
		t.Fatal("Vxlan.VxlanId doesn't match")
	}
	if expected.SrcAddr != nil && !actual.SrcAddr.Equal(expected.SrcAddr) {
		t.Fatal("Vxlan.SrcAddr doesn't match")
	}
	if expected.Group != nil && !actual.Group.Equal(expected.Group) {
		t.Fatal("Vxlan.Group doesn't match")
	}
	// -1 marks TTL/TOS as "don't care" in the expected value.
	if expected.TTL != -1 && actual.TTL != expected.TTL {
		t.Fatal("Vxlan.TTL doesn't match")
	}
	if expected.TOS != -1 && actual.TOS != expected.TOS {
		t.Fatal("Vxlan.TOS doesn't match")
	}
	if actual.Learning != expected.Learning {
		t.Fatal("Vxlan.Learning doesn't match")
	}
	if actual.Proxy != expected.Proxy {
		t.Fatal("Vxlan.Proxy doesn't match")
	}
	if actual.RSC != expected.RSC {
		t.Fatal("Vxlan.RSC doesn't match")
	}
	if actual.L2miss != expected.L2miss {
		t.Fatal("Vxlan.L2miss doesn't match")
	}
	if actual.L3miss != expected.L3miss {
		t.Fatal("Vxlan.L3miss doesn't match")
	}
	if actual.GBP != expected.GBP {
		t.Fatal("Vxlan.GBP doesn't match")
	}
	// NoAge suppresses the Age comparison entirely.
	if expected.NoAge {
		if !actual.NoAge {
			t.Fatal("Vxlan.NoAge doesn't match")
		}
	} else if expected.Age > 0 && actual.Age != expected.Age {
		t.Fatal("Vxlan.Age doesn't match")
	}
	if expected.Limit > 0 && actual.Limit != expected.Limit {
		t.Fatal("Vxlan.Limit doesn't match")
	}
	if expected.Port > 0 && actual.Port != expected.Port {
		t.Fatal("Vxlan.Port doesn't match")
	}
	if expected.PortLow > 0 || expected.PortHigh > 0 {
		if actual.PortLow != expected.PortLow {
			t.Fatal("Vxlan.PortLow doesn't match")
		}
		if actual.PortHigh != expected.PortHigh {
			t.Fatal("Vxlan.PortHigh doesn't match")
		}
	}
}
// TestLinkAddDelDummy round-trips a plain dummy link through add/delete.
func TestLinkAddDelDummy(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	dummy := &Dummy{LinkAttrs{Name: "foo"}}
	testLinkAddDel(t, dummy)
}
// TestLinkAddDelIfb round-trips an ifb link through add/delete.
func TestLinkAddDelIfb(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	ifb := &Ifb{LinkAttrs{Name: "foo"}}
	testLinkAddDel(t, ifb)
}
// TestLinkAddDelBridge round-trips a bridge with a custom MTU.
func TestLinkAddDelBridge(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	bridge := &Bridge{LinkAttrs{Name: "foo", MTU: 1400}}
	testLinkAddDel(t, bridge)
}
// TestLinkAddDelVlan round-trips a VLAN (id 900) created on a dummy parent.
func TestLinkAddDelVlan(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	parent := &Dummy{LinkAttrs{Name: "foo"}}
	if err := LinkAdd(parent); err != nil {
		t.Fatal(err)
	}
	vlan := &Vlan{LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index}, 900}
	testLinkAddDel(t, vlan)
	if err := LinkDel(parent); err != nil {
		t.Fatal(err)
	}
}
// TestLinkAddDelMacvlan round-trips a private-mode macvlan on a dummy parent.
func TestLinkAddDelMacvlan(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	parent := &Dummy{LinkAttrs{Name: "foo"}}
	if err := LinkAdd(parent); err != nil {
		t.Fatal(err)
	}
	macvlan := &Macvlan{
		LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index},
		Mode:      MACVLAN_MODE_PRIVATE,
	}
	testLinkAddDel(t, macvlan)
	if err := LinkDel(parent); err != nil {
		t.Fatal(err)
	}
}
// TestLinkAddDelMacvtap round-trips a private-mode macvtap on a dummy parent.
func TestLinkAddDelMacvtap(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	parent := &Dummy{LinkAttrs{Name: "foo"}}
	if err := LinkAdd(parent); err != nil {
		t.Fatal(err)
	}
	macvtap := &Macvtap{
		Macvlan: Macvlan{
			LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index},
			Mode:      MACVLAN_MODE_PRIVATE,
		},
	}
	testLinkAddDel(t, macvtap)
	if err := LinkDel(parent); err != nil {
		t.Fatal(err)
	}
}
// TestLinkAddDelVeth round-trips a veth pair with custom TxQLen and MTU.
func TestLinkAddDelVeth(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	veth := &Veth{LinkAttrs{Name: "foo", TxQLen: testTxQLen, MTU: 1400}, "bar"}
	testLinkAddDel(t, veth)
}
// TestLinkAddVethWithDefaultTxQLen verifies that a veth pair created
// without an explicit TxQLen ends up with the expected default queue
// length on both ends.
func TestLinkAddVethWithDefaultTxQLen(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	la := NewLinkAttrs()
	la.Name = "foo"
	veth := &Veth{LinkAttrs: la, PeerName: "bar"}
	if err := LinkAdd(veth); err != nil {
		t.Fatal(err)
	}
	link, err := LinkByName("foo")
	if err != nil {
		t.Fatal(err)
	}
	if veth, ok := link.(*Veth); !ok {
		t.Fatalf("unexpected link type: %T", link)
	} else if veth.TxQLen != defaultTxQLen {
		t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, defaultTxQLen)
	}
	peer, err := LinkByName("bar")
	if err != nil {
		t.Fatal(err)
	}
	// Bug fix: on a type-assertion failure for the peer, report the
	// peer's type — the original formatted the "foo" link instead.
	if veth, ok := peer.(*Veth); !ok {
		t.Fatalf("unexpected link type: %T", peer)
	} else if veth.TxQLen != defaultTxQLen {
		t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, defaultTxQLen)
	}
}
// TestLinkAddVethWithZeroTxQLen verifies that an explicitly requested
// TxQLen of 0 is honored on both ends of a newly created veth pair.
func TestLinkAddVethWithZeroTxQLen(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	la := NewLinkAttrs()
	la.Name = "foo"
	// Explicitly request a zero-length transmit queue.
	la.TxQLen = 0
	veth := &Veth{LinkAttrs: la, PeerName: "bar"}
	if err := LinkAdd(veth); err != nil {
		t.Fatal(err)
	}
	link, err := LinkByName("foo")
	if err != nil {
		t.Fatal(err)
	}
	if veth, ok := link.(*Veth); !ok {
		t.Fatalf("unexpected link type: %T", link)
	} else if veth.TxQLen != 0 {
		t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, 0)
	}
	peer, err := LinkByName("bar")
	if err != nil {
		t.Fatal(err)
	}
	// Bug fix: on a type-assertion failure for the peer, report the
	// peer's type — the original formatted the "foo" link instead.
	if veth, ok := peer.(*Veth); !ok {
		t.Fatalf("unexpected link type: %T", peer)
	} else if veth.TxQLen != 0 {
		t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, 0)
	}
}
// TestLinkAddDummyWithTxQLen checks that a custom transmit queue length
// survives the round trip through LinkAdd and LinkByName for a dummy link.
func TestLinkAddDummyWithTxQLen(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	attrs := NewLinkAttrs()
	attrs.Name = "foo"
	attrs.TxQLen = 1500
	if err := LinkAdd(&Dummy{LinkAttrs: attrs}); err != nil {
		t.Fatal(err)
	}
	link, err := LinkByName("foo")
	if err != nil {
		t.Fatal(err)
	}
	result, ok := link.(*Dummy)
	if !ok {
		t.Fatalf("unexpected link type: %T", link)
	}
	if result.TxQLen != 1500 {
		t.Fatalf("TxQLen is %d, should be %d", result.TxQLen, 1500)
	}
}
// TestLinkAddDelBridgeMaster exercises add/delete of a dummy link that is
// enslaved to a bridge at creation time via MasterIndex.
func TestLinkAddDelBridgeMaster(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	bridge := &Bridge{LinkAttrs{Name: "foo"}}
	if err := LinkAdd(bridge); err != nil {
		t.Fatal(err)
	}
	slave := &Dummy{LinkAttrs{Name: "bar", MasterIndex: bridge.Attrs().Index}}
	testLinkAddDel(t, slave)
	if err := LinkDel(bridge); err != nil {
		t.Fatal(err)
	}
}
// TestLinkSetUnsetResetMaster verifies that enslaving to a nonexistent
// master fails, and that a slave's master can be set, changed to a
// different bridge, and finally cleared via LinkSetNoMaster.
func TestLinkSetUnsetResetMaster(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	master := &Bridge{LinkAttrs{Name: "foo"}}
	newmaster := &Bridge{LinkAttrs{Name: "bar"}}
	slave := &Dummy{LinkAttrs{Name: "baz"}}
	for _, l := range []Link{master, newmaster, slave} {
		if err := LinkAdd(l); err != nil {
			t.Fatal(err)
		}
	}
	// Enslaving to a bridge that was never created must fail.
	nonexistsmaster := &Bridge{LinkAttrs{Name: "foobar"}}
	if err := LinkSetMaster(slave, nonexistsmaster); err == nil {
		t.Fatal("error expected")
	}
	// checkMaster re-reads the slave and compares its MasterIndex.
	checkMaster := func(want int, msg string) {
		link, err := LinkByName("baz")
		if err != nil {
			t.Fatal(err)
		}
		if link.Attrs().MasterIndex != want {
			t.Fatal(msg)
		}
	}
	if err := LinkSetMaster(slave, master); err != nil {
		t.Fatal(err)
	}
	checkMaster(master.Attrs().Index, "Master not set properly")
	if err := LinkSetMaster(slave, newmaster); err != nil {
		t.Fatal(err)
	}
	checkMaster(newmaster.Attrs().Index, "Master not reset properly")
	if err := LinkSetNoMaster(slave); err != nil {
		t.Fatal(err)
	}
	checkMaster(0, "Master not unset properly")
	for _, l := range []Link{slave, newmaster, master} {
		if err := LinkDel(l); err != nil {
			t.Fatal(err)
		}
	}
}
// TestLinkSetNs moves the "bar" end of a veth pair into the base network
// namespace and verifies each end is only visible from its own namespace.
func TestLinkSetNs(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	basens, err := netns.Get()
	if err != nil {
		t.Fatal("Failed to get basens")
	}
	defer basens.Close()
	newns, err := netns.New()
	if err != nil {
		t.Fatal("Failed to create newns")
	}
	defer newns.Close()
	link := &Veth{LinkAttrs{Name: "foo"}, "bar"}
	if err := LinkAdd(link); err != nil {
		t.Fatal(err)
	}
	peer, err := LinkByName("bar")
	if err != nil {
		t.Fatal(err)
	}
	// Bug fix: the error returned by LinkSetNsFd was discarded and the
	// stale err from LinkByName was checked instead, so a failed
	// namespace move went undetected.
	if err := LinkSetNsFd(peer, int(basens)); err != nil {
		t.Fatal("Failed to set newns for link")
	}
	_, err = LinkByName("bar")
	if err == nil {
		t.Fatal("Link bar is still in newns")
	}
	err = netns.Set(basens)
	if err != nil {
		t.Fatal("Failed to set basens")
	}
	peer, err = LinkByName("bar")
	if err != nil {
		t.Fatal("Link is not in basens")
	}
	if err := LinkDel(peer); err != nil {
		t.Fatal(err)
	}
	err = netns.Set(newns)
	if err != nil {
		t.Fatal("Failed to set newns")
	}
	// Deleting one end of a veth pair must remove the other end too.
	_, err = LinkByName("foo")
	if err == nil {
		t.Fatal("Other half of veth pair not deleted")
	}
}
// TestLinkAddDelVxlan creates a vxlan device on top of a dummy parent
// and runs it through the generic add/delete round-trip checks.
func TestLinkAddDelVxlan(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	parent := &Dummy{LinkAttrs{Name: "foo"}}
	if err := LinkAdd(parent); err != nil {
		t.Fatal(err)
	}
	testLinkAddDel(t, &Vxlan{
		LinkAttrs:    LinkAttrs{Name: "bar"},
		VxlanId:      10,
		VtepDevIndex: parent.Index,
		Learning:     true,
		L2miss:       true,
		L3miss:       true,
	})
	if err := LinkDel(parent); err != nil {
		t.Fatal(err)
	}
}
// TestLinkAddDelIPVlanL2 round-trips an L2-mode ipvlan on a dummy parent.
func TestLinkAddDelIPVlanL2(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	parent := &Dummy{LinkAttrs{Name: "foo"}}
	if err := LinkAdd(parent); err != nil {
		t.Fatal(err)
	}
	ipv := IPVlan{
		LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Index},
		Mode:      IPVLAN_MODE_L2,
	}
	testLinkAddDel(t, &ipv)
}
// TestLinkAddDelIPVlanL3 round-trips an L3-mode ipvlan on a dummy parent.
func TestLinkAddDelIPVlanL3(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	parent := &Dummy{LinkAttrs{Name: "foo"}}
	if err := LinkAdd(parent); err != nil {
		t.Fatal(err)
	}
	ipv := IPVlan{
		LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Index},
		Mode:      IPVLAN_MODE_L3,
	}
	testLinkAddDel(t, &ipv)
}
// TestLinkAddDelIPVlanNoParent asserts that creating an ipvlan without a
// parent device is rejected with the expected error message.
func TestLinkAddDelIPVlanNoParent(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	ipv := &IPVlan{
		LinkAttrs: LinkAttrs{Name: "bar"},
		Mode:      IPVLAN_MODE_L3,
	}
	err := LinkAdd(ipv)
	if err == nil {
		t.Fatal("Add should fail if ipvlan creating without ParentIndex")
	}
	if err.Error() != "Can't create ipvlan link without ParentIndex" {
		t.Fatalf("Error should be about missing ParentIndex, got %q", err)
	}
}
// TestLinkByIndex verifies lookup by interface index, and that the lookup
// fails once the link has been deleted.
func TestLinkByIndex(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	dummy := &Dummy{LinkAttrs{Name: "dummy"}}
	if err := LinkAdd(dummy); err != nil {
		t.Fatal(err)
	}
	found, err := LinkByIndex(dummy.Index)
	if err != nil {
		t.Fatal(err)
	}
	if found.Attrs().Index != dummy.Attrs().Index {
		t.Fatalf("Indices don't match: %v != %v", found.Attrs().Index, dummy.Attrs().Index)
	}
	// Bug fix: the LinkDel error was silently ignored; if deletion fails,
	// the "not found" check below is meaningless.
	if err := LinkDel(dummy); err != nil {
		t.Fatal(err)
	}
	// test not found
	_, err = LinkByIndex(dummy.Attrs().Index)
	if err == nil {
		// Bug fix: the message formatted the (nil) error instead of the index.
		t.Fatalf("LinkByIndex(%v) found deleted link", dummy.Attrs().Index)
	}
}
// TestLinkSet exercises renaming a link, changing its MTU, and setting
// its hardware address, re-reading the link after each mutation.
func TestLinkSet(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	if err := LinkAdd(&Dummy{LinkAttrs{Name: "foo"}}); err != nil {
		t.Fatal(err)
	}
	link, err := LinkByName("foo")
	if err != nil {
		t.Fatal(err)
	}
	if err = LinkSetName(link, "bar"); err != nil {
		t.Fatalf("Could not change interface name: %v", err)
	}
	if link, err = LinkByName("bar"); err != nil {
		t.Fatalf("Interface name not changed: %v", err)
	}
	if err = LinkSetMTU(link, 1400); err != nil {
		t.Fatalf("Could not set MTU: %v", err)
	}
	if link, err = LinkByName("bar"); err != nil {
		t.Fatal(err)
	}
	if link.Attrs().MTU != 1400 {
		t.Fatal("MTU not changed!")
	}
	hwAddr, err := net.ParseMAC("00:12:34:56:78:AB")
	if err != nil {
		t.Fatal(err)
	}
	if err = LinkSetHardwareAddr(link, hwAddr); err != nil {
		t.Fatal(err)
	}
	if link, err = LinkByName("bar"); err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(link.Attrs().HardwareAddr, hwAddr) {
		t.Fatalf("hardware address not changed!")
	}
}
// expectLinkUpdate drains updates from ch until it sees one for ifaceName
// whose IFF_UP flag matches up, or until a minute has elapsed. It reports
// whether the expected update was observed.
func expectLinkUpdate(ch <-chan LinkUpdate, ifaceName string, up bool) bool {
	// Bug fix: the timeout was created inside the loop, so every
	// unrelated update restarted the one-minute clock and a steady
	// stream of irrelevant events could make this spin indefinitely.
	// Create the deadline once, before the loop.
	timeout := time.After(time.Minute)
	for {
		select {
		case update := <-ch:
			if ifaceName == update.Link.Attrs().Name && (update.IfInfomsg.Flags&syscall.IFF_UP != 0) == up {
				return true
			}
		case <-timeout:
			return false
		}
	}
}
// TestLinkSubscribe subscribes to link updates and verifies that add,
// set-up, and delete events for a veth pair are all delivered.
func TestLinkSubscribe(t *testing.T) {
	tearDown := setUpNetlinkTest(t)
	defer tearDown()
	updates := make(chan LinkUpdate)
	done := make(chan struct{})
	defer close(done)
	if err := LinkSubscribe(updates, done); err != nil {
		t.Fatal(err)
	}
	veth := &Veth{LinkAttrs{Name: "foo", TxQLen: testTxQLen, MTU: 1400}, "bar"}
	if err := LinkAdd(veth); err != nil {
		t.Fatal(err)
	}
	if !expectLinkUpdate(updates, "foo", false) {
		t.Fatal("Add update not received as expected")
	}
	if err := LinkSetUp(veth); err != nil {
		t.Fatal(err)
	}
	if !expectLinkUpdate(updates, "foo", true) {
		t.Fatal("Link Up update not received as expected")
	}
	if err := LinkDel(veth); err != nil {
		t.Fatal(err)
	}
	if !expectLinkUpdate(updates, "foo", false) {
		t.Fatal("Del update not received as expected")
	}
}
|
package Pocket
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
)
// authResponse models the initial OAuth response from Pocket. The fields
// must be exported (uppercase) so encoding/json can populate them during
// Unmarshal; with unexported fields the decode was silently a no-op and
// the returned code was always empty.
type authResponse struct {
	Code  string `json:"code"`
	State string `json:"state"`
}

const (
	// url is the Pocket OAuth request-token endpoint.
	url string = "https://getpocket.com/v3/oauth/request"
	// redirectURI is where Pocket sends the user after authorization.
	redirectURI string = "https://github.com/daveym/pocket/blob/master/AUTHCOMPLETE.md"
)

// Authenticate takes the users consumer key and performs a one time authentication with the Pocket API to request access.
// A Request Token is returned that should be used for all subsequent requests to Pocket.
func Authenticate(consumerKey string) string {
	request := map[string]string{"consumer_key": consumerKey, "redirect_uri": redirectURI}
	jsonStr, _ := json.Marshal(request)
	fmt.Println(string(jsonStr)) // debug output of the request payload
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr))
	// Bug fix: the NewRequest error was ignored; a nil req would panic
	// on the Header.Set calls below.
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("charset", "UTF8")
	req.Header.Set("X-Accept", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// Bug fix: the read error was silently discarded.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println("response Body:", string(body))
	var s authResponse
	if err := json.Unmarshal(body, &s); err != nil {
		fmt.Printf("%T\n%s\n%#v\n", err, err, err)
	}
	return s.Code
}
// Used by Authenticate - take a consumer key and return a request token.
// TODO: not yet implemented; currently a stub with no behavior.
func getRequestToken(consumerKey *string) {
}
Fix JSON unmarshalling: export (uppercase) the struct fields so encoding/json can populate them.
package Pocket
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
)
// Initial Auth response from Pocket.
// Fields are exported so encoding/json can populate them during
// Unmarshal; Pocket's lowercase JSON keys match case-insensitively.
type authResponse struct {
	Code string
	State string
}
const (
	// url is the Pocket OAuth request-token endpoint.
	url string = "https://getpocket.com/v3/oauth/request"
	// redirectURI is where Pocket sends the user after authorization.
	redirectURI string = "https://github.com/daveym/pocket/blob/master/AUTHCOMPLETE.md"
)
// Authenticate takes the users consumer key and performs a one time authentication with the Pocket API to request access.
// A Request Token is returned that should be used for all subsequent requests to Pocket.
func Authenticate(consumerKey string) string {
	request := map[string]string{"consumer_key": consumerKey, "redirect_uri": redirectURI}
	jsonStr, _ := json.Marshal(request)
	fmt.Println(string(jsonStr)) // debug output of the request payload
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr))
	// Bug fix: the NewRequest error was ignored; a nil req would panic
	// on the Header.Set calls below.
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("charset", "UTF8")
	req.Header.Set("X-Accept", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// Bug fix: the read error was silently discarded.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println("response Body:", string(body))
	var s = new(authResponse)
	// body is already a []byte; no conversion needed before Unmarshal.
	err = json.Unmarshal(body, &s)
	if err != nil {
		fmt.Printf("%T\n%s\n%#v\n", err, err, err)
	}
	// s.Code is already a string; the string() conversion was redundant.
	return s.Code
}
// Used by Authenticate - take a consumer key and return a request token.
// TODO: not yet implemented; currently a stub with no behavior.
func getRequestToken(consumerKey *string) {
}
|
package gardenstore_test
import (
"encoding/json"
"errors"
"io/ioutil"
"net"
"os"
"sync"
"time"
"github.com/cloudfoundry-incubator/executor"
"github.com/cloudfoundry-incubator/bbs/models"
"github.com/cloudfoundry-incubator/executor/depot/gardenstore"
"github.com/cloudfoundry-incubator/executor/depot/gardenstore/fakes"
"github.com/cloudfoundry-incubator/executor/depot/transformer"
"github.com/cloudfoundry-incubator/garden"
gfakes "github.com/cloudfoundry-incubator/garden/fakes"
"github.com/cloudfoundry/dropsonde/log_sender/fake"
"github.com/cloudfoundry/dropsonde/logs"
"github.com/pivotal-golang/clock/fakeclock"
"github.com/pivotal-golang/lager/lagertest"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
)
var _ = Describe("GardenContainerStore", func() {
var (
fakeGardenClient *gfakes.FakeClient
ownerName = "some-owner-name"
maxCPUShares uint64 = 1024
inodeLimit uint64 = 2000000
clock *fakeclock.FakeClock
emitter *fakes.FakeEventEmitter
fakeLogSender *fake.FakeLogSender
logger *lagertest.TestLogger
gardenStore *gardenstore.GardenStore
)
action := &models.RunAction{
Path: "true",
}
BeforeEach(func() {
fakeGardenClient = new(gfakes.FakeClient)
clock = fakeclock.NewFakeClock(time.Now())
emitter = new(fakes.FakeEventEmitter)
fakeLogSender = fake.NewFakeLogSender()
logs.Initialize(fakeLogSender)
logger = lagertest.NewTestLogger("test")
var err error
gardenStore, err = gardenstore.NewGardenStore(
fakeGardenClient,
ownerName,
maxCPUShares,
inodeLimit,
100*time.Millisecond,
100*time.Millisecond,
transformer.NewTransformer(nil, nil, nil, nil, nil, nil, os.TempDir(), false, clock),
clock,
emitter,
100,
)
Expect(err).NotTo(HaveOccurred())
})
Describe("Lookup", func() {
var (
executorContainer executor.Container
lookupErr error
)
JustBeforeEach(func() {
executorContainer, lookupErr = gardenStore.Lookup(logger, "some-container-handle")
})
Context("when the container doesn't exist", func() {
BeforeEach(func() {
fakeGardenClient.LookupReturns(nil, garden.ContainerNotFoundError{})
})
It("returns a container-not-found error", func() {
Expect(lookupErr).To(Equal(executor.ErrContainerNotFound))
})
})
Context("when lookup fails", func() {
BeforeEach(func() {
fakeGardenClient.LookupReturns(nil, errors.New("didn't find it"))
})
It("returns the error", func() {
Expect(lookupErr).To(MatchError(Equal("didn't find it")))
})
})
Context("when the container exists", func() {
var gardenContainer *gfakes.FakeContainer
BeforeEach(func() {
gardenContainer = new(gfakes.FakeContainer)
gardenContainer.HandleReturns("some-container-handle")
fakeGardenClient.LookupReturns(gardenContainer, nil)
})
It("does not error", func() {
Expect(lookupErr).NotTo(HaveOccurred())
})
It("has the Garden container handle as its container guid", func() {
Expect(executorContainer.Guid).To(Equal("some-container-handle"))
})
It("looked up by the given guid", func() {
Expect(fakeGardenClient.LookupArgsForCall(0)).To(Equal("some-container-handle"))
})
Context("when the container has an executor:state property", func() {
Context("and it's Reserved", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:state": string(executor.StateReserved),
},
}, nil)
})
It("has it as its state", func() {
Expect(executorContainer.State).To(Equal(executor.StateReserved))
})
})
Context("and it's Initializing", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:state": string(executor.StateInitializing),
},
}, nil)
})
It("has it as its state", func() {
Expect(executorContainer.State).To(Equal(executor.StateInitializing))
})
})
Context("and it's Created", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:state": string(executor.StateCreated),
},
}, nil)
})
It("has it as its state", func() {
Expect(executorContainer.State).To(Equal(executor.StateCreated))
})
})
Context("and it's Running", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:state": string(executor.StateRunning),
},
}, nil)
})
It("has it as its state", func() {
Expect(executorContainer.State).To(Equal(executor.StateRunning))
})
})
Context("and it's Completed", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:state": string(executor.StateCompleted),
},
}, nil)
})
It("has it as its state", func() {
Expect(executorContainer.State).To(Equal(executor.StateCompleted))
})
})
Context("when it's some other state", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:state": "bogus-state",
},
}, nil)
})
It("returns an InvalidStateError", func() {
Expect(lookupErr).To(Equal(gardenstore.InvalidStateError{"bogus-state"}))
})
})
})
Context("when the container has an executor:allocated-at property", func() {
Context("when it's a valid integer", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:allocated-at": "123",
},
}, nil)
})
It("has it as its allocated at value", func() {
Expect(executorContainer.AllocatedAt).To(Equal(int64(123)))
})
})
Context("when it's a bogus value", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:allocated-at": "some-bogus-timestamp",
},
}, nil)
})
It("returns a MalformedPropertyError", func() {
Expect(lookupErr).To(Equal(gardenstore.MalformedPropertyError{
Property: "executor:allocated-at",
Value: "some-bogus-timestamp",
}))
})
})
})
Context("when the container has an executor:memory-mb property", func() {
Context("when it's a valid integer", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:memory-mb": "1024",
},
}, nil)
})
It("has it as its rootfs path", func() {
Expect(executorContainer.MemoryMB).To(Equal(1024))
})
})
Context("when it's a bogus value", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:memory-mb": "some-bogus-integer",
},
}, nil)
})
It("returns a MalformedPropertyError", func() {
Expect(lookupErr).To(Equal(gardenstore.MalformedPropertyError{
Property: "executor:memory-mb",
Value: "some-bogus-integer",
}))
})
})
})
Context("when the container has an executor:disk-mb property", func() {
Context("when it's a valid integer", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:disk-mb": "2048",
},
}, nil)
})
It("has it as its disk reservation", func() {
Expect(executorContainer.DiskMB).To(Equal(2048))
})
})
Context("when it's a bogus value", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:disk-mb": "some-bogus-integer",
},
}, nil)
})
It("returns a MalformedPropertyError", func() {
Expect(lookupErr).To(Equal(gardenstore.MalformedPropertyError{
Property: "executor:disk-mb",
Value: "some-bogus-integer",
}))
})
})
})
Context("when the container has an executor:cpu-weight", func() {
Context("when it's a valid integer", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:cpu-weight": "99",
},
}, nil)
})
It("has it as its cpu weight", func() {
Expect(executorContainer.CPUWeight).To(Equal(uint(99)))
})
})
Context("when it's a bogus value", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:cpu-weight": "some-bogus-integer",
},
}, nil)
})
It("returns a MalformedPropertyError", func() {
Expect(lookupErr).To(Equal(gardenstore.MalformedPropertyError{
Property: "executor:cpu-weight",
Value: "some-bogus-integer",
}))
})
})
})
Context("when the Garden container has tags", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"tag:a": "a-value",
"tag:b": "b-value",
"executor:x": "excluded-value",
"x": "another-excluded-value",
},
}, nil)
})
It("has the tags", func() {
Expect(executorContainer.Tags).To(Equal(executor.Tags{
"a": "a-value",
"b": "b-value",
}))
})
})
Context("when the Garden container has mapped ports", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
MappedPorts: []garden.PortMapping{
{HostPort: 1234, ContainerPort: 5678},
{HostPort: 4321, ContainerPort: 8765},
},
}, nil)
})
It("has the ports", func() {
Expect(executorContainer.Ports).To(Equal([]executor.PortMapping{
{HostPort: 1234, ContainerPort: 5678},
{HostPort: 4321, ContainerPort: 8765},
}))
})
})
Context("when the Garden container has an external IP", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
ExternalIP: "1.2.3.4",
}, nil)
})
It("has the ports", func() {
Expect(executorContainer.ExternalIP).To(Equal("1.2.3.4"))
})
})
Context("when the Garden container has a log config", func() {
Context("and the log is valid", func() {
index := 1
log := executor.LogConfig{
Guid: "my-guid",
SourceName: "source-name",
Index: index,
}
BeforeEach(func() {
payload, err := json.Marshal(log)
Expect(err).NotTo(HaveOccurred())
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:log-config": string(payload),
},
}, nil)
})
It("has it as its log", func() {
Expect(executorContainer.LogConfig).To(Equal(log))
})
})
Context("and the log is invalid", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:log-config": "ß",
},
}, nil)
})
It("returns an InvalidJSONError", func() {
Expect(lookupErr).To(HaveOccurred())
Expect(lookupErr.Error()).To(ContainSubstring("executor:log-config"))
Expect(lookupErr.Error()).To(ContainSubstring("ß"))
Expect(lookupErr.Error()).To(ContainSubstring("invalid character"))
})
})
})
Context("when the Garden container has a metrics config", func() {
Context("and the metrics config is valid", func() {
index := 1
metricsConfig := executor.MetricsConfig{
Guid: "my-guid",
Index: index,
}
BeforeEach(func() {
payload, err := json.Marshal(metricsConfig)
Expect(err).NotTo(HaveOccurred())
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:metrics-config": string(payload),
},
}, nil)
})
It("has it as its metrics config", func() {
Expect(executorContainer.MetricsConfig).To(Equal(metricsConfig))
})
})
Context("and the metrics config is invalid", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:metrics-config": "ß",
},
}, nil)
})
It("returns an InvalidJSONError", func() {
Expect(lookupErr).To(HaveOccurred())
Expect(lookupErr.Error()).To(ContainSubstring("executor:metrics-config"))
Expect(lookupErr.Error()).To(ContainSubstring("ß"))
Expect(lookupErr.Error()).To(ContainSubstring("invalid character"))
})
})
})
Context("when the Garden container has a run result", func() {
Context("and the run result is valid", func() {
runResult := executor.ContainerRunResult{
Failed: true,
FailureReason: "because",
}
BeforeEach(func() {
payload, err := json.Marshal(runResult)
Expect(err).NotTo(HaveOccurred())
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:result": string(payload),
},
}, nil)
})
It("has its run result", func() {
Expect(executorContainer.RunResult).To(Equal(runResult))
})
})
Context("and the run result is invalid", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:result": "ß",
},
}, nil)
})
It("returns an InvalidJSONError", func() {
Expect(lookupErr).To(HaveOccurred())
Expect(lookupErr.Error()).To(ContainSubstring("executor:result"))
Expect(lookupErr.Error()).To(ContainSubstring("ß"))
Expect(lookupErr.Error()).To(ContainSubstring("invalid character"))
})
})
})
Context("when getting the info from Garden fails", func() {
disaster := errors.New("oh no!")
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{}, disaster)
})
It("returns the error", func() {
Expect(lookupErr).To(Equal(disaster))
})
})
})
})
Describe("Create", func() {
// Shared fixtures for the Create specs: a minimal RunAction wrapped in an
// Initializing executor container, exchanged for a fake Garden container
// whose handle matches the executor guid.
var (
	executorContainer   executor.Container
	fakeGardenContainer *gfakes.FakeContainer

	createdContainer executor.Container
	createErr        error
)

action := &models.RunAction{
	User: "me",
	Path: "ls",
}

BeforeEach(func() {
	executorContainer = executor.Container{
		Guid:  "some-guid",
		State: executor.StateInitializing,
		RunInfo: executor.RunInfo{
			Action: models.WrapAction(action),
			LogConfig: executor.LogConfig{
				Guid:       "log-guid",
				SourceName: "some-source-name",
				Index:      1,
			},
		},
	}

	fakeGardenContainer = new(gfakes.FakeContainer)
	fakeGardenContainer.HandleReturns("some-guid")
})

// Runs after every nested BeforeEach, so contexts below can mutate
// executorContainer before Create is invoked.
JustBeforeEach(func() {
	createdContainer, createErr = gardenStore.Create(logger, executorContainer)
})
Context("when creating the container succeeds", func() {
// Happy path: the Garden client accepts the container spec.
BeforeEach(func() {
	fakeGardenClient.CreateReturns(fakeGardenContainer, nil)
})

It("does not error", func() {
	Expect(createErr).NotTo(HaveOccurred())
})

It("returns a created container", func() {
	// Create should hand back the same container, advanced to StateCreated.
	expectedCreatedContainer := executorContainer
	expectedCreatedContainer.State = executor.StateCreated

	Expect(createdContainer).To(Equal(expectedCreatedContainer))
})

It("emits to loggregator", func() {
	// Two app log lines are expected: one before and one after creation.
	logs := fakeLogSender.GetLogs()
	Expect(logs).To(HaveLen(2))

	emission := logs[0]
	Expect(emission.AppId).To(Equal("log-guid"))
	Expect(emission.SourceType).To(Equal("some-source-name"))
	Expect(emission.SourceInstance).To(Equal("1"))
	Expect(string(emission.Message)).To(Equal("Creating container"))
	Expect(emission.MessageType).To(Equal("OUT"))

	emission = logs[1]
	Expect(emission.AppId).To(Equal("log-guid"))
	Expect(emission.SourceType).To(Equal("some-source-name"))
	Expect(emission.SourceInstance).To(Equal("1"))
	Expect(string(emission.Message)).To(Equal("Successfully created container"))
	Expect(emission.MessageType).To(Equal("OUT"))
})
Describe("the exchanged Garden container", func() {
// The spec handed to Garden carries executor metadata as properties and
// reuses the executor guid as the Garden handle.
It("creates it with the state as 'created'", func() {
	Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
	containerSpec := fakeGardenClient.CreateArgsForCall(0)
	Expect(containerSpec.Properties[gardenstore.ContainerStateProperty]).To(Equal(string(executor.StateCreated)))
})

It("creates it with the owner property", func() {
	Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
	containerSpec := fakeGardenClient.CreateArgsForCall(0)
	Expect(containerSpec.Properties[gardenstore.ContainerOwnerProperty]).To(Equal(ownerName))
})

It("creates it with the guid as the handle", func() {
	Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
	containerSpec := fakeGardenClient.CreateArgsForCall(0)
	Expect(containerSpec.Handle).To(Equal("some-guid"))
})
// The Privileged flag on the executor container must be copied verbatim
// onto the Garden container spec.
Context("when the executorContainer is Privileged", func() {
	BeforeEach(func() {
		executorContainer.Privileged = true
	})

	It("creates a privileged garden container spec", func() {
		Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
		containerSpec := fakeGardenClient.CreateArgsForCall(0)
		Expect(containerSpec.Privileged).To(BeTrue())
	})
})

Context("when the executorContainer is not Privileged", func() {
	BeforeEach(func() {
		executorContainer.Privileged = false
	})

	// Fixed spec name: this context asserts the spec is NOT privileged;
	// the old name ("creates a privileged garden container spec") was a
	// copy-paste of the previous spec and contradicted the assertion.
	It("creates a non-privileged garden container spec", func() {
		Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
		containerSpec := fakeGardenClient.CreateArgsForCall(0)
		Expect(containerSpec.Privileged).To(BeFalse())
	})
})
// Container-wide env vars are flattened into NAME=VALUE strings for Garden.
Context("when the Executor container has container-wide env", func() {
	BeforeEach(func() {
		executorContainer.Env = []executor.EnvironmentVariable{
			{Name: "GLOBAL1", Value: "VALUE1"},
			{Name: "GLOBAL2", Value: "VALUE2"},
		}
	})

	It("creates the container with the env", func() {
		Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
		containerSpec := fakeGardenClient.CreateArgsForCall(0)
		Expect(containerSpec.Env).To(Equal([]string{"GLOBAL1=VALUE1", "GLOBAL2=VALUE2"}))
	})
})

Context("when the Executor container has a rootfs", func() {
	BeforeEach(func() {
		executorContainer.RootFSPath = "focker:///some-rootfs"
	})

	It("creates it with the rootfs", func() {
		Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
		containerSpec := fakeGardenClient.CreateArgsForCall(0)
		Expect(containerSpec.RootFSPath).To(Equal("focker:///some-rootfs"))
	})
})

// Fixed garbled context description (was "container an allocated at time").
Context("when the Executor container has an allocated-at time", func() {
	BeforeEach(func() {
		executorContainer.AllocatedAt = 123456789
	})

	It("creates it with the executor:allocated-at property", func() {
		Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
		containerSpec := fakeGardenClient.CreateArgsForCall(0)
		Expect(containerSpec.Properties["executor:allocated-at"]).To(Equal("123456789"))
	})
})

// Renamed to avoid duplicating the rootfs context above: this one asserts
// the path is also persisted as the executor:rootfs Garden property.
Context("when the Executor container has a rootfs to persist", func() {
	BeforeEach(func() {
		executorContainer.RootFSPath = "some/root/path"
	})

	It("creates it with the executor:rootfs property", func() {
		Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
		containerSpec := fakeGardenClient.CreateArgsForCall(0)
		Expect(containerSpec.Properties["executor:rootfs"]).To(Equal("some/root/path"))
	})
})
// LogConfig, MetricsConfig and the run result are serialized to JSON and
// stored as Garden properties so they can be recovered on Lookup.
Context("when the Executor container has log", func() {
	index := 1
	log := executor.LogConfig{
		Guid:       "my-guid",
		SourceName: "source-name",
		Index:      index,
	}

	BeforeEach(func() {
		executorContainer.LogConfig = log
	})

	It("creates it with the executor:log-config property", func() {
		payload, err := json.Marshal(log)
		Expect(err).NotTo(HaveOccurred())

		Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
		containerSpec := fakeGardenClient.CreateArgsForCall(0)
		Expect(containerSpec.Properties["executor:log-config"]).To(MatchJSON(payload))
	})
})

Context("when the Executor container has metrics config", func() {
	index := 1
	metricsConfig := executor.MetricsConfig{
		Guid:  "my-guid",
		Index: index,
	}

	BeforeEach(func() {
		executorContainer.MetricsConfig = metricsConfig
	})

	It("creates it with the executor:metrics-config property", func() {
		payload, err := json.Marshal(metricsConfig)
		Expect(err).NotTo(HaveOccurred())

		Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
		containerSpec := fakeGardenClient.CreateArgsForCall(0)
		Expect(containerSpec.Properties["executor:metrics-config"]).To(MatchJSON(payload))
	})
})

Context("when the Executor container has a run result", func() {
	runResult := executor.ContainerRunResult{
		Failed:        true,
		FailureReason: "because",
	}

	BeforeEach(func() {
		executorContainer.RunResult = runResult
	})

	It("creates it with the executor:result property", func() {
		payload, err := json.Marshal(runResult)
		Expect(err).NotTo(HaveOccurred())

		Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
		containerSpec := fakeGardenClient.CreateArgsForCall(0)
		Expect(containerSpec.Properties["executor:result"]).To(MatchJSON(payload))
	})
})
})
// Executor tags become "tag:"-prefixed Garden properties (which List later
// filters on).
Context("when the Executor container has tags", func() {
	BeforeEach(func() {
		executorContainer.Tags = executor.Tags{
			"tag-one": "one",
			"tag-two": "two",
		}
	})

	It("creates it with the tag properties", func() {
		Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
		containerSpec := fakeGardenClient.CreateArgsForCall(0)
		Expect(containerSpec.Properties["tag:tag-one"]).To(Equal("one"))
		Expect(containerSpec.Properties["tag:tag-two"]).To(Equal("two"))
	})
})
// Each executor PortMapping is forwarded to Garden via NetIn; failures
// roll back by destroying the freshly created Garden container.
Context("when the Executor container has mapped ports", func() {
	BeforeEach(func() {
		executorContainer.Ports = []executor.PortMapping{
			{HostPort: 1234, ContainerPort: 5678},
			{HostPort: 4321, ContainerPort: 8765},
		}
	})

	// Fixed spec name: this spec asserts NetIn port mappings; the old name
	// ("creates it with the tag properties") was copy-pasted from the tags
	// context above.
	It("maps the ports via NetIn", func() {
		Expect(fakeGardenContainer.NetInCallCount()).To(Equal(2))

		hostPort, containerPort := fakeGardenContainer.NetInArgsForCall(0)
		Expect(hostPort).To(Equal(uint32(1234)))
		Expect(containerPort).To(Equal(uint32(5678)))

		hostPort, containerPort = fakeGardenContainer.NetInArgsForCall(1)
		Expect(hostPort).To(Equal(uint32(4321)))
		Expect(containerPort).To(Equal(uint32(8765)))
	})

	Context("when mapping ports fails", func() {
		disaster := errors.New("oh no!")

		BeforeEach(func() {
			fakeGardenContainer.NetInReturns(0, 0, disaster)
		})

		It("returns the error", func() {
			Expect(createErr).To(Equal(disaster))
		})

		It("deletes the container from Garden", func() {
			Expect(fakeGardenClient.DestroyCallCount()).To(Equal(1))
			Expect(fakeGardenClient.DestroyArgsForCall(0)).To(Equal("some-guid"))
		})
	})

	Context("when mapping ports succeeds", func() {
		BeforeEach(func() {
			// Echo back shifted ports so we can tell the result reflects
			// what Garden actually mapped, not what was requested.
			fakeGardenContainer.NetInStub = func(hostPort, containerPort uint32) (uint32, uint32, error) {
				return hostPort + 1, containerPort + 1, nil
			}
		})

		It("updates the port mappings on the returned container with what was actually mapped", func() {
			Expect(createdContainer.Ports).To(Equal([]executor.PortMapping{
				{HostPort: 1235, ContainerPort: 5679},
				{HostPort: 4322, ContainerPort: 8766},
			}))
		})
	})
})
// Executor egress rules convert one-for-one into Garden NetOut rules:
// CIDRs, IP ranges, single IPs, port ranges, port lists, ICMP type/code
// and the Log flag. Invalid protocols abort creation; NetOut failures
// trigger rollback (Destroy).
Context("when the Executor container has egress rules", func() {
	var rules []*models.SecurityGroupRule

	BeforeEach(func() {
		// One rule per protocol/destination/port flavor.
		rules = []*models.SecurityGroupRule{
			{
				Protocol:     "udp",
				Destinations: []string{"0.0.0.0/0"},
				PortRange: &models.PortRange{
					Start: 1,
					End:   1024,
				},
			},
			{
				Protocol:     "tcp",
				Destinations: []string{"1.2.3.4-2.3.4.5"},
				Ports:        []uint32{80, 443},
				Log:          true,
			},
			{
				Protocol:     "icmp",
				Destinations: []string{"1.2.3.4"},
				IcmpInfo:     &models.ICMPInfo{Type: 1, Code: 2},
			},
			{
				Protocol:     "all",
				Destinations: []string{"9.8.7.6", "8.7.6.5"},
				Log:          true,
			},
		}

		executorContainer.EgressRules = rules
	})

	Context("when setting egress rules", func() {
		It("creates it with the egress rules", func() {
			Expect(createErr).NotTo(HaveOccurred())
		})

		It("updates egress rules on returned container", func() {
			// NetOut must be called once per rule, in order.
			Expect(fakeGardenContainer.NetOutCallCount()).To(Equal(4))

			_, expectedNet, err := net.ParseCIDR("0.0.0.0/0")
			Expect(err).NotTo(HaveOccurred())

			rule := fakeGardenContainer.NetOutArgsForCall(0)
			Expect(rule.Protocol).To(Equal(garden.ProtocolUDP))
			Expect(rule.Networks).To(Equal([]garden.IPRange{garden.IPRangeFromIPNet(expectedNet)}))
			Expect(rule.Ports).To(Equal([]garden.PortRange{{Start: 1, End: 1024}}))
			Expect(rule.ICMPs).To(BeNil())
			Expect(rule.Log).To(BeFalse())

			rule = fakeGardenContainer.NetOutArgsForCall(1)
			Expect(rule.Networks).To(Equal([]garden.IPRange{{
				Start: net.ParseIP("1.2.3.4"),
				End:   net.ParseIP("2.3.4.5"),
			}}))
			Expect(rule.Ports).To(Equal([]garden.PortRange{
				garden.PortRangeFromPort(80),
				garden.PortRangeFromPort(443),
			}))
			Expect(rule.ICMPs).To(BeNil())
			Expect(rule.Log).To(BeTrue())

			rule = fakeGardenContainer.NetOutArgsForCall(2)
			Expect(rule.Protocol).To(Equal(garden.ProtocolICMP))
			Expect(rule.Networks).To(Equal([]garden.IPRange{
				garden.IPRangeFromIP(net.ParseIP("1.2.3.4")),
			}))
			Expect(rule.Ports).To(BeEmpty())
			Expect(*rule.ICMPs).To(Equal(garden.ICMPControl{
				Type: garden.ICMPType(1),
				Code: garden.ICMPControlCode(2),
			}))
			Expect(rule.Log).To(BeFalse())

			rule = fakeGardenContainer.NetOutArgsForCall(3)
			Expect(rule.Protocol).To(Equal(garden.ProtocolAll))
			Expect(rule.Networks).To(Equal([]garden.IPRange{
				garden.IPRangeFromIP(net.ParseIP("9.8.7.6")),
				garden.IPRangeFromIP(net.ParseIP("8.7.6.5")),
			}))
			Expect(rule.Ports).To(BeEmpty())
			Expect(rule.ICMPs).To(BeNil())
			Expect(rule.Log).To(BeTrue())
		})
	})

	Context("when security rule is invalid", func() {
		BeforeEach(func() {
			// "foo" is not a recognized protocol.
			rules = []*models.SecurityGroupRule{
				{
					Protocol:     "foo",
					Destinations: []string{"0.0.0.0/0"},
					PortRange: &models.PortRange{
						Start: 1,
						End:   1024,
					},
				},
			}
			executorContainer.EgressRules = rules
		})

		It("returns the error", func() {
			Expect(createErr).To(HaveOccurred())
			Expect(createErr).To(Equal(executor.ErrInvalidSecurityGroup))
		})
	})

	Context("when setting egress rules fails", func() {
		disaster := errors.New("NO SECURITY FOR YOU!!!")

		BeforeEach(func() {
			fakeGardenContainer.NetOutReturns(disaster)
		})

		It("returns the error", func() {
			Expect(createErr).To(HaveOccurred())
		})

		It("deletes the container from Garden", func() {
			Expect(fakeGardenClient.DestroyCallCount()).To(Equal(1))
			Expect(fakeGardenClient.DestroyArgsForCall(0)).To(Equal("some-guid"))
		})
	})
})
// MemoryMB translates to a Garden byte limit and is also persisted as a
// property for later recovery.
Context("when a memory limit is set", func() {
	BeforeEach(func() {
		executorContainer.MemoryMB = 64
	})

	It("creates it with the memory limit", func() {
		Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
		containerSpec := fakeGardenClient.CreateArgsForCall(0)
		Expect(containerSpec.Limits.Memory).To(Equal(garden.MemoryLimits{
			LimitInBytes: 64 * 1024 * 1024, // MB -> bytes
		}))
	})

	It("creates it with the executor:memory-mb property", func() {
		Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
		containerSpec := fakeGardenClient.CreateArgsForCall(0)
		Expect(containerSpec.Properties["executor:memory-mb"]).To(Equal("64"))
	})
})
// DiskMB translates to a Garden byte-hard limit (with the suite-wide inode
// limit and exclusive scope) and is persisted as a property.
Context("when a disk limit is set", func() {
	BeforeEach(func() {
		executorContainer.DiskMB = 64
	})

	// Fixed spec name grammar (was "creates it the disk limit").
	It("creates it with the disk limit", func() {
		Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
		containerSpec := fakeGardenClient.CreateArgsForCall(0)
		Expect(containerSpec.Limits.Disk).To(Equal(garden.DiskLimits{
			ByteHard:  64 * 1024 * 1024, // MB -> bytes
			InodeHard: inodeLimit,
			Scope:     garden.DiskLimitScopeExclusive,
		}))
	})

	It("creates it with the executor:disk-mb property", func() {
		Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
		containerSpec := fakeGardenClient.CreateArgsForCall(0)
		Expect(containerSpec.Properties["executor:disk-mb"]).To(Equal("64"))
	})
})
// CPUWeight is a percentage of the maximum share count; 50 maps to 512
// shares (presumably 50% of a 1024-share maximum — confirm against the
// store implementation).
Context("when a cpu limit is set", func() {
	BeforeEach(func() {
		executorContainer.CPUWeight = 50
	})

	It("creates it with the CPU shares to the ratio of the max shares", func() {
		Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
		containerSpec := fakeGardenClient.CreateArgsForCall(0)
		Expect(containerSpec.Limits.CPU).To(Equal(garden.CPULimits{
			LimitInShares: 512,
		}))
	})
})

// After creation the store asks Garden for the container's info to learn
// its external IP; a failure there rolls the container back.
Context("when gardenContainer.Info succeeds", func() {
	BeforeEach(func() {
		fakeGardenContainer.InfoReturns(garden.ContainerInfo{
			ExternalIP: "fake-ip",
		}, nil)
	})

	It("sets the external IP on the returned container", func() {
		Expect(createdContainer.ExternalIP).To(Equal("fake-ip"))
	})
})

Context("when gardenContainer.Info fails", func() {
	var gardenError = errors.New("garden error")

	BeforeEach(func() {
		fakeGardenContainer.InfoReturns(garden.ContainerInfo{}, gardenError)
	})

	It("propagates the error", func() {
		Expect(createErr).To(Equal(gardenError))
	})

	It("deletes the container from Garden", func() {
		Expect(fakeGardenClient.DestroyCallCount()).To(Equal(1))
		Expect(fakeGardenClient.DestroyArgsForCall(0)).To(Equal(executorContainer.Guid))
	})
})
})
// Garden rejecting the create: the error is returned to the caller and the
// failure is emitted to loggregator on the error stream.
Context("when creating the container fails", func() {
	disaster := errors.New("oh no!")

	BeforeEach(func() {
		fakeGardenClient.CreateReturns(nil, disaster)
	})

	It("returns the error", func() {
		Expect(createErr).To(Equal(disaster))
	})

	It("emits to loggregator", func() {
		logs := fakeLogSender.GetLogs()
		Expect(logs).To(HaveLen(2))

		emission := logs[0]
		Expect(emission.AppId).To(Equal("log-guid"))
		Expect(emission.SourceType).To(Equal("some-source-name"))
		Expect(emission.SourceInstance).To(Equal("1"))
		Expect(string(emission.Message)).To(Equal("Creating container"))
		Expect(emission.MessageType).To(Equal("OUT"))

		// The second emission reports the failure on the ERR stream.
		emission = logs[1]
		Expect(emission.AppId).To(Equal("log-guid"))
		Expect(emission.SourceType).To(Equal("some-source-name"))
		Expect(emission.SourceInstance).To(Equal("1"))
		Expect(string(emission.Message)).To(Equal("Failed to create container"))
		Expect(emission.MessageType).To(Equal("ERR"))
	})
})
})
// List: converts the owner's Garden containers (plus a BulkInfo call for
// their properties) into executor containers, filtering by tags and
// skipping containers whose info lookup errored.
Describe("List", func() {
	var (
		fakeContainer1, fakeContainer2 *gfakes.FakeContainer
	)

	BeforeEach(func() {
		fakeContainer1 = &gfakes.FakeContainer{
			HandleStub: func() string {
				return "fake-handle-1"
			},
		}

		fakeContainer2 = &gfakes.FakeContainer{
			HandleStub: func() string {
				return "fake-handle-2"
			},
		}

		fakeGardenClient.ContainersReturns([]garden.Container{
			fakeContainer1,
			fakeContainer2,
		}, nil)

		// Both containers report StateCreated through their properties.
		fakeGardenClient.BulkInfoReturns(
			map[string]garden.ContainerInfoEntry{
				"fake-handle-1": garden.ContainerInfoEntry{
					Info: garden.ContainerInfo{
						Properties: garden.Properties{
							gardenstore.ContainerStateProperty: string(executor.StateCreated),
						},
					},
				},
				"fake-handle-2": garden.ContainerInfoEntry{
					Info: garden.ContainerInfo{
						Properties: garden.Properties{
							gardenstore.ContainerStateProperty: string(executor.StateCreated),
						},
					},
				},
			}, nil)
	})

	It("returns an executor container for each container in garden", func() {
		containers, err := gardenStore.List(logger, nil)
		Expect(err).NotTo(HaveOccurred())
		Expect(containers).To(HaveLen(2))

		Expect([]string{containers[0].Guid, containers[1].Guid}).To(ConsistOf("fake-handle-1", "fake-handle-2"))

		Expect(containers[0].State).To(Equal(executor.StateCreated))
		Expect(containers[1].State).To(Equal(executor.StateCreated))

		Expect(fakeGardenClient.BulkInfoCallCount()).To(Equal(1))
		Expect(fakeGardenClient.BulkInfoArgsForCall(0)).To(ConsistOf("fake-handle-1", "fake-handle-2"))
	})

	It("only queries garden for the containers with the right owner", func() {
		_, err := gardenStore.List(logger, nil)
		Expect(err).NotTo(HaveOccurred())

		Expect(fakeGardenClient.ContainersArgsForCall(0)).To(Equal(garden.Properties{
			gardenstore.ContainerOwnerProperty: ownerName,
		}))
	})

	Context("when tags are specified", func() {
		It("filters by the tag properties", func() {
			_, err := gardenStore.List(logger, executor.Tags{"a": "b", "c": "d"})
			Expect(err).NotTo(HaveOccurred())

			// Tag filters combine with the owner filter as "tag:" properties.
			Expect(fakeGardenClient.ContainersArgsForCall(0)).To(Equal(garden.Properties{
				gardenstore.ContainerOwnerProperty: ownerName,
				"tag:a":                            "b",
				"tag:c":                            "d",
			}))
		})
	})

	Context("when a container's info fails to fetch", func() {
		BeforeEach(func() {
			fakeGardenClient.BulkInfoReturns(
				map[string]garden.ContainerInfoEntry{
					"fake-handle-1": garden.ContainerInfoEntry{
						Err: garden.NewError("oh no"),
					},
					"fake-handle-2": garden.ContainerInfoEntry{
						Info: garden.ContainerInfo{
							Properties: garden.Properties{
								gardenstore.ContainerStateProperty: string(executor.StateCreated),
							},
						},
					},
				},
				nil,
			)
		})

		It("excludes it from the result set", func() {
			containers, err := gardenStore.List(logger, nil)
			Expect(err).NotTo(HaveOccurred())
			Expect(containers).To(HaveLen(1))

			Expect(containers[0].Guid).To(Equal("fake-handle-2"))
			Expect(containers[0].State).To(Equal(executor.StateCreated))
		})
	})
})
// Destroy: frees the step process, destroys the Garden container, logs its
// lifecycle, and treats ContainerNotFound from Garden as success.
Describe("Destroy", func() {
	const destroySessionPrefix = "test.destroy."
	const freeProcessSessionPrefix = destroySessionPrefix + "freeing-step-process."

	var destroyErr error

	JustBeforeEach(func() {
		destroyErr = gardenStore.Destroy(logger, "the-guid")
	})

	It("doesn't return an error", func() {
		Expect(destroyErr).NotTo(HaveOccurred())
	})

	It("destroys the container", func() {
		Expect(fakeGardenClient.DestroyArgsForCall(0)).To(Equal("the-guid"))
	})

	It("logs its lifecycle", func() {
		Expect(logger).To(gbytes.Say(destroySessionPrefix + "started"))
		Expect(logger).To(gbytes.Say(freeProcessSessionPrefix + "started"))
		Expect(logger).To(gbytes.Say(freeProcessSessionPrefix + "finished"))
		Expect(logger).To(gbytes.Say(destroySessionPrefix + "succeeded"))
	})

	Context("when the Garden client fails to destroy the given container", func() {
		var gardenDestroyErr = errors.New("destroy-err")

		BeforeEach(func() {
			fakeGardenClient.DestroyReturns(gardenDestroyErr)
		})

		It("returns the Garden error", func() {
			Expect(destroyErr).To(Equal(gardenDestroyErr))
		})

		It("logs the error", func() {
			Expect(logger).To(gbytes.Say(destroySessionPrefix + "failed-to-destroy-garden-container"))
		})
	})

	Context("when the Garden client returns ContainerNotFoundError", func() {
		BeforeEach(func() {
			fakeGardenClient.DestroyReturns(garden.ContainerNotFoundError{
				Handle: "some-handle",
			})
		})

		// Destroying an already-gone container is idempotent.
		It("doesn't return an error", func() {
			Expect(destroyErr).NotTo(HaveOccurred())
		})
	})
})
// GetFiles: streams a path out of the container as root; closing the
// returned stream closes the underlying Garden stream. A missing container
// maps to executor.ErrContainerNotFound.
Describe("GetFiles", func() {
	Context("when the container exists", func() {
		var (
			container  *gfakes.FakeContainer
			fakeStream *gbytes.Buffer
		)

		BeforeEach(func() {
			fakeStream = gbytes.BufferWithBytes([]byte("stuff"))

			container = &gfakes.FakeContainer{}
			container.StreamOutReturns(fakeStream, nil)

			fakeGardenClient.LookupReturns(container, nil)
		})

		It("gets the files", func() {
			stream, err := gardenStore.GetFiles(logger, "the-guid", "the-path")
			Expect(err).NotTo(HaveOccurred())

			Expect(container.StreamOutArgsForCall(0)).To(Equal(garden.StreamOutSpec{Path: "the-path", User: "root"}))

			bytes, err := ioutil.ReadAll(stream)
			Expect(err).NotTo(HaveOccurred())
			Expect(string(bytes)).To(Equal("stuff"))

			// Closing the returned stream must close the Garden-side stream.
			stream.Close()
			Expect(fakeStream.Closed()).To(BeTrue())
		})
	})

	Context("when the container doesn't exist", func() {
		BeforeEach(func() {
			fakeGardenClient.LookupReturns(nil, garden.ContainerNotFoundError{})
		})

		It("returns a container-not-found error", func() {
			_, err := gardenStore.GetFiles(logger, "the-guid", "the-path")
			Expect(err).To(Equal(executor.ErrContainerNotFound))
		})
	})
})
// Ping: a straight passthrough to the Garden client's Ping.
Describe("Ping", func() {
	Context("when pinging succeeds", func() {
		It("succeeds", func() {
			err := gardenStore.Ping()
			Expect(err).NotTo(HaveOccurred())
		})
	})

	Context("when pinging fails", func() {
		disaster := errors.New("welp")

		BeforeEach(func() {
			fakeGardenClient.PingReturns(disaster)
		})

		// Fixed spec name: Ping has nothing to do with container lookup;
		// the old name ("returns a container-not-found error") was
		// copy-pasted from the GetFiles specs.
		It("returns the error", func() {
			Expect(gardenStore.Ping()).To(Equal(disaster))
		})
	})
})
Describe("Run", func() {
// Log session prefixes used throughout the Run specs.
const (
	runSessionPrefix  = "test.run."
	stepSessionPrefix = runSessionPrefix + "run-step-process."
)

// Shared mutable state for the Run specs. Everything touched from both the
// test goroutine and the store's step goroutines is guarded by mutex.
var (
	processes                    map[string]*gfakes.FakeProcess // fake run/monitor processes keyed by spec path
	containerProperties          map[string]string              // Garden properties as seen by the fake container
	orderInWhichPropertiesAreSet []string                       // audit trail of SetProperty calls
	gardenContainer              *gfakes.FakeContainer
	executorContainer            executor.Container
	err                          error

	monitorReturns chan int // feeds exit statuses to the fake monitor process
	runReturns     chan int // feeds exit statuses to the fake run process

	runAction     *models.RunAction
	monitorAction *models.RunAction

	mutex sync.Mutex
)
// Wires up a fake Garden container whose "run" and "monitor" processes
// block on channels, so specs can script exit statuses. Signal closes a
// per-process channel, making Wait return 143 (128+SIGTERM).
BeforeEach(func() {
	runAction = &models.RunAction{User: "me", Path: "run"}
	monitorAction = &models.RunAction{User: "me", Path: "monitor"}

	mutex.Lock()
	defer mutex.Unlock()

	monitorReturns = make(chan int)
	runReturns = make(chan int)

	executorContainer = executor.Container{
		Guid:  "some-container-handle",
		State: executor.StateInitializing,
		RunInfo: executor.RunInfo{
			Action:       models.WrapAction(runAction),
			Monitor:      models.WrapAction(monitorAction),
			StartTimeout: 3,
		},
	}

	runSignalled := make(chan struct{})
	monitorSignalled := make(chan struct{})

	processes = make(map[string]*gfakes.FakeProcess)

	processes["run"] = new(gfakes.FakeProcess)
	processes["run"].WaitStub = func() (int, error) {
		// Block until a scripted status arrives or the process is signalled.
		select {
		case status := <-runReturns:
			return status, nil
		case <-runSignalled:
			return 143, nil
		}
	}
	processes["run"].SignalStub = func(garden.Signal) error {
		close(runSignalled)
		return nil
	}

	processes["monitor"] = new(gfakes.FakeProcess)
	processes["monitor"].WaitStub = func() (int, error) {
		select {
		case status := <-monitorReturns:
			return status, nil
		case <-monitorSignalled:
			return 143, nil
		}
	}
	processes["monitor"].SignalStub = func(garden.Signal) error {
		close(monitorSignalled)
		return nil
	}

	containerProperties = make(map[string]string)
	containerProperties[gardenstore.ContainerStateProperty] = string(executor.StateCreated)

	orderInWhichPropertiesAreSet = []string{}

	gardenContainer = new(gfakes.FakeContainer)
	gardenContainer.HandleReturns("some-container-handle")

	// Record every property write (under the mutex) so specs can poll
	// state transitions and inspect write ordering.
	gardenContainer.SetPropertyStub = func(key, value string) error {
		mutex.Lock()
		containerProperties[key] = value
		orderInWhichPropertiesAreSet = append(orderInWhichPropertiesAreSet, key)
		mutex.Unlock()
		return nil
	}

	// Info returns a snapshot copy of the properties to avoid racing with
	// concurrent SetProperty calls.
	gardenContainer.InfoStub = func() (garden.ContainerInfo, error) {
		mutex.Lock()
		defer mutex.Unlock()

		props := map[string]string{}
		for k, v := range containerProperties {
			props[k] = v
		}

		return garden.ContainerInfo{
			Properties: props,
		}, nil
	}

	// Dispatch Run to the fake process matching the spec's Path
	// ("run" or "monitor").
	gardenContainer.RunStub = func(processSpec garden.ProcessSpec, _ garden.ProcessIO) (garden.Process, error) {
		mutex.Lock()
		defer mutex.Unlock()
		return processes[processSpec.Path], nil
	}

	fakeGardenClient.LookupReturns(gardenContainer, nil)
	fakeGardenClient.CreateReturns(gardenContainer, nil)
})
// Unblock any fake process still waiting, then stop and destroy the
// container so no step goroutine leaks into the next spec.
AfterEach(func() {
	close(monitorReturns)
	close(runReturns)

	gardenStore.Stop(logger, "some-container-handle")
	gardenStore.Destroy(logger, "some-container-handle")
})
// containerStateGetter reads the container-state property under the mutex,
// suitable for polling with Eventually/Consistently.
containerStateGetter := func() string {
	mutex.Lock()
	defer mutex.Unlock()
	return containerProperties[gardenstore.ContainerResultProperty] // NOTE: see below, state property
	return containerProperties[gardenstore.ContainerStateProperty]
}

// containerResult decodes the JSON run-result property written by the
// store's step process.
containerResult := func() executor.ContainerRunResult {
	mutex.Lock()
	defer mutex.Unlock()

	resultJSON := containerProperties[gardenstore.ContainerResultProperty]
	result := executor.ContainerRunResult{}
	err := json.Unmarshal([]byte(resultJSON), &result)
	Expect(err).NotTo(HaveOccurred())

	return result
}
// Run first looks the container up in Garden; if that fails, nothing is
// run and the failure is logged.
Context("when the garden container lookup fails", func() {
	JustBeforeEach(func() {
		executorContainer, err = gardenStore.Create(logger, executorContainer)
		Expect(err).NotTo(HaveOccurred())

		gardenStore.Run(logger, executorContainer)
	})

	Context("when the lookup fails because the container is not found", func() {
		BeforeEach(func() {
			// Keyed composite literal (was positional), per `go vet -composites`.
			fakeGardenClient.LookupReturns(gardenContainer, garden.ContainerNotFoundError{Handle: "some-container-handle"})
		})

		It("logs that the container was not found", func() {
			Expect(logger).To(gbytes.Say(runSessionPrefix + "lookup-failed"))
			Expect(logger).To(gbytes.Say("some-container-handle"))
		})

		It("does not run the container", func() {
			Consistently(gardenContainer.RunCallCount).Should(Equal(0))
		})
	})

	Context("when the lookup fails for some other reason", func() {
		BeforeEach(func() {
			fakeGardenClient.LookupReturns(gardenContainer, errors.New("whoops"))
		})

		It("logs the error", func() {
			Expect(logger).To(gbytes.Say(runSessionPrefix + "lookup-failed"))
		})

		It("does not run the container", func() {
			Consistently(gardenContainer.RunCallCount).Should(Equal(0))
		})
	})
})
// Without a monitor action the container is considered running the moment
// the run step starts; the run process's exit status decides success.
Context("when there is no monitor action", func() {
	BeforeEach(func() {
		executorContainer.Monitor = nil

		executorContainer, err = gardenStore.Create(logger, executorContainer)
		Expect(err).NotTo(HaveOccurred())

		err = gardenStore.Run(logger, executorContainer)
		Expect(err).NotTo(HaveOccurred())
	})

	It("transitions to running as soon as it starts running", func() {
		Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateRunning))
		Eventually(emitter.EmitCallCount).Should(Equal(1))
		Expect(emitter.EmitArgsForCall(0).EventType()).To(Equal(executor.EventTypeContainerRunning))
	})

	Context("when the running action exits succesfully", func() {
		BeforeEach(func() {
			// Wait for the running event to have gone through before
			// scripting the exit, to keep event ordering deterministic.
			Eventually(emitter.EmitCallCount).Should(Equal(1))

			runReturns <- 0
		})

		It("transitions to complete and succeeded", func() {
			Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateCompleted))
			Eventually(emitter.EmitCallCount).Should(Equal(2))
			Expect(emitter.EmitArgsForCall(1).EventType()).To(Equal(executor.EventTypeContainerComplete))
			Expect(containerResult().Failed).To(BeFalse())
		})

		It("logs the successful exit and the transition to complete", func() {
			Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "step-finished-normally"))
			Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "transitioning-to-complete"))
			Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "succeeded-transitioning-to-complete"))
		})
	})

	Context("when the running action exits unsuccesfully", func() {
		BeforeEach(func() {
			// Wait for the running event to have gone through first.
			Eventually(emitter.EmitCallCount).Should(Equal(1))

			runReturns <- 1
		})

		It("transitions to complete and failed", func() {
			Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateCompleted))
			Eventually(emitter.EmitCallCount).Should(Equal(2))
			Expect(emitter.EmitArgsForCall(1).EventType()).To(Equal(executor.EventTypeContainerComplete))
			Expect(containerResult().Failed).To(BeTrue())
			Expect(containerResult().FailureReason).To(ContainSubstring("Exited with status 1"))
		})

		It("logs the unsuccessful exit", func() {
			Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "step-finished-with-error"))
		})
	})
})
Context("when there is a monitor action", func() {
// Create the container and kick off Run; the monitor step schedules a
// timer, so wait for the fake clock to have a watcher before any spec
// advances time.
JustBeforeEach(func() {
	executorContainer, err = gardenStore.Create(logger, executorContainer)
	Expect(err).NotTo(HaveOccurred())

	err = gardenStore.Run(logger, executorContainer)
	Expect(err).NotTo(HaveOccurred())
	Eventually(clock.WatcherCount).Should(Equal(1))
})
Context("when the monitor action succeeds", func() {
// Advance past the monitor interval and script a healthy (0) monitor exit.
JustBeforeEach(func() {
	clock.Increment(time.Second)
	monitorReturns <- 0
})

It("marks the container as running and emits an event", func() {
	Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateRunning))
	Eventually(emitter.EmitCallCount).Should(Equal(1))
	Expect(emitter.EmitArgsForCall(0).EventType()).To(Equal(executor.EventTypeContainerRunning))
})

It("logs the run session lifecycle", func() {
	Expect(logger).To(gbytes.Say(runSessionPrefix + "started"))
	Expect(logger).To(gbytes.Say(runSessionPrefix + "found-garden-container"))
	Expect(logger).To(gbytes.Say(runSessionPrefix + "stored-step-process"))
	Expect(logger).To(gbytes.Say(runSessionPrefix + "finished"))
})

It("logs that the step process started and transitioned to running", func() {
	Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "started"))
	Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "transitioning-to-running"))
	Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "succeeded-transitioning-to-running"))
})
// A monitor failure after the container is running completes the container
// as failed.
Context("when the monitor action subsequently fails", func() {
	JustBeforeEach(func() {
		Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateRunning))
		clock.Increment(time.Second)
		monitorReturns <- 1
	})

	It("marks the container completed", func() {
		Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateCompleted))
		Eventually(emitter.EmitCallCount).Should(Equal(2))
		Expect(emitter.EmitArgsForCall(1).EventType()).To(Equal(executor.EventTypeContainerComplete))
		Expect(containerResult().Failed).To(BeTrue())
	})
})
// Stopping a running container signals the step process, frees it, and
// completes the container as stopped-but-not-failed.
Context("when Stop is called", func() {
	const stopSessionPrefix = "test.stop."

	var stopped chan struct{}

	BeforeEach(func() {
		stopped = make(chan struct{})
	})

	JustBeforeEach(func() {
		go func() {
			// Snapshot the channel for this spec before blocking.
			stopped := stopped
			gardenStore.Stop(logger, executorContainer.Guid)
			close(stopped)
		}()
	})

	It("logs that the step process was signaled and then finished, and was freed", func() {
		Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "signaled"))
		Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "finished"))
	})

	It("logs that the step process was freed", func() {
		freeSessionPrefix := stopSessionPrefix + "freeing-step-process."
		Eventually(logger).Should(gbytes.Say(stopSessionPrefix + "started"))
		Eventually(logger).Should(gbytes.Say(freeSessionPrefix + "started"))
		Eventually(logger).Should(gbytes.Say(freeSessionPrefix + "interrupting-process"))
		Eventually(logger).Should(gbytes.Say(freeSessionPrefix + "finished"))
		Eventually(logger).Should(gbytes.Say(stopSessionPrefix + "finished"))
	})

	It("completes without failure", func() {
		Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateCompleted))
		Eventually(emitter.EmitCallCount).Should(Equal(2))
		Expect(emitter.EmitArgsForCall(1).EventType()).To(Equal(executor.EventTypeContainerComplete))
		Expect(containerResult().Failed).To(BeFalse())
	})

	It("reports in the result that it was stopped", func() {
		Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateCompleted))
		Eventually(emitter.EmitCallCount).Should(Equal(2))
		Expect(emitter.EmitArgsForCall(1).EventType()).To(Equal(executor.EventTypeContainerComplete))
		Expect(containerResult().Stopped).To(BeTrue())
	})

	Context("when the step takes a while to complete", func() {
		var exited chan int

		BeforeEach(func() {
			exited = make(chan int, 1)

			processes["run"].WaitStub = func() (int, error) {
				return <-exited, nil
			}
		})

		It("waits", func() {
			Consistently(stopped).ShouldNot(BeClosed())

			exited <- 1

			// BUG FIX: once the process exits, Stop must return and close
			// the channel — the original asserted ShouldNot(BeClosed()),
			// which passes vacuously and never verified Stop completes.
			Eventually(stopped).Should(BeClosed())
		})
	})
})
// Destroying a running container behaves like Stop (signal, free, complete
// as stopped) and then removes the container.
Context("when Destroy is called", func() {
	const destroySessionPrefix = "test.destroy."

	var destroyed chan struct{}

	BeforeEach(func() {
		destroyed = make(chan struct{})
	})

	JustBeforeEach(func() {
		go func() {
			// Snapshot the channel for this spec before blocking.
			destroyed := destroyed
			gardenStore.Destroy(logger, executorContainer.Guid)
			close(destroyed)
		}()
	})

	AfterEach(func() {
		// Make sure the Destroy goroutine has finished before the next spec.
		Eventually(destroyed).Should(BeClosed())
	})

	It("logs that the step process was signaled and then finished, and was freed", func() {
		Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "signaled"))
		Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "finished"))
	})

	It("logs that the step process was freed", func() {
		freeSessionPrefix := destroySessionPrefix + "freeing-step-process."
		Eventually(logger).Should(gbytes.Say(destroySessionPrefix + "started"))
		Eventually(logger).Should(gbytes.Say(freeSessionPrefix + "started"))
		Eventually(logger).Should(gbytes.Say(freeSessionPrefix + "interrupting-process"))
		Eventually(logger).Should(gbytes.Say(freeSessionPrefix + "finished"))
		Eventually(logger).Should(gbytes.Say(destroySessionPrefix + "succeeded"))
	})

	It("completes without failure", func() {
		Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateCompleted))
		Eventually(emitter.EmitCallCount).Should(Equal(2))
		Expect(emitter.EmitArgsForCall(1).EventType()).To(Equal(executor.EventTypeContainerComplete))
		Expect(containerResult().Failed).To(BeFalse())
	})

	It("reports in the result that it was stopped", func() {
		Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateCompleted))
		Eventually(emitter.EmitCallCount).Should(Equal(2))
		Expect(emitter.EmitArgsForCall(1).EventType()).To(Equal(executor.EventTypeContainerComplete))
		Expect(containerResult().Stopped).To(BeTrue())
	})
})
})
Context("when monitor persistently fails", func() {
JustBeforeEach(func() {
clock.Increment(time.Second)
monitorReturns <- 1
})
It("doesn't transition to running", func() {
Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateCreated))
Eventually(emitter.EmitCallCount).Should(Equal(0))
})
Context("when the time to start elapses", func() {
JustBeforeEach(func() {
By("ticking out to 3 seconds (note we had just ticked once)")
for i := 0; i < 3; i++ {
//ugh, got to wait until the timer is being read from before we increment time
time.Sleep(10 * time.Millisecond)
clock.Increment(time.Second)
monitorReturns <- 1
}
})
It("transitions to completed and failed", func() {
Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateCompleted))
Eventually(emitter.EmitCallCount).Should(Equal(1))
Expect(emitter.EmitArgsForCall(0).EventType()).To(Equal(executor.EventTypeContainerComplete))
Expect(containerResult().Failed).To(BeTrue())
})
})
})
})
Context("when marking the task as complete", func() {
BeforeEach(func() {
executorContainer, err = gardenStore.Create(logger, executorContainer)
Expect(err).NotTo(HaveOccurred())
err = gardenStore.Run(logger, executorContainer)
Expect(err).NotTo(HaveOccurred())
Eventually(clock.WatcherCount).Should(Equal(1))
clock.Increment(time.Second)
monitorReturns <- 0
Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateRunning))
clock.Increment(time.Second)
monitorReturns <- 1
Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateCompleted))
})
It("always sets the failure result first, and then the state so that things polling on sate will see the result", func() {
mutex.Lock()
defer mutex.Unlock()
n := len(orderInWhichPropertiesAreSet)
Expect(n).To(BeNumerically(">", 2))
Expect(orderInWhichPropertiesAreSet[n-2]).To(Equal(gardenstore.ContainerResultProperty))
Expect(orderInWhichPropertiesAreSet[n-1]).To(Equal(gardenstore.ContainerStateProperty))
})
})
})
Describe("Stop", func() {
Context("when the garden container is not found in the local store", func() {
var stopErr error
JustBeforeEach(func() {
stopErr = gardenStore.Stop(logger, "an-unknown-guid")
})
It("tries to destroy the real garden container", func() {
Expect(fakeGardenClient.DestroyCallCount()).To(Equal(1))
Expect(fakeGardenClient.DestroyArgsForCall(0)).To(Equal("an-unknown-guid"))
})
It("fails with container not found", func() {
Expect(stopErr).To(Equal(executor.ErrContainerNotFound))
})
Context("when destroying the garden container fails", func() {
Context("with a container not found", func() {
BeforeEach(func() {
fakeGardenClient.DestroyReturns(garden.ContainerNotFoundError{})
})
It("fails with executor's container not found error", func() {
Expect(stopErr).To(Equal(executor.ErrContainerNotFound))
})
})
Context("with any other error", func() {
var expectedError = errors.New("woops")
BeforeEach(func() {
fakeGardenClient.DestroyReturns(expectedError)
})
It("fails with the original error", func() {
Expect(stopErr).To(Equal(expectedError))
})
})
})
})
})
Describe("Metrics", func() {
var (
metrics map[string]executor.ContainerMetrics
metricsErr error
)
JustBeforeEach(func() {
metrics, metricsErr = gardenStore.Metrics(logger, []string{"some-container-handle"})
})
BeforeEach(func() {
containerMetrics := garden.Metrics{
MemoryStat: garden.ContainerMemoryStat{
TotalRss: 100, // ignored
TotalCache: 12, // ignored
TotalInactiveFile: 1, // ignored
TotalUsageTowardLimit: 987,
},
DiskStat: garden.ContainerDiskStat{
ExclusiveBytesUsed: 222,
ExclusiveInodesUsed: 333,
},
CPUStat: garden.ContainerCPUStat{
Usage: 123,
User: 456, // ignored
System: 789, // ignored
},
}
fakeGardenClient.BulkMetricsReturns(map[string]garden.ContainerMetricsEntry{
"some-container-handle": garden.ContainerMetricsEntry{
Metrics: containerMetrics,
Err: nil,
},
}, nil)
})
It("does not error", func() {
Expect(metricsErr).NotTo(HaveOccurred())
})
It("gets metrics from garden", func() {
Expect(fakeGardenClient.BulkMetricsCallCount()).To(Equal(1))
Expect(metrics).To(HaveLen(1))
Expect(metrics["some-container-handle"]).To(Equal(executor.ContainerMetrics{
MemoryUsageInBytes: 987,
DiskUsageInBytes: 222,
TimeSpentInCPU: 123,
}))
})
Context("when a container metric entry has an error", func() {
BeforeEach(func() {
fakeGardenClient.BulkMetricsReturns(map[string]garden.ContainerMetricsEntry{
"some-container-handle": garden.ContainerMetricsEntry{
Err: garden.NewError("oh no"),
},
}, nil)
})
It("does not error", func() {
Expect(metricsErr).NotTo(HaveOccurred())
})
It("ignores any container with errors", func() {
Expect(fakeGardenClient.BulkMetricsCallCount()).To(Equal(1))
Expect(metrics).To(HaveLen(0))
})
})
Context("when a bulk metrics returns an error", func() {
	BeforeEach(func() {
		fakeGardenClient.BulkMetricsReturns(nil, errors.New("oh no"))
	})

	// FIX: the description used to read "does not error", contradicting the
	// assertion below, which checks that the error IS propagated to the caller.
	It("returns the error", func() {
		Expect(metricsErr).To(HaveOccurred())
	})
})
})
Describe("Transitions", func() {
var executorContainer executor.Container
BeforeEach(func() {
executorContainer = executor.Container{
Guid: "some-container-handle",
RunInfo: executor.RunInfo{
Action: models.WrapAction(action),
Monitor: models.WrapAction(action),
},
}
gardenContainer := new(gfakes.FakeContainer)
gardenContainer.RunReturns(new(gfakes.FakeProcess), nil)
fakeGardenClient.LookupReturns(gardenContainer, nil)
fakeGardenClient.CreateReturns(gardenContainer, nil)
})
expectations := []gardenStoreTransitionExpectation{
{to: "create", from: "non-existent", assertError: "occurs"},
{to: "create", from: "reserved", assertError: "occurs"},
{to: "create", from: "initializing", assertError: "does not occur"},
{to: "create", from: "created", assertError: "occurs"},
{to: "create", from: "running", assertError: "occurs"},
{to: "create", from: "completed", assertError: "occurs"},
{to: "run", from: "non-existent", assertError: "occurs"},
{to: "run", from: "reserved", assertError: "occurs"},
{to: "run", from: "initializing", assertError: "occurs"},
{to: "run", from: "created", assertError: "does not occur"},
{to: "run", from: "running", assertError: "occurs"},
{to: "run", from: "completed", assertError: "occurs"},
}
for _, expectation := range expectations {
expectation := expectation
It("error "+expectation.assertError+" when transitioning from "+expectation.from+" to "+expectation.to, func() {
expectation.driveFromState(&executorContainer)
err := expectation.transitionToState(gardenStore, executorContainer)
expectation.checkErrorResult(err)
})
}
})
})
// gardenStoreTransitionExpectation is one row of the table-driven state
// transition test: start a container in state 'from', attempt operation 'to',
// and assert whether an error "occurs" or "does not occur".
type gardenStoreTransitionExpectation struct {
	from        string // initial container state; handled by driveFromState
	to          string // operation to attempt ("create" or "run"); handled by transitionToState
	assertError string // "occurs" or "does not occur"; checked by checkErrorResult
}
// driveFromState puts the container into the state named by expectation.from
// before the transition under test is attempted. "non-existent" leaves the
// container untouched; an unrecognized name fails the spec immediately.
func (expectation gardenStoreTransitionExpectation) driveFromState(container *executor.Container) {
	from := expectation.from

	// Nothing to set up: the container is expected not to exist yet.
	if from == "non-existent" {
		return
	}

	switch from {
	case "reserved":
		container.State = executor.StateReserved
	case "initializing":
		container.State = executor.StateInitializing
	case "created":
		container.State = executor.StateCreated
	case "running":
		container.State = executor.StateRunning
	case "completed":
		container.State = executor.StateCompleted
	default:
		Fail("unknown 'from' state: " + from)
	}
}
// transitionToState attempts the operation named by expectation.to against the
// given store and returns whatever error that operation produced. An
// unrecognized operation name fails the spec immediately.
func (expectation gardenStoreTransitionExpectation) transitionToState(gardenStore *gardenstore.GardenStore, container executor.Container) error {
	if expectation.to == "create" {
		_, createErr := gardenStore.Create(lagertest.NewTestLogger("test"), container)
		return createErr
	}

	if expectation.to == "run" {
		return gardenStore.Run(lagertest.NewTestLogger("test"), container)
	}

	Fail("unknown 'to' state: " + expectation.to)
	// Unreachable in practice (Fail panics), but required for compilation.
	return nil
}
// checkErrorResult asserts that err matches the expectation: "occurs" requires
// a non-nil error, "does not occur" requires nil. Any other expectation string
// is a mistake in the test table and fails the spec.
func (expectation gardenStoreTransitionExpectation) checkErrorResult(err error) {
	if expectation.assertError == "occurs" {
		Expect(err).To(HaveOccurred())
		return
	}

	if expectation.assertError == "does not occur" {
		Expect(err).NotTo(HaveOccurred())
		return
	}

	Fail("unknown 'assertErr' expectation: " + expectation.assertError)
}
Go Format.
Signed-off-by: Kris Hicks <6d3f751023bc04266d4da2ff94804f4240839fe2@pivotal.io>
package gardenstore_test
import (
"encoding/json"
"errors"
"io/ioutil"
"net"
"os"
"sync"
"time"
"github.com/cloudfoundry-incubator/executor"
"github.com/cloudfoundry-incubator/bbs/models"
"github.com/cloudfoundry-incubator/executor/depot/gardenstore"
"github.com/cloudfoundry-incubator/executor/depot/gardenstore/fakes"
"github.com/cloudfoundry-incubator/executor/depot/transformer"
"github.com/cloudfoundry-incubator/garden"
gfakes "github.com/cloudfoundry-incubator/garden/fakes"
"github.com/cloudfoundry/dropsonde/log_sender/fake"
"github.com/cloudfoundry/dropsonde/logs"
"github.com/pivotal-golang/clock/fakeclock"
"github.com/pivotal-golang/lager/lagertest"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
)
var _ = Describe("GardenContainerStore", func() {
var (
fakeGardenClient *gfakes.FakeClient
ownerName = "some-owner-name"
maxCPUShares uint64 = 1024
inodeLimit uint64 = 2000000
clock *fakeclock.FakeClock
emitter *fakes.FakeEventEmitter
fakeLogSender *fake.FakeLogSender
logger *lagertest.TestLogger
gardenStore *gardenstore.GardenStore
)
action := &models.RunAction{
Path: "true",
}
BeforeEach(func() {
fakeGardenClient = new(gfakes.FakeClient)
clock = fakeclock.NewFakeClock(time.Now())
emitter = new(fakes.FakeEventEmitter)
fakeLogSender = fake.NewFakeLogSender()
logs.Initialize(fakeLogSender)
logger = lagertest.NewTestLogger("test")
var err error
gardenStore, err = gardenstore.NewGardenStore(
fakeGardenClient,
ownerName,
maxCPUShares,
inodeLimit,
100*time.Millisecond,
100*time.Millisecond,
transformer.NewTransformer(nil, nil, nil, nil, nil, nil, os.TempDir(), false, clock),
clock,
emitter,
100,
)
Expect(err).NotTo(HaveOccurred())
})
Describe("Lookup", func() {
var (
executorContainer executor.Container
lookupErr error
)
JustBeforeEach(func() {
executorContainer, lookupErr = gardenStore.Lookup(logger, "some-container-handle")
})
Context("when the container doesn't exist", func() {
BeforeEach(func() {
fakeGardenClient.LookupReturns(nil, garden.ContainerNotFoundError{})
})
It("returns a container-not-found error", func() {
Expect(lookupErr).To(Equal(executor.ErrContainerNotFound))
})
})
Context("when lookup fails", func() {
BeforeEach(func() {
fakeGardenClient.LookupReturns(nil, errors.New("didn't find it"))
})
It("returns the error", func() {
Expect(lookupErr).To(MatchError(Equal("didn't find it")))
})
})
Context("when the container exists", func() {
var gardenContainer *gfakes.FakeContainer
BeforeEach(func() {
gardenContainer = new(gfakes.FakeContainer)
gardenContainer.HandleReturns("some-container-handle")
fakeGardenClient.LookupReturns(gardenContainer, nil)
})
It("does not error", func() {
Expect(lookupErr).NotTo(HaveOccurred())
})
It("has the Garden container handle as its container guid", func() {
Expect(executorContainer.Guid).To(Equal("some-container-handle"))
})
It("looked up by the given guid", func() {
Expect(fakeGardenClient.LookupArgsForCall(0)).To(Equal("some-container-handle"))
})
Context("when the container has an executor:state property", func() {
Context("and it's Reserved", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:state": string(executor.StateReserved),
},
}, nil)
})
It("has it as its state", func() {
Expect(executorContainer.State).To(Equal(executor.StateReserved))
})
})
Context("and it's Initializing", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:state": string(executor.StateInitializing),
},
}, nil)
})
It("has it as its state", func() {
Expect(executorContainer.State).To(Equal(executor.StateInitializing))
})
})
Context("and it's Created", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:state": string(executor.StateCreated),
},
}, nil)
})
It("has it as its state", func() {
Expect(executorContainer.State).To(Equal(executor.StateCreated))
})
})
Context("and it's Running", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:state": string(executor.StateRunning),
},
}, nil)
})
It("has it as its state", func() {
Expect(executorContainer.State).To(Equal(executor.StateRunning))
})
})
Context("and it's Completed", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:state": string(executor.StateCompleted),
},
}, nil)
})
It("has it as its state", func() {
Expect(executorContainer.State).To(Equal(executor.StateCompleted))
})
})
Context("when it's some other state", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:state": "bogus-state",
},
}, nil)
})
It("returns an InvalidStateError", func() {
Expect(lookupErr).To(Equal(gardenstore.InvalidStateError{"bogus-state"}))
})
})
})
Context("when the container has an executor:allocated-at property", func() {
Context("when it's a valid integer", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:allocated-at": "123",
},
}, nil)
})
It("has it as its allocated at value", func() {
Expect(executorContainer.AllocatedAt).To(Equal(int64(123)))
})
})
Context("when it's a bogus value", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:allocated-at": "some-bogus-timestamp",
},
}, nil)
})
It("returns a MalformedPropertyError", func() {
Expect(lookupErr).To(Equal(gardenstore.MalformedPropertyError{
Property: "executor:allocated-at",
Value: "some-bogus-timestamp",
}))
})
})
})
Context("when the container has an executor:memory-mb property", func() {
Context("when it's a valid integer", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:memory-mb": "1024",
},
}, nil)
})
It("has it as its rootfs path", func() {
Expect(executorContainer.MemoryMB).To(Equal(1024))
})
})
Context("when it's a bogus value", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:memory-mb": "some-bogus-integer",
},
}, nil)
})
It("returns a MalformedPropertyError", func() {
Expect(lookupErr).To(Equal(gardenstore.MalformedPropertyError{
Property: "executor:memory-mb",
Value: "some-bogus-integer",
}))
})
})
})
Context("when the container has an executor:disk-mb property", func() {
Context("when it's a valid integer", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:disk-mb": "2048",
},
}, nil)
})
It("has it as its disk reservation", func() {
Expect(executorContainer.DiskMB).To(Equal(2048))
})
})
Context("when it's a bogus value", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:disk-mb": "some-bogus-integer",
},
}, nil)
})
It("returns a MalformedPropertyError", func() {
Expect(lookupErr).To(Equal(gardenstore.MalformedPropertyError{
Property: "executor:disk-mb",
Value: "some-bogus-integer",
}))
})
})
})
Context("when the container has an executor:cpu-weight", func() {
Context("when it's a valid integer", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:cpu-weight": "99",
},
}, nil)
})
It("has it as its cpu weight", func() {
Expect(executorContainer.CPUWeight).To(Equal(uint(99)))
})
})
Context("when it's a bogus value", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:cpu-weight": "some-bogus-integer",
},
}, nil)
})
It("returns a MalformedPropertyError", func() {
Expect(lookupErr).To(Equal(gardenstore.MalformedPropertyError{
Property: "executor:cpu-weight",
Value: "some-bogus-integer",
}))
})
})
})
Context("when the Garden container has tags", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"tag:a": "a-value",
"tag:b": "b-value",
"executor:x": "excluded-value",
"x": "another-excluded-value",
},
}, nil)
})
It("has the tags", func() {
Expect(executorContainer.Tags).To(Equal(executor.Tags{
"a": "a-value",
"b": "b-value",
}))
})
})
Context("when the Garden container has mapped ports", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
MappedPorts: []garden.PortMapping{
{HostPort: 1234, ContainerPort: 5678},
{HostPort: 4321, ContainerPort: 8765},
},
}, nil)
})
It("has the ports", func() {
Expect(executorContainer.Ports).To(Equal([]executor.PortMapping{
{HostPort: 1234, ContainerPort: 5678},
{HostPort: 4321, ContainerPort: 8765},
}))
})
})
Context("when the Garden container has an external IP", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
ExternalIP: "1.2.3.4",
}, nil)
})
It("has the ports", func() {
Expect(executorContainer.ExternalIP).To(Equal("1.2.3.4"))
})
})
Context("when the Garden container has a log config", func() {
Context("and the log is valid", func() {
index := 1
log := executor.LogConfig{
Guid: "my-guid",
SourceName: "source-name",
Index: index,
}
BeforeEach(func() {
payload, err := json.Marshal(log)
Expect(err).NotTo(HaveOccurred())
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:log-config": string(payload),
},
}, nil)
})
It("has it as its log", func() {
Expect(executorContainer.LogConfig).To(Equal(log))
})
})
Context("and the log is invalid", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:log-config": "ß",
},
}, nil)
})
It("returns an InvalidJSONError", func() {
Expect(lookupErr).To(HaveOccurred())
Expect(lookupErr.Error()).To(ContainSubstring("executor:log-config"))
Expect(lookupErr.Error()).To(ContainSubstring("ß"))
Expect(lookupErr.Error()).To(ContainSubstring("invalid character"))
})
})
})
Context("when the Garden container has a metrics config", func() {
Context("and the metrics config is valid", func() {
index := 1
metricsConfig := executor.MetricsConfig{
Guid: "my-guid",
Index: index,
}
BeforeEach(func() {
payload, err := json.Marshal(metricsConfig)
Expect(err).NotTo(HaveOccurred())
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:metrics-config": string(payload),
},
}, nil)
})
It("has it as its metrics config", func() {
Expect(executorContainer.MetricsConfig).To(Equal(metricsConfig))
})
})
Context("and the metrics config is invalid", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:metrics-config": "ß",
},
}, nil)
})
It("returns an InvalidJSONError", func() {
Expect(lookupErr).To(HaveOccurred())
Expect(lookupErr.Error()).To(ContainSubstring("executor:metrics-config"))
Expect(lookupErr.Error()).To(ContainSubstring("ß"))
Expect(lookupErr.Error()).To(ContainSubstring("invalid character"))
})
})
})
Context("when the Garden container has a run result", func() {
Context("and the run result is valid", func() {
runResult := executor.ContainerRunResult{
Failed: true,
FailureReason: "because",
}
BeforeEach(func() {
payload, err := json.Marshal(runResult)
Expect(err).NotTo(HaveOccurred())
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:result": string(payload),
},
}, nil)
})
It("has its run result", func() {
Expect(executorContainer.RunResult).To(Equal(runResult))
})
})
Context("and the run result is invalid", func() {
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{
Properties: garden.Properties{
"executor:result": "ß",
},
}, nil)
})
It("returns an InvalidJSONError", func() {
Expect(lookupErr).To(HaveOccurred())
Expect(lookupErr.Error()).To(ContainSubstring("executor:result"))
Expect(lookupErr.Error()).To(ContainSubstring("ß"))
Expect(lookupErr.Error()).To(ContainSubstring("invalid character"))
})
})
})
Context("when getting the info from Garden fails", func() {
disaster := errors.New("oh no!")
BeforeEach(func() {
gardenContainer.InfoReturns(garden.ContainerInfo{}, disaster)
})
It("returns the error", func() {
Expect(lookupErr).To(Equal(disaster))
})
})
})
})
Describe("Create", func() {
var (
executorContainer executor.Container
fakeGardenContainer *gfakes.FakeContainer
createdContainer executor.Container
createErr error
)
action := &models.RunAction{
User: "me",
Path: "ls",
}
BeforeEach(func() {
executorContainer = executor.Container{
Guid: "some-guid",
State: executor.StateInitializing,
RunInfo: executor.RunInfo{
Action: models.WrapAction(action),
LogConfig: executor.LogConfig{
Guid: "log-guid",
SourceName: "some-source-name",
Index: 1,
},
},
}
fakeGardenContainer = new(gfakes.FakeContainer)
fakeGardenContainer.HandleReturns("some-guid")
})
JustBeforeEach(func() {
createdContainer, createErr = gardenStore.Create(logger, executorContainer)
})
Context("when creating the container succeeds", func() {
BeforeEach(func() {
fakeGardenClient.CreateReturns(fakeGardenContainer, nil)
})
It("does not error", func() {
Expect(createErr).NotTo(HaveOccurred())
})
It("returns a created container", func() {
expectedCreatedContainer := executorContainer
expectedCreatedContainer.State = executor.StateCreated
Expect(createdContainer).To(Equal(expectedCreatedContainer))
})
It("emits to loggregator", func() {
logs := fakeLogSender.GetLogs()
Expect(logs).To(HaveLen(2))
emission := logs[0]
Expect(emission.AppId).To(Equal("log-guid"))
Expect(emission.SourceType).To(Equal("some-source-name"))
Expect(emission.SourceInstance).To(Equal("1"))
Expect(string(emission.Message)).To(Equal("Creating container"))
Expect(emission.MessageType).To(Equal("OUT"))
emission = logs[1]
Expect(emission.AppId).To(Equal("log-guid"))
Expect(emission.SourceType).To(Equal("some-source-name"))
Expect(emission.SourceInstance).To(Equal("1"))
Expect(string(emission.Message)).To(Equal("Successfully created container"))
Expect(emission.MessageType).To(Equal("OUT"))
})
Describe("the exchanged Garden container", func() {
It("creates it with the state as 'created'", func() {
Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
containerSpec := fakeGardenClient.CreateArgsForCall(0)
Expect(containerSpec.Properties[gardenstore.ContainerStateProperty]).To(Equal(string(executor.StateCreated)))
})
It("creates it with the owner property", func() {
Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
containerSpec := fakeGardenClient.CreateArgsForCall(0)
Expect(containerSpec.Properties[gardenstore.ContainerOwnerProperty]).To(Equal(ownerName))
})
It("creates it with the guid as the handle", func() {
Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
containerSpec := fakeGardenClient.CreateArgsForCall(0)
Expect(containerSpec.Handle).To(Equal("some-guid"))
})
Context("when the executorContainer is Privileged", func() {
BeforeEach(func() {
executorContainer.Privileged = true
})
It("creates a privileged garden container spec", func() {
Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
containerSpec := fakeGardenClient.CreateArgsForCall(0)
Expect(containerSpec.Privileged).To(BeTrue())
})
})
Context("when the executorContainer is not Privileged", func() {
	BeforeEach(func() {
		executorContainer.Privileged = false
	})

	// FIX: the description was copy-pasted from the privileged case above;
	// this spec asserts Privileged is false, i.e. an unprivileged container.
	It("creates an unprivileged garden container spec", func() {
		Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))

		containerSpec := fakeGardenClient.CreateArgsForCall(0)
		Expect(containerSpec.Privileged).To(BeFalse())
	})
})
Context("when the Executor container has container-wide env", func() {
BeforeEach(func() {
executorContainer.Env = []executor.EnvironmentVariable{
{Name: "GLOBAL1", Value: "VALUE1"},
{Name: "GLOBAL2", Value: "VALUE2"},
}
})
It("creates the container with the env", func() {
Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
containerSpec := fakeGardenClient.CreateArgsForCall(0)
Expect(containerSpec.Env).To(Equal([]string{"GLOBAL1=VALUE1", "GLOBAL2=VALUE2"}))
})
})
Context("when the Executor container has a rootfs", func() {
BeforeEach(func() {
executorContainer.RootFSPath = "focker:///some-rootfs"
})
It("creates it with the rootfs", func() {
Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
containerSpec := fakeGardenClient.CreateArgsForCall(0)
Expect(containerSpec.RootFSPath).To(Equal("focker:///some-rootfs"))
})
})
Context("when the Executor container an allocated at time", func() {
BeforeEach(func() {
executorContainer.AllocatedAt = 123456789
})
It("creates it with the executor:allocated-at property", func() {
Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
containerSpec := fakeGardenClient.CreateArgsForCall(0)
Expect(containerSpec.Properties["executor:allocated-at"]).To(Equal("123456789"))
})
})
Context("when the Executor container has a rootfs", func() {
BeforeEach(func() {
executorContainer.RootFSPath = "some/root/path"
})
It("creates it with the executor:rootfs property", func() {
Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
containerSpec := fakeGardenClient.CreateArgsForCall(0)
Expect(containerSpec.Properties["executor:rootfs"]).To(Equal("some/root/path"))
})
})
Context("when the Executor container has log", func() {
index := 1
log := executor.LogConfig{
Guid: "my-guid",
SourceName: "source-name",
Index: index,
}
BeforeEach(func() {
executorContainer.LogConfig = log
})
It("creates it with the executor:log-config property", func() {
payload, err := json.Marshal(log)
Expect(err).NotTo(HaveOccurred())
Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
containerSpec := fakeGardenClient.CreateArgsForCall(0)
Expect(containerSpec.Properties["executor:log-config"]).To(MatchJSON(payload))
})
})
Context("when the Executor container has metrics config", func() {
index := 1
metricsConfig := executor.MetricsConfig{
Guid: "my-guid",
Index: index,
}
BeforeEach(func() {
executorContainer.MetricsConfig = metricsConfig
})
It("creates it with the executor:metrics-config property", func() {
payload, err := json.Marshal(metricsConfig)
Expect(err).NotTo(HaveOccurred())
Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
containerSpec := fakeGardenClient.CreateArgsForCall(0)
Expect(containerSpec.Properties["executor:metrics-config"]).To(MatchJSON(payload))
})
})
Context("when the Executor container has a run result", func() {
runResult := executor.ContainerRunResult{
Failed: true,
FailureReason: "because",
}
BeforeEach(func() {
executorContainer.RunResult = runResult
})
It("creates it with the executor:result property", func() {
payload, err := json.Marshal(runResult)
Expect(err).NotTo(HaveOccurred())
Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
containerSpec := fakeGardenClient.CreateArgsForCall(0)
Expect(containerSpec.Properties["executor:result"]).To(MatchJSON(payload))
})
})
})
Context("when the Executor container has tags", func() {
BeforeEach(func() {
executorContainer.Tags = executor.Tags{
"tag-one": "one",
"tag-two": "two",
}
})
It("creates it with the tag properties", func() {
Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
containerSpec := fakeGardenClient.CreateArgsForCall(0)
Expect(containerSpec.Properties["tag:tag-one"]).To(Equal("one"))
Expect(containerSpec.Properties["tag:tag-two"]).To(Equal("two"))
})
})
Context("when the Executor container has mapped ports", func() {
BeforeEach(func() {
executorContainer.Ports = []executor.PortMapping{
{HostPort: 1234, ContainerPort: 5678},
{HostPort: 4321, ContainerPort: 8765},
}
})
// FIX: the description was copy-pasted from the tags context above; this spec
// verifies that each requested port mapping is passed to the container's NetIn.
It("maps the requested ports via NetIn", func() {
	Expect(fakeGardenContainer.NetInCallCount()).To(Equal(2))

	hostPort, containerPort := fakeGardenContainer.NetInArgsForCall(0)
	Expect(hostPort).To(Equal(uint32(1234)))
	Expect(containerPort).To(Equal(uint32(5678)))

	hostPort, containerPort = fakeGardenContainer.NetInArgsForCall(1)
	Expect(hostPort).To(Equal(uint32(4321)))
	Expect(containerPort).To(Equal(uint32(8765)))
})
Context("when mapping ports fails", func() {
disaster := errors.New("oh no!")
BeforeEach(func() {
fakeGardenContainer.NetInReturns(0, 0, disaster)
})
It("returns the error", func() {
Expect(createErr).To(Equal(disaster))
})
It("deletes the container from Garden", func() {
Expect(fakeGardenClient.DestroyCallCount()).To(Equal(1))
Expect(fakeGardenClient.DestroyArgsForCall(0)).To(Equal("some-guid"))
})
})
Context("when mapping ports succeeds", func() {
BeforeEach(func() {
fakeGardenContainer.NetInStub = func(hostPort, containerPort uint32) (uint32, uint32, error) {
return hostPort + 1, containerPort + 1, nil
}
})
It("updates the port mappings on the returned container with what was actually mapped", func() {
Expect(createdContainer.Ports).To(Equal([]executor.PortMapping{
{HostPort: 1235, ContainerPort: 5679},
{HostPort: 4322, ContainerPort: 8766},
}))
})
})
})
Context("when the Executor container has egress rules", func() {
var rules []*models.SecurityGroupRule
BeforeEach(func() {
rules = []*models.SecurityGroupRule{
{
Protocol: "udp",
Destinations: []string{"0.0.0.0/0"},
PortRange: &models.PortRange{
Start: 1,
End: 1024,
},
},
{
Protocol: "tcp",
Destinations: []string{"1.2.3.4-2.3.4.5"},
Ports: []uint32{80, 443},
Log: true,
},
{
Protocol: "icmp",
Destinations: []string{"1.2.3.4"},
IcmpInfo: &models.ICMPInfo{Type: 1, Code: 2},
},
{
Protocol: "all",
Destinations: []string{"9.8.7.6", "8.7.6.5"},
Log: true,
},
}
executorContainer.EgressRules = rules
})
Context("when setting egress rules", func() {
It("creates it with the egress rules", func() {
Expect(createErr).NotTo(HaveOccurred())
})
It("updates egress rules on returned container", func() {
Expect(fakeGardenContainer.NetOutCallCount()).To(Equal(4))
_, expectedNet, err := net.ParseCIDR("0.0.0.0/0")
Expect(err).NotTo(HaveOccurred())
rule := fakeGardenContainer.NetOutArgsForCall(0)
Expect(rule.Protocol).To(Equal(garden.ProtocolUDP))
Expect(rule.Networks).To(Equal([]garden.IPRange{garden.IPRangeFromIPNet(expectedNet)}))
Expect(rule.Ports).To(Equal([]garden.PortRange{{Start: 1, End: 1024}}))
Expect(rule.ICMPs).To(BeNil())
Expect(rule.Log).To(BeFalse())
rule = fakeGardenContainer.NetOutArgsForCall(1)
Expect(rule.Networks).To(Equal([]garden.IPRange{{
Start: net.ParseIP("1.2.3.4"),
End: net.ParseIP("2.3.4.5"),
}}))
Expect(rule.Ports).To(Equal([]garden.PortRange{
garden.PortRangeFromPort(80),
garden.PortRangeFromPort(443),
}))
Expect(rule.ICMPs).To(BeNil())
Expect(rule.Log).To(BeTrue())
rule = fakeGardenContainer.NetOutArgsForCall(2)
Expect(rule.Protocol).To(Equal(garden.ProtocolICMP))
Expect(rule.Networks).To(Equal([]garden.IPRange{
garden.IPRangeFromIP(net.ParseIP("1.2.3.4")),
}))
Expect(rule.Ports).To(BeEmpty())
Expect(*rule.ICMPs).To(Equal(garden.ICMPControl{
Type: garden.ICMPType(1),
Code: garden.ICMPControlCode(2),
}))
Expect(rule.Log).To(BeFalse())
rule = fakeGardenContainer.NetOutArgsForCall(3)
Expect(rule.Protocol).To(Equal(garden.ProtocolAll))
Expect(rule.Networks).To(Equal([]garden.IPRange{
garden.IPRangeFromIP(net.ParseIP("9.8.7.6")),
garden.IPRangeFromIP(net.ParseIP("8.7.6.5")),
}))
Expect(rule.Ports).To(BeEmpty())
Expect(rule.ICMPs).To(BeNil())
Expect(rule.Log).To(BeTrue())
})
})
Context("when security rule is invalid", func() {
BeforeEach(func() {
rules = []*models.SecurityGroupRule{
{
Protocol: "foo",
Destinations: []string{"0.0.0.0/0"},
PortRange: &models.PortRange{
Start: 1,
End: 1024,
},
},
}
executorContainer.EgressRules = rules
})
It("returns the error", func() {
Expect(createErr).To(HaveOccurred())
Expect(createErr).To(Equal(executor.ErrInvalidSecurityGroup))
})
})
Context("when setting egress rules fails", func() {
disaster := errors.New("NO SECURITY FOR YOU!!!")
BeforeEach(func() {
fakeGardenContainer.NetOutReturns(disaster)
})
It("returns the error", func() {
Expect(createErr).To(HaveOccurred())
})
It("deletes the container from Garden", func() {
Expect(fakeGardenClient.DestroyCallCount()).To(Equal(1))
Expect(fakeGardenClient.DestroyArgsForCall(0)).To(Equal("some-guid"))
})
})
})
Context("when a memory limit is set", func() {
BeforeEach(func() {
executorContainer.MemoryMB = 64
})
It("creates it with the memory limit", func() {
Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
containerSpec := fakeGardenClient.CreateArgsForCall(0)
Expect(containerSpec.Limits.Memory).To(Equal(garden.MemoryLimits{
LimitInBytes: 64 * 1024 * 1024,
}))
})
It("creates it with the executor:memory-mb property", func() {
Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
containerSpec := fakeGardenClient.CreateArgsForCall(0)
Expect(containerSpec.Properties["executor:memory-mb"]).To(Equal("64"))
})
})
Context("when a disk limit is set", func() {
BeforeEach(func() {
executorContainer.DiskMB = 64
})
It("creates it the disk limit", func() {
Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
containerSpec := fakeGardenClient.CreateArgsForCall(0)
Expect(containerSpec.Limits.Disk).To(Equal(garden.DiskLimits{
ByteHard: 64 * 1024 * 1024,
InodeHard: inodeLimit,
Scope: garden.DiskLimitScopeExclusive,
}))
})
It("creates it with the executor:disk-mb property", func() {
Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
containerSpec := fakeGardenClient.CreateArgsForCall(0)
Expect(containerSpec.Properties["executor:disk-mb"]).To(Equal("64"))
})
})
Context("when a cpu limit is set", func() {
BeforeEach(func() {
executorContainer.CPUWeight = 50
})
It("creates it with the CPU shares to the ratio of the max shares", func() {
Expect(fakeGardenClient.CreateCallCount()).To(Equal(1))
containerSpec := fakeGardenClient.CreateArgsForCall(0)
Expect(containerSpec.Limits.CPU).To(Equal(garden.CPULimits{
LimitInShares: 512,
}))
})
})
Context("when gardenContainer.Info succeeds", func() {
BeforeEach(func() {
fakeGardenContainer.InfoReturns(garden.ContainerInfo{
ExternalIP: "fake-ip",
}, nil)
})
It("sets the external IP on the returned container", func() {
Expect(createdContainer.ExternalIP).To(Equal("fake-ip"))
})
})
Context("when gardenContainer.Info fails", func() {
var gardenError = errors.New("garden error")
BeforeEach(func() {
fakeGardenContainer.InfoReturns(garden.ContainerInfo{}, gardenError)
})
It("propagates the error", func() {
Expect(createErr).To(Equal(gardenError))
})
It("deletes the container from Garden", func() {
Expect(fakeGardenClient.DestroyCallCount()).To(Equal(1))
Expect(fakeGardenClient.DestroyArgsForCall(0)).To(Equal(executorContainer.Guid))
})
})
})
// When garden's Create itself fails, the error is propagated and the
// attempt is logged to loggregator as an OUT ("Creating container")
// followed by an ERR ("Failed to create container") emission.
Context("when creating the container fails", func() {
disaster := errors.New("oh no!")
BeforeEach(func() {
fakeGardenClient.CreateReturns(nil, disaster)
})
It("returns the error", func() {
Expect(createErr).To(Equal(disaster))
})
It("emits to loggregator", func() {
logs := fakeLogSender.GetLogs()
Expect(logs).To(HaveLen(2))
emission := logs[0]
Expect(emission.AppId).To(Equal("log-guid"))
Expect(emission.SourceType).To(Equal("some-source-name"))
Expect(emission.SourceInstance).To(Equal("1"))
Expect(string(emission.Message)).To(Equal("Creating container"))
Expect(emission.MessageType).To(Equal("OUT"))
emission = logs[1]
Expect(emission.AppId).To(Equal("log-guid"))
Expect(emission.SourceType).To(Equal("some-source-name"))
Expect(emission.SourceInstance).To(Equal("1"))
Expect(string(emission.Message)).To(Equal("Failed to create container"))
Expect(emission.MessageType).To(Equal("ERR"))
})
})
})
// List: garden handles become executor container Guids; container state is
// recovered from the gardenstore state property via BulkInfo.
Describe("List", func() {
var (
fakeContainer1, fakeContainer2 *gfakes.FakeContainer
)
BeforeEach(func() {
fakeContainer1 = &gfakes.FakeContainer{
HandleStub: func() string {
return "fake-handle-1"
},
}
fakeContainer2 = &gfakes.FakeContainer{
HandleStub: func() string {
return "fake-handle-2"
},
}
fakeGardenClient.ContainersReturns([]garden.Container{
fakeContainer1,
fakeContainer2,
}, nil)
fakeGardenClient.BulkInfoReturns(
map[string]garden.ContainerInfoEntry{
"fake-handle-1": garden.ContainerInfoEntry{
Info: garden.ContainerInfo{
Properties: garden.Properties{
gardenstore.ContainerStateProperty: string(executor.StateCreated),
},
},
},
"fake-handle-2": garden.ContainerInfoEntry{
Info: garden.ContainerInfo{
Properties: garden.Properties{
gardenstore.ContainerStateProperty: string(executor.StateCreated),
},
},
},
}, nil)
})
It("returns an executor container for each container in garden", func() {
containers, err := gardenStore.List(logger, nil)
Expect(err).NotTo(HaveOccurred())
Expect(containers).To(HaveLen(2))
// Order of garden's container listing is not guaranteed, hence ConsistOf.
Expect([]string{containers[0].Guid, containers[1].Guid}).To(ConsistOf("fake-handle-1", "fake-handle-2"))
Expect(containers[0].State).To(Equal(executor.StateCreated))
Expect(containers[1].State).To(Equal(executor.StateCreated))
Expect(fakeGardenClient.BulkInfoCallCount()).To(Equal(1))
Expect(fakeGardenClient.BulkInfoArgsForCall(0)).To(ConsistOf("fake-handle-1", "fake-handle-2"))
})
// The owner property scopes the listing to containers this executor created.
It("only queries garden for the containers with the right owner", func() {
_, err := gardenStore.List(logger, nil)
Expect(err).NotTo(HaveOccurred())
Expect(fakeGardenClient.ContainersArgsForCall(0)).To(Equal(garden.Properties{
gardenstore.ContainerOwnerProperty: ownerName,
}))
})
Context("when tags are specified", func() {
It("filters by the tag properties", func() {
_, err := gardenStore.List(logger, executor.Tags{"a": "b", "c": "d"})
Expect(err).NotTo(HaveOccurred())
Expect(fakeGardenClient.ContainersArgsForCall(0)).To(Equal(garden.Properties{
gardenstore.ContainerOwnerProperty: ownerName,
"tag:a": "b",
"tag:c": "d",
}))
})
})
// A per-container BulkInfo error drops that container from the results
// instead of failing the whole List call.
Context("when a container's info fails to fetch", func() {
BeforeEach(func() {
fakeGardenClient.BulkInfoReturns(
map[string]garden.ContainerInfoEntry{
"fake-handle-1": garden.ContainerInfoEntry{
Err: garden.NewError("oh no"),
},
"fake-handle-2": garden.ContainerInfoEntry{
Info: garden.ContainerInfo{
Properties: garden.Properties{
gardenstore.ContainerStateProperty: string(executor.StateCreated),
},
},
},
},
nil,
)
})
It("excludes it from the result set", func() {
containers, err := gardenStore.List(logger, nil)
Expect(err).NotTo(HaveOccurred())
Expect(containers).To(HaveLen(1))
Expect(containers[0].Guid).To(Equal("fake-handle-2"))
Expect(containers[0].State).To(Equal(executor.StateCreated))
})
})
})
// Destroy: frees any tracked step process, destroys the garden container,
// and treats "already gone" (ContainerNotFoundError) as success.
Describe("Destroy", func() {
const destroySessionPrefix = "test.destroy."
const freeProcessSessionPrefix = destroySessionPrefix + "freeing-step-process."
var destroyErr error
JustBeforeEach(func() {
destroyErr = gardenStore.Destroy(logger, "the-guid")
})
It("doesn't return an error", func() {
Expect(destroyErr).NotTo(HaveOccurred())
})
It("destroys the container", func() {
Expect(fakeGardenClient.DestroyArgsForCall(0)).To(Equal("the-guid"))
})
// Session log lines are asserted in order: start, free-process start/finish,
// overall success.
It("logs its lifecycle", func() {
Expect(logger).To(gbytes.Say(destroySessionPrefix + "started"))
Expect(logger).To(gbytes.Say(freeProcessSessionPrefix + "started"))
Expect(logger).To(gbytes.Say(freeProcessSessionPrefix + "finished"))
Expect(logger).To(gbytes.Say(destroySessionPrefix + "succeeded"))
})
Context("when the Garden client fails to destroy the given container", func() {
var gardenDestroyErr = errors.New("destroy-err")
BeforeEach(func() {
fakeGardenClient.DestroyReturns(gardenDestroyErr)
})
It("returns the Garden error", func() {
Expect(destroyErr).To(Equal(gardenDestroyErr))
})
It("logs the error", func() {
Expect(logger).To(gbytes.Say(destroySessionPrefix + "failed-to-destroy-garden-container"))
})
})
// Destroying a container garden no longer knows about is idempotent.
Context("when the Garden client returns ContainerNotFoundError", func() {
BeforeEach(func() {
fakeGardenClient.DestroyReturns(garden.ContainerNotFoundError{
Handle: "some-handle",
})
})
It("doesn't return an error", func() {
Expect(destroyErr).NotTo(HaveOccurred())
})
})
})
// GetFiles: streams a path out of the container as root; a missing container
// is translated into the executor's typed not-found error.
Describe("GetFiles", func() {
Context("when the container exists", func() {
var (
container *gfakes.FakeContainer
fakeStream *gbytes.Buffer
)
BeforeEach(func() {
fakeStream = gbytes.BufferWithBytes([]byte("stuff"))
container = &gfakes.FakeContainer{}
container.StreamOutReturns(fakeStream, nil)
fakeGardenClient.LookupReturns(container, nil)
})
It("gets the files", func() {
stream, err := gardenStore.GetFiles(logger, "the-guid", "the-path")
Expect(err).NotTo(HaveOccurred())
Expect(container.StreamOutArgsForCall(0)).To(Equal(garden.StreamOutSpec{Path: "the-path", User: "root"}))
bytes, err := ioutil.ReadAll(stream)
Expect(err).NotTo(HaveOccurred())
Expect(string(bytes)).To(Equal("stuff"))
// Closing the returned stream must close the underlying garden stream.
stream.Close()
Expect(fakeStream.Closed()).To(BeTrue())
})
})
Context("when the container doesn't exist", func() {
BeforeEach(func() {
fakeGardenClient.LookupReturns(nil, garden.ContainerNotFoundError{})
})
It("returns a container-not-found error", func() {
_, err := gardenStore.GetFiles(logger, "the-guid", "the-path")
Expect(err).To(Equal(executor.ErrContainerNotFound))
})
})
})
// Ping: a thin pass-through to the garden client's Ping.
Describe("Ping", func() {
Context("when pinging succeeds", func() {
It("succeeds", func() {
err := gardenStore.Ping()
Expect(err).NotTo(HaveOccurred())
})
})
Context("when pinging fails", func() {
disaster := errors.New("welp")
BeforeEach(func() {
fakeGardenClient.PingReturns(disaster)
})
// NOTE(review): description says "container-not-found error" but the
// assertion checks the raw ping error is passed through verbatim.
It("returns a container-not-found error", func() {
Expect(gardenStore.Ping()).To(Equal(disaster))
})
})
})
// Run: the most intricate suite here. It wires fake garden processes for the
// "run" and "monitor" actions whose exit codes are driven through the
// monitorReturns/runReturns channels, and observes state transitions via
// container properties recorded by SetPropertyStub. A shared mutex guards
// the property map and process map against the store's goroutines.
Describe("Run", func() {
const (
runSessionPrefix = "test.run."
stepSessionPrefix = runSessionPrefix + "run-step-process."
)
var (
processes map[string]*gfakes.FakeProcess
containerProperties map[string]string
orderInWhichPropertiesAreSet []string
gardenContainer *gfakes.FakeContainer
executorContainer executor.Container
err error
monitorReturns chan int
runReturns chan int
runAction *models.RunAction
monitorAction *models.RunAction
mutex sync.Mutex
)
BeforeEach(func() {
runAction = &models.RunAction{User: "me", Path: "run"}
monitorAction = &models.RunAction{User: "me", Path: "monitor"}
mutex.Lock()
defer mutex.Unlock()
monitorReturns = make(chan int)
runReturns = make(chan int)
executorContainer = executor.Container{
Guid: "some-container-handle",
State: executor.StateInitializing,
RunInfo: executor.RunInfo{
Action: models.WrapAction(runAction),
Monitor: models.WrapAction(monitorAction),
StartTimeout: 3,
},
}
// Each fake process blocks in Wait until either the test feeds an exit
// status through its channel, or the process is Signal()ed, in which
// case it reports 143 (SIGTERM-style exit).
runSignalled := make(chan struct{})
monitorSignalled := make(chan struct{})
processes = make(map[string]*gfakes.FakeProcess)
processes["run"] = new(gfakes.FakeProcess)
processes["run"].WaitStub = func() (int, error) {
select {
case status := <-runReturns:
return status, nil
case <-runSignalled:
return 143, nil
}
}
processes["run"].SignalStub = func(garden.Signal) error {
close(runSignalled)
return nil
}
processes["monitor"] = new(gfakes.FakeProcess)
processes["monitor"].WaitStub = func() (int, error) {
select {
case status := <-monitorReturns:
return status, nil
case <-monitorSignalled:
return 143, nil
}
}
processes["monitor"].SignalStub = func(garden.Signal) error {
close(monitorSignalled)
return nil
}
// Property writes are recorded both by value and by order so tests can
// assert on the ordering of result-vs-state property updates.
containerProperties = make(map[string]string)
containerProperties[gardenstore.ContainerStateProperty] = string(executor.StateCreated)
orderInWhichPropertiesAreSet = []string{}
gardenContainer = new(gfakes.FakeContainer)
gardenContainer.HandleReturns("some-container-handle")
gardenContainer.SetPropertyStub = func(key, value string) error {
mutex.Lock()
containerProperties[key] = value
orderInWhichPropertiesAreSet = append(orderInWhichPropertiesAreSet, key)
mutex.Unlock()
return nil
}
gardenContainer.InfoStub = func() (garden.ContainerInfo, error) {
mutex.Lock()
defer mutex.Unlock()
// Return a copy so callers never alias the mutex-guarded map.
props := map[string]string{}
for k, v := range containerProperties {
props[k] = v
}
return garden.ContainerInfo{
Properties: props,
}, nil
}
// Dispatch Run calls to the matching fake process by the spec's Path
// ("run" or "monitor").
gardenContainer.RunStub = func(processSpec garden.ProcessSpec, _ garden.ProcessIO) (garden.Process, error) {
mutex.Lock()
defer mutex.Unlock()
return processes[processSpec.Path], nil
}
fakeGardenClient.LookupReturns(gardenContainer, nil)
fakeGardenClient.CreateReturns(gardenContainer, nil)
})
AfterEach(func() {
// Unblock any process still waiting, then tear the container down.
close(monitorReturns)
close(runReturns)
gardenStore.Stop(logger, "some-container-handle")
gardenStore.Destroy(logger, "some-container-handle")
})
// containerStateGetter is polled by Eventually to observe state changes.
containerStateGetter := func() string {
mutex.Lock()
defer mutex.Unlock()
return containerProperties[gardenstore.ContainerStateProperty]
}
// containerResult decodes the JSON run-result property.
containerResult := func() executor.ContainerRunResult {
mutex.Lock()
defer mutex.Unlock()
resultJSON := containerProperties[gardenstore.ContainerResultProperty]
result := executor.ContainerRunResult{}
err := json.Unmarshal([]byte(resultJSON), &result)
Expect(err).NotTo(HaveOccurred())
return result
}
Context("when the garden container lookup fails", func() {
JustBeforeEach(func() {
executorContainer, err = gardenStore.Create(logger, executorContainer)
Expect(err).NotTo(HaveOccurred())
gardenStore.Run(logger, executorContainer)
})
Context("when the lookup fails because the container is not found", func() {
BeforeEach(func() {
fakeGardenClient.LookupReturns(gardenContainer, garden.ContainerNotFoundError{"some-container-handle"})
})
It("logs that the container was not found", func() {
Expect(logger).To(gbytes.Say(runSessionPrefix + "lookup-failed"))
Expect(logger).To(gbytes.Say("some-container-handle"))
})
It("does not run the container", func() {
Consistently(gardenContainer.RunCallCount).Should(Equal(0))
})
})
Context("when the lookup fails for some other reason", func() {
BeforeEach(func() {
fakeGardenClient.LookupReturns(gardenContainer, errors.New("whoops"))
})
It("logs the error", func() {
Expect(logger).To(gbytes.Say(runSessionPrefix + "lookup-failed"))
})
It("does not run the container", func() {
Consistently(gardenContainer.RunCallCount).Should(Equal(0))
})
})
})
// Without a monitor action the container is considered running as soon as
// the run step starts.
Context("when there is no monitor action", func() {
BeforeEach(func() {
executorContainer.Monitor = nil
executorContainer, err = gardenStore.Create(logger, executorContainer)
Expect(err).NotTo(HaveOccurred())
err = gardenStore.Run(logger, executorContainer)
Expect(err).NotTo(HaveOccurred())
})
It("transitions to running as soon as it starts running", func() {
Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateRunning))
Eventually(emitter.EmitCallCount).Should(Equal(1))
Expect(emitter.EmitArgsForCall(0).EventType()).To(Equal(executor.EventTypeContainerRunning))
})
Context("when the running action exits succesfully", func() {
BeforeEach(func() {
//wait for the run event to have gone through
Eventually(emitter.EmitCallCount).Should(Equal(1))
runReturns <- 0
})
It("transitions to complete and succeeded", func() {
Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateCompleted))
Eventually(emitter.EmitCallCount).Should(Equal(2))
Expect(emitter.EmitArgsForCall(1).EventType()).To(Equal(executor.EventTypeContainerComplete))
Expect(containerResult().Failed).To(BeFalse())
})
It("logs the successful exit and the transition to complete", func() {
Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "step-finished-normally"))
Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "transitioning-to-complete"))
Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "succeeded-transitioning-to-complete"))
})
})
Context("when the running action exits unsuccesfully", func() {
BeforeEach(func() {
//wait for the run event to have gone through
Eventually(emitter.EmitCallCount).Should(Equal(1))
runReturns <- 1
})
It("transitions to complete and failed", func() {
Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateCompleted))
Eventually(emitter.EmitCallCount).Should(Equal(2))
Expect(emitter.EmitArgsForCall(1).EventType()).To(Equal(executor.EventTypeContainerComplete))
Expect(containerResult().Failed).To(BeTrue())
Expect(containerResult().FailureReason).To(ContainSubstring("Exited with status 1"))
})
It("logs the unsuccessful exit", func() {
Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "step-finished-with-error"))
})
})
})
// With a monitor action, "running" is only declared once the monitor exits 0.
// The fake clock is incremented to drive the monitor's retry timer.
Context("when there is a monitor action", func() {
JustBeforeEach(func() {
executorContainer, err = gardenStore.Create(logger, executorContainer)
Expect(err).NotTo(HaveOccurred())
err = gardenStore.Run(logger, executorContainer)
Expect(err).NotTo(HaveOccurred())
Eventually(clock.WatcherCount).Should(Equal(1))
})
Context("when the monitor action succeeds", func() {
JustBeforeEach(func() {
clock.Increment(time.Second)
monitorReturns <- 0
})
It("marks the container as running and emits an event", func() {
Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateRunning))
Eventually(emitter.EmitCallCount).Should(Equal(1))
Expect(emitter.EmitArgsForCall(0).EventType()).To(Equal(executor.EventTypeContainerRunning))
})
It("logs the run session lifecycle", func() {
Expect(logger).To(gbytes.Say(runSessionPrefix + "started"))
Expect(logger).To(gbytes.Say(runSessionPrefix + "found-garden-container"))
Expect(logger).To(gbytes.Say(runSessionPrefix + "stored-step-process"))
Expect(logger).To(gbytes.Say(runSessionPrefix + "finished"))
})
It("logs that the step process started and transitioned to running", func() {
Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "started"))
Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "transitioning-to-running"))
Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "succeeded-transitioning-to-running"))
})
// Once running, a failing monitor flips the container to completed/failed.
Context("when the monitor action subsequently fails", func() {
JustBeforeEach(func() {
Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateRunning))
clock.Increment(time.Second)
monitorReturns <- 1
})
It("marks the container completed", func() {
Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateCompleted))
Eventually(emitter.EmitCallCount).Should(Equal(2))
Expect(emitter.EmitArgsForCall(1).EventType()).To(Equal(executor.EventTypeContainerComplete))
Expect(containerResult().Failed).To(BeTrue())
})
})
// Stop signals the step process and waits for it to exit; the resulting
// completion is not a failure, but is marked Stopped.
Context("when Stop is called", func() {
const stopSessionPrefix = "test.stop."
var stopped chan struct{}
BeforeEach(func() {
stopped = make(chan struct{})
})
JustBeforeEach(func() {
go func() {
stopped := stopped
gardenStore.Stop(logger, executorContainer.Guid)
close(stopped)
}()
})
It("logs that the step process was signaled and then finished, and was freed", func() {
Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "signaled"))
Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "finished"))
})
It("logs that the step process was freed", func() {
freeSessionPrefix := stopSessionPrefix + "freeing-step-process."
Eventually(logger).Should(gbytes.Say(stopSessionPrefix + "started"))
Eventually(logger).Should(gbytes.Say(freeSessionPrefix + "started"))
Eventually(logger).Should(gbytes.Say(freeSessionPrefix + "interrupting-process"))
Eventually(logger).Should(gbytes.Say(freeSessionPrefix + "finished"))
Eventually(logger).Should(gbytes.Say(stopSessionPrefix + "finished"))
})
It("completes without failure", func() {
Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateCompleted))
Eventually(emitter.EmitCallCount).Should(Equal(2))
Expect(emitter.EmitArgsForCall(1).EventType()).To(Equal(executor.EventTypeContainerComplete))
Expect(containerResult().Failed).To(BeFalse())
})
It("reports in the result that it was stopped", func() {
Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateCompleted))
Eventually(emitter.EmitCallCount).Should(Equal(2))
Expect(emitter.EmitArgsForCall(1).EventType()).To(Equal(executor.EventTypeContainerComplete))
Expect(containerResult().Stopped).To(BeTrue())
})
Context("when the step takes a while to complete", func() {
var exited chan int
BeforeEach(func() {
exited = make(chan int, 1)
processes["run"].WaitStub = func() (int, error) {
return <-exited, nil
}
})
It("waits", func() {
Consistently(stopped).ShouldNot(BeClosed())
exited <- 1
Eventually(stopped).ShouldNot(BeClosed())
})
})
})
// Destroy while running behaves like Stop plus container removal.
Context("when Destroy is called", func() {
const destroySessionPrefix = "test.destroy."
var destroyed chan struct{}
BeforeEach(func() {
destroyed = make(chan struct{})
})
JustBeforeEach(func() {
go func() {
destroyed := destroyed
gardenStore.Destroy(logger, executorContainer.Guid)
close(destroyed)
}()
})
AfterEach(func() {
Eventually(destroyed).Should(BeClosed())
})
It("logs that the step process was signaled and then finished, and was freed", func() {
Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "signaled"))
Eventually(logger).Should(gbytes.Say(stepSessionPrefix + "finished"))
})
It("logs that the step process was freed", func() {
freeSessionPrefix := destroySessionPrefix + "freeing-step-process."
Eventually(logger).Should(gbytes.Say(destroySessionPrefix + "started"))
Eventually(logger).Should(gbytes.Say(freeSessionPrefix + "started"))
Eventually(logger).Should(gbytes.Say(freeSessionPrefix + "interrupting-process"))
Eventually(logger).Should(gbytes.Say(freeSessionPrefix + "finished"))
Eventually(logger).Should(gbytes.Say(destroySessionPrefix + "succeeded"))
})
It("completes without failure", func() {
Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateCompleted))
Eventually(emitter.EmitCallCount).Should(Equal(2))
Expect(emitter.EmitArgsForCall(1).EventType()).To(Equal(executor.EventTypeContainerComplete))
Expect(containerResult().Failed).To(BeFalse())
})
It("reports in the result that it was stopped", func() {
Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateCompleted))
Eventually(emitter.EmitCallCount).Should(Equal(2))
Expect(emitter.EmitArgsForCall(1).EventType()).To(Equal(executor.EventTypeContainerComplete))
Expect(containerResult().Stopped).To(BeTrue())
})
})
})
// A monitor that never succeeds keeps the container in "created" until the
// StartTimeout (3s) elapses, then fails it.
Context("when monitor persistently fails", func() {
JustBeforeEach(func() {
clock.Increment(time.Second)
monitorReturns <- 1
})
It("doesn't transition to running", func() {
Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateCreated))
Eventually(emitter.EmitCallCount).Should(Equal(0))
})
Context("when the time to start elapses", func() {
JustBeforeEach(func() {
By("ticking out to 3 seconds (note we had just ticked once)")
for i := 0; i < 3; i++ {
//ugh, got to wait until the timer is being read from before we increment time
time.Sleep(10 * time.Millisecond)
clock.Increment(time.Second)
monitorReturns <- 1
}
})
It("transitions to completed and failed", func() {
Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateCompleted))
Eventually(emitter.EmitCallCount).Should(Equal(1))
Expect(emitter.EmitArgsForCall(0).EventType()).To(Equal(executor.EventTypeContainerComplete))
Expect(containerResult().Failed).To(BeTrue())
})
})
})
})
// Completion ordering: the result property must be written before the state
// property so that pollers who see "completed" always find a result.
Context("when marking the task as complete", func() {
BeforeEach(func() {
executorContainer, err = gardenStore.Create(logger, executorContainer)
Expect(err).NotTo(HaveOccurred())
err = gardenStore.Run(logger, executorContainer)
Expect(err).NotTo(HaveOccurred())
Eventually(clock.WatcherCount).Should(Equal(1))
clock.Increment(time.Second)
monitorReturns <- 0
Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateRunning))
clock.Increment(time.Second)
monitorReturns <- 1
Eventually(containerStateGetter).Should(BeEquivalentTo(executor.StateCompleted))
})
It("always sets the failure result first, and then the state so that things polling on sate will see the result", func() {
mutex.Lock()
defer mutex.Unlock()
n := len(orderInWhichPropertiesAreSet)
Expect(n).To(BeNumerically(">", 2))
Expect(orderInWhichPropertiesAreSet[n-2]).To(Equal(gardenstore.ContainerResultProperty))
Expect(orderInWhichPropertiesAreSet[n-1]).To(Equal(gardenstore.ContainerStateProperty))
})
})
})
// Stop: when the guid is unknown locally, the store still attempts to destroy
// the real garden container, then reports not-found (or garden's own error).
Describe("Stop", func() {
Context("when the garden container is not found in the local store", func() {
var stopErr error
JustBeforeEach(func() {
stopErr = gardenStore.Stop(logger, "an-unknown-guid")
})
It("tries to destroy the real garden container", func() {
Expect(fakeGardenClient.DestroyCallCount()).To(Equal(1))
Expect(fakeGardenClient.DestroyArgsForCall(0)).To(Equal("an-unknown-guid"))
})
It("fails with container not found", func() {
Expect(stopErr).To(Equal(executor.ErrContainerNotFound))
})
Context("when destroying the garden container fails", func() {
Context("with a container not found", func() {
BeforeEach(func() {
fakeGardenClient.DestroyReturns(garden.ContainerNotFoundError{})
})
// garden's not-found error is normalized to the executor's typed error.
It("fails with executor's container not found error", func() {
Expect(stopErr).To(Equal(executor.ErrContainerNotFound))
})
})
Context("with any other error", func() {
var expectedError = errors.New("woops")
BeforeEach(func() {
fakeGardenClient.DestroyReturns(expectedError)
})
It("fails with the original error", func() {
Expect(stopErr).To(Equal(expectedError))
})
})
})
})
})
// Metrics: garden BulkMetrics entries are mapped to executor.ContainerMetrics.
// Only TotalUsageTowardLimit, ExclusiveBytesUsed and CPU Usage are used; the
// other stat fields are deliberately ignored.
Describe("Metrics", func() {
var (
metrics map[string]executor.ContainerMetrics
metricsErr error
)
JustBeforeEach(func() {
metrics, metricsErr = gardenStore.Metrics(logger, []string{"some-container-handle"})
})
BeforeEach(func() {
containerMetrics := garden.Metrics{
MemoryStat: garden.ContainerMemoryStat{
TotalRss: 100, // ignored
TotalCache: 12, // ignored
TotalInactiveFile: 1, // ignored
TotalUsageTowardLimit: 987,
},
DiskStat: garden.ContainerDiskStat{
ExclusiveBytesUsed: 222,
ExclusiveInodesUsed: 333,
},
CPUStat: garden.ContainerCPUStat{
Usage: 123,
User: 456, // ignored
System: 789, // ignored
},
}
fakeGardenClient.BulkMetricsReturns(map[string]garden.ContainerMetricsEntry{
"some-container-handle": garden.ContainerMetricsEntry{
Metrics: containerMetrics,
Err: nil,
},
}, nil)
})
It("does not error", func() {
Expect(metricsErr).NotTo(HaveOccurred())
})
It("gets metrics from garden", func() {
Expect(fakeGardenClient.BulkMetricsCallCount()).To(Equal(1))
Expect(metrics).To(HaveLen(1))
Expect(metrics["some-container-handle"]).To(Equal(executor.ContainerMetrics{
MemoryUsageInBytes: 987,
DiskUsageInBytes: 222,
TimeSpentInCPU: 123,
}))
})
// Per-entry errors drop that container from the result, mirroring List's
// behavior for BulkInfo errors.
Context("when a container metric entry has an error", func() {
BeforeEach(func() {
fakeGardenClient.BulkMetricsReturns(map[string]garden.ContainerMetricsEntry{
"some-container-handle": garden.ContainerMetricsEntry{
Err: garden.NewError("oh no"),
},
}, nil)
})
It("does not error", func() {
Expect(metricsErr).NotTo(HaveOccurred())
})
It("ignores any container with errors", func() {
Expect(fakeGardenClient.BulkMetricsCallCount()).To(Equal(1))
Expect(metrics).To(HaveLen(0))
})
})
// A wholesale BulkMetrics failure, by contrast, is propagated.
// NOTE(review): the description "does not error" contradicts the assertion,
// which expects an error.
Context("when a bulk metrics returns an error", func() {
BeforeEach(func() {
fakeGardenClient.BulkMetricsReturns(nil, errors.New("oh no"))
})
It("does not error", func() {
Expect(metricsErr).To(HaveOccurred())
})
})
})
// Transitions: a table-driven spec generated from gardenStoreTransitionExpectation
// rows (see the type and its methods below), asserting which executor-state ->
// operation transitions are legal.
Describe("Transitions", func() {
var executorContainer executor.Container
BeforeEach(func() {
executorContainer = executor.Container{
Guid: "some-container-handle",
RunInfo: executor.RunInfo{
Action: models.WrapAction(action),
Monitor: models.WrapAction(action),
},
}
gardenContainer := new(gfakes.FakeContainer)
gardenContainer.RunReturns(new(gfakes.FakeProcess), nil)
fakeGardenClient.LookupReturns(gardenContainer, nil)
fakeGardenClient.CreateReturns(gardenContainer, nil)
})
expectations := []gardenStoreTransitionExpectation{
{to: "create", from: "non-existent", assertError: "occurs"},
{to: "create", from: "reserved", assertError: "occurs"},
{to: "create", from: "initializing", assertError: "does not occur"},
{to: "create", from: "created", assertError: "occurs"},
{to: "create", from: "running", assertError: "occurs"},
{to: "create", from: "completed", assertError: "occurs"},
{to: "run", from: "non-existent", assertError: "occurs"},
{to: "run", from: "reserved", assertError: "occurs"},
{to: "run", from: "initializing", assertError: "occurs"},
{to: "run", from: "created", assertError: "does not occur"},
{to: "run", from: "running", assertError: "occurs"},
{to: "run", from: "completed", assertError: "occurs"},
}
for _, expectation := range expectations {
// Shadow the loop variable so each generated It captures its own row
// (required before Go 1.22 loop-variable semantics).
expectation := expectation
It("error "+expectation.assertError+" when transitioning from "+expectation.from+" to "+expectation.to, func() {
expectation.driveFromState(&executorContainer)
err := expectation.transitionToState(gardenStore, executorContainer)
expectation.checkErrorResult(err)
})
}
})
})
// gardenStoreTransitionExpectation is one row of the table-driven
// "Transitions" spec: drive the container into the "from" state, attempt the
// "to" operation ("create" or "run"), and assert whether an error "occurs"
// or "does not occur".
type gardenStoreTransitionExpectation struct {
from string
to string
assertError string
}
// driveFromState mutates container so it is in the executor state named by
// expectation.from. "non-existent" leaves the container untouched; any
// unrecognized name fails the spec immediately.
func (expectation gardenStoreTransitionExpectation) driveFromState(container *executor.Container) {
	// "non-existent" means the container has no state to apply at all.
	if expectation.from == "non-existent" {
		return
	}
	stateByName := map[string]executor.State{
		"reserved":     executor.StateReserved,
		"initializing": executor.StateInitializing,
		"created":      executor.StateCreated,
		"running":      executor.StateRunning,
		"completed":    executor.StateCompleted,
	}
	state, known := stateByName[expectation.from]
	if !known {
		Fail("unknown 'from' state: " + expectation.from)
	}
	container.State = state
}
// transitionToState performs the operation named by expectation.to against
// gardenStore and returns whatever error the store produced. An unrecognized
// operation name fails the spec immediately.
func (expectation gardenStoreTransitionExpectation) transitionToState(gardenStore *gardenstore.GardenStore, container executor.Container) error {
	if expectation.to == "create" {
		_, createErr := gardenStore.Create(lagertest.NewTestLogger("test"), container)
		return createErr
	}
	if expectation.to == "run" {
		return gardenStore.Run(lagertest.NewTestLogger("test"), container)
	}
	Fail("unknown 'to' state: " + expectation.to)
	return nil
}
// checkErrorResult asserts err against the expectation's assertError field:
// "occurs" requires a non-nil error, "does not occur" requires nil. Any
// other value is a bug in the expectation table and fails the spec.
func (expectation gardenStoreTransitionExpectation) checkErrorResult(err error) {
	switch expectation.assertError {
	case "occurs":
		Expect(err).To(HaveOccurred())
	case "does not occur":
		Expect(err).NotTo(HaveOccurred())
	default:
		// Fix: the message previously said 'assertErr', which matches neither
		// the struct field (assertError) nor anything else in the file; use
		// the real field name so the failure is greppable.
		Fail("unknown 'assertError' expectation: " + expectation.assertError)
	}
}
|
package gorm_test
import (
"fmt"
"testing"
"time"
"github.com/jinzhu/gorm"
)
// Person is the owning side of a many2many relationship with Address,
// joined through the custom person_addresses join table.
type Person struct {
	Id        int
	Name      string
	Addresses []*Address `gorm:"many2many:person_addresses;"`
}
// PersonAddress is the custom join-table model between Person and Address.
// DeletedAt allows individual association rows to be soft-deleted.
type PersonAddress struct {
	PersonID  int
	AddressID int
	DeletedAt time.Time
	CreatedAt time.Time
}
// Table reports the database table backing this join-table handler.
func (*PersonAddress) Table(db *gorm.DB) string {
	const tableName = "person_addresses"
	return tableName
}
// Add upserts the join row linking foreignValue (a Person) with
// associationValue (an Address): it looks the row up by both primary keys
// and resets DeletedAt to NULL, so a previously soft-deleted link is revived
// rather than duplicated.
func (*PersonAddress) Add(db *gorm.DB, foreignValue interface{}, associationValue interface{}) error {
	return db.Where(map[string]interface{}{
		"person_id":  db.NewScope(foreignValue).PrimaryKeyValue(),
		"address_id": db.NewScope(associationValue).PrimaryKeyValue(),
	}).Assign(map[string]interface{}{
		// NOTE(review): Assign receives the full model values while Where uses
		// primary keys — presumably gorm resolves these to the key columns;
		// confirm against the JoinTableHandler contract.
		"person_id":  foreignValue,
		"address_id": associationValue,
		"DeletedAt":  gorm.Expr("NULL"),
	}).FirstOrCreate(&PersonAddress{}).Error
}
// Delete removes join rows for this handler.
// NOTE(review): the sources variadic is ignored, so the scope of the delete
// is whatever conditions gorm has already attached to db — confirm that the
// association code narrows db before calling this.
func (*PersonAddress) Delete(db *gorm.DB, sources ...interface{}) error {
	return db.Delete(&PersonAddress{}).Error
}
// JoinWith returns a query over the join table restricted to rows that are
// not soft-deleted (deleted_at NULL, or below the near-zero-time sentinel —
// presumably guarding against zero-value timestamps; confirm per database).
//
// Fix: the original selected from person_addresses without joining it to
// addresses, so association reads (e.g. Count) were not scoped through the
// join; add the INNER JOIN on address_id.
func (pa *PersonAddress) JoinWith(db *gorm.DB, source interface{}) *gorm.DB {
	table := pa.Table(db)
	return db.Table(table).
		Joins("INNER JOIN person_addresses ON person_addresses.address_id = addresses.id").
		Where(fmt.Sprintf("%v.deleted_at IS NULL OR %v.deleted_at <= '0001-01-02'", table, table))
}
// TestJoinTable exercises the custom join-table handler: soft-deleting one
// association, counting through the association, reading soft-deleted rows
// with Unscoped, and clearing all associations.
func TestJoinTable(t *testing.T) {
	// Start from a clean join table so counts below are deterministic.
	DB.Exec("drop table person_addresses;")
	DB.AutoMigrate(&Person{})
	DB.SetJoinTableHandler(&Person{}, "Addresses", &PersonAddress{})

	address1 := &Address{Address1: "address 1"}
	address2 := &Address{Address1: "address 2"}
	person := &Person{Name: "person", Addresses: []*Address{address1, address2}}
	DB.Save(person)

	// Soft-deletes the join row (handler keeps the row with DeletedAt set).
	DB.Model(person).Association("Addresses").Delete(address1)

	if DB.Find(&[]PersonAddress{}, "person_id = ?", person.Id).RowsAffected != 1 {
		t.Errorf("Should found one address")
	}
	// Fix: dropped the leftover Debug() call that spammed SQL logging.
	if DB.Model(person).Association("Addresses").Count() != 1 {
		t.Errorf("Should found one address")
	}
	// Unscoped sees the soft-deleted row as well.
	if DB.Unscoped().Find(&[]PersonAddress{}, "person_id = ?", person.Id).RowsAffected != 2 {
		t.Errorf("Found two addresses with Unscoped")
	}
	if DB.Model(person).Association("Addresses").Clear(); DB.Model(person).Association("Addresses").Count() != 0 {
		t.Errorf("Should deleted all addresses")
	}
}
Passed all tests
package gorm_test
import (
"fmt"
"testing"
"time"
"github.com/jinzhu/gorm"
)
// Person owns a many2many relationship with Address through the custom
// person_addresses join table.
type Person struct {
	Id        int
	Name      string
	Addresses []*Address `gorm:"many2many:person_addresses;"`
}
// PersonAddress models the Person/Address join table; DeletedAt enables
// soft deletion of individual association rows.
type PersonAddress struct {
	PersonID  int
	AddressID int
	DeletedAt time.Time
	CreatedAt time.Time
}
// Table reports the database table name backing this join-table handler.
func (*PersonAddress) Table(db *gorm.DB) string {
	return "person_addresses"
}
// Add upserts the join row between foreignValue (Person) and
// associationValue (Address), clearing DeletedAt so a soft-deleted link is
// revived instead of duplicated.
func (*PersonAddress) Add(db *gorm.DB, foreignValue interface{}, associationValue interface{}) error {
	return db.Where(map[string]interface{}{
		"person_id":  db.NewScope(foreignValue).PrimaryKeyValue(),
		"address_id": db.NewScope(associationValue).PrimaryKeyValue(),
	}).Assign(map[string]interface{}{
		// NOTE(review): Assign is given the full model values while Where uses
		// primary keys — presumably gorm maps them to the key columns; verify.
		"person_id":  foreignValue,
		"address_id": associationValue,
		"DeletedAt":  gorm.Expr("NULL"),
	}).FirstOrCreate(&PersonAddress{}).Error
}
// Delete removes join rows; the sources variadic is ignored, so the delete's
// scope is whatever conditions are already attached to db.
// NOTE(review): confirm gorm narrows db to the affected rows before calling.
func (*PersonAddress) Delete(db *gorm.DB, sources ...interface{}) error {
	return db.Delete(&PersonAddress{}).Error
}
// JoinWith joins addresses to person_addresses on address_id and filters out
// soft-deleted join rows (deleted_at NULL, or below the near-zero-time
// sentinel — presumably guarding against zero-value timestamps; confirm).
func (pa *PersonAddress) JoinWith(db *gorm.DB, source interface{}) *gorm.DB {
	table := pa.Table(db)
	return db.Table(table).Joins("INNER JOIN person_addresses ON person_addresses.address_id = addresses.id").Where(fmt.Sprintf("%v.deleted_at IS NULL OR %v.deleted_at <= '0001-01-02'", table, table))
}
// TestJoinTable exercises the custom join-table handler: soft-deleting one
// association, counting through the association, reading soft-deleted rows
// with Unscoped, and clearing all associations.
func TestJoinTable(t *testing.T) {
	// Start from a clean join table so the counts below are deterministic.
	DB.Exec("drop table person_addresses;")
	DB.AutoMigrate(&Person{})
	DB.SetJoinTableHandler(&Person{}, "Addresses", &PersonAddress{})
	address1 := &Address{Address1: "address 1"}
	address2 := &Address{Address1: "address 2"}
	person := &Person{Name: "person", Addresses: []*Address{address1, address2}}
	DB.Save(person)
	// Soft-deletes the join row via the handler (row kept, DeletedAt set).
	DB.Model(person).Association("Addresses").Delete(address1)
	if DB.Find(&[]PersonAddress{}, "person_id = ?", person.Id).RowsAffected != 1 {
		t.Errorf("Should found one address")
	}
	if DB.Model(person).Association("Addresses").Count() != 1 {
		t.Errorf("Should found one address")
	}
	// Unscoped bypasses the soft-delete filter and sees both rows.
	if DB.Unscoped().Find(&[]PersonAddress{}, "person_id = ?", person.Id).RowsAffected != 2 {
		t.Errorf("Found two addresses with Unscoped")
	}
	if DB.Model(person).Association("Addresses").Clear(); DB.Model(person).Association("Addresses").Count() != 0 {
		t.Errorf("Should deleted all addresses")
	}
}
|
package gps
import (
"reflect"
"testing"
)
// TestLockedProjectSorting verifies SortLockedProjects orders entries by
// project identifier; the attached version plays no role in the ordering.
func TestLockedProjectSorting(t *testing.T) {
	// version doesn't matter here
	lps := []LockedProject{
		NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), nil),
		NewLockedProject(mkPI("foo"), NewVersion("nada"), nil),
		NewLockedProject(mkPI("bar"), NewVersion("zip"), nil),
		NewLockedProject(mkPI("qux"), NewVersion("zilch"), nil),
	}
	lps2 := make([]LockedProject, len(lps))
	copy(lps2, lps)
	SortLockedProjects(lps2)
	// only the two should have switched positions
	lps[0], lps[2] = lps[2], lps[0]
	if !reflect.DeepEqual(lps, lps2) {
		t.Errorf("SortLockedProject did not sort as expected:\n\t(GOT) %s\n\t(WNT) %s", lps2, lps)
	}
}
// TestLockedProjectsEq exercises LockedProject.Eq across identity, revision,
// and package-list differences. Eq must be symmetric, so each pair is
// checked in both directions.
func TestLockedProjectsEq(t *testing.T) {
	lps := []LockedProject{
		NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), []string{"gps"}),
		NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), nil),
		NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), []string{"gps", "flugle"}),
		NewLockedProject(mkPI("foo"), NewVersion("nada"), []string{"foo"}),
		NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), []string{"flugle", "gps"}),
		NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0").Is("278a227dfc3d595a33a77ff3f841fd8ca1bc8cd0"), []string{"gps"}),
	}

	fix := []struct {
		l1, l2   int
		shouldeq bool
		err      string
	}{
		{0, 0, true, "lp does not eq self"},
		{0, 5, false, "should not eq with different rev"},
		// Fix: the revision-paired entry was only ever checked for
		// inequality; also assert that an lp with a rev equals itself.
		{5, 5, true, "should eq with same rev"},
		{0, 1, false, "should not eq when other pkg list is empty"},
		{0, 2, false, "should not eq when other pkg list is longer"},
		{0, 4, false, "should not eq when pkg lists are out of order"},
		{0, 3, false, "should not eq totally different lp"},
	}

	for _, f := range fix {
		if f.shouldeq {
			if !lps[f.l1].Eq(lps[f.l2]) {
				t.Error(f.err)
			}
			if !lps[f.l2].Eq(lps[f.l1]) {
				t.Error(f.err + (" (reversed)"))
			}
		} else {
			if lps[f.l1].Eq(lps[f.l2]) {
				t.Error(f.err)
			}
			if lps[f.l2].Eq(lps[f.l1]) {
				t.Error(f.err + (" (reversed)"))
			}
		}
	}
}
Check that locked projects compare equal when they share the same revision
package gps
import (
"reflect"
"testing"
)
// TestLockedProjectSorting verifies SortLockedProjects orders entries by
// project identifier; the version attached to each entry is irrelevant.
func TestLockedProjectSorting(t *testing.T) {
	unsorted := []LockedProject{
		NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), nil),
		NewLockedProject(mkPI("foo"), NewVersion("nada"), nil),
		NewLockedProject(mkPI("bar"), NewVersion("zip"), nil),
		NewLockedProject(mkPI("qux"), NewVersion("zilch"), nil),
	}
	sorted := append([]LockedProject(nil), unsorted...)

	SortLockedProjects(sorted)

	// Sorting should only swap the first and third entries.
	unsorted[0], unsorted[2] = unsorted[2], unsorted[0]
	if !reflect.DeepEqual(unsorted, sorted) {
		t.Errorf("SortLockedProject did not sort as expected:\n\t(GOT) %s\n\t(WNT) %s", sorted, unsorted)
	}
}
// TestLockedProjectsEq exercises LockedProject.Eq across identity, revision,
// and package-list differences; each pair is asserted in both directions
// because Eq must be symmetric.
func TestLockedProjectsEq(t *testing.T) {
	lps := []LockedProject{
		NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), []string{"gps"}),
		NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), nil),
		NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), []string{"gps", "flugle"}),
		NewLockedProject(mkPI("foo"), NewVersion("nada"), []string{"foo"}),
		NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), []string{"flugle", "gps"}),
		NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0").Is("278a227dfc3d595a33a77ff3f841fd8ca1bc8cd0"), []string{"gps"}),
	}
	// l1/l2 index into lps; shouldeq is the expected Eq result.
	fix := []struct {
		l1, l2   int
		shouldeq bool
		err      string
	}{
		{0, 0, true, "lp does not eq self"},
		{0, 5, false, "should not eq with different rev"},
		{5, 5, true, "should eq with same rev"},
		{0, 1, false, "should not eq when other pkg list is empty"},
		{0, 2, false, "should not eq when other pkg list is longer"},
		{0, 4, false, "should not eq when pkg lists are out of order"},
		{0, 3, false, "should not eq totally different lp"},
	}
	for _, f := range fix {
		if f.shouldeq {
			if !lps[f.l1].Eq(lps[f.l2]) {
				t.Error(f.err)
			}
			if !lps[f.l2].Eq(lps[f.l1]) {
				t.Error(f.err + (" (reversed)"))
			}
		} else {
			if lps[f.l1].Eq(lps[f.l2]) {
				t.Error(f.err)
			}
			if lps[f.l2].Eq(lps[f.l1]) {
				t.Error(f.err + (" (reversed)"))
			}
		}
	}
}
|
// Copyright 2015 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package mgrconfig
import (
"encoding/json"
"fmt"
"path/filepath"
"regexp"
"strings"
"github.com/google/syzkaller/pkg/config"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/prog"
_ "github.com/google/syzkaller/sys"
"github.com/google/syzkaller/vm"
)
// Config is the syz-manager configuration, parsed from JSON by LoadData /
// LoadFile and validated/normalized by load. Underscored field names mirror
// the config-file keys.
type Config struct {
	Name   string // Instance name (used for identification and as GCE instance prefix)
	Target string // Target OS/arch, e.g. "linux/arm64" or "linux/amd64/386" (amd64 OS with 386 test process)
	Http   string // TCP address to serve HTTP stats page (e.g. "localhost:50000")
	Rpc    string // TCP address to serve RPC for fuzzer processes (optional)

	Workdir    string // manager working directory (made absolute by load)
	Vmlinux    string // vmlinux path (made absolute; default Kernel_Src is derived from it)
	Kernel_Src string // kernel source directory
	Tag        string // arbitrary optional tag that is saved along with crash reports (e.g. branch/commit)
	Image      string // linux image for VMs
	Sshkey     string // ssh key for the image (may be empty for some VM types)
	Ssh_User   string // ssh user ("root" by default)

	// Hub connection; if Hub_Client is set, Name/Hub_Addr/Hub_Key must be too.
	Hub_Client string
	Hub_Addr   string
	Hub_Key    string

	// Dashboard connection; if Dashboard_Client is set, Name/Dashboard_Addr/
	// Dashboard_Key must be too.
	Dashboard_Client string
	Dashboard_Addr   string
	Dashboard_Key    string

	Syzkaller string // path to syzkaller checkout (syz-manager will look for binaries in bin subdir)
	Procs     int    // number of parallel processes inside of every VM
	Sandbox   string // type of sandbox to use during fuzzing:
	// "none": don't do anything special (has false positives, e.g. due to killing init)
	// "setuid": impersonate into user nobody (65534), default
	// "namespace": create a new namespace for fuzzer using CLONE_NEWNS/CLONE_NEWNET/CLONE_NEWPID/etc,
	// requires building kernel with CONFIG_NAMESPACES, CONFIG_UTS_NS, CONFIG_USER_NS, CONFIG_PID_NS and CONFIG_NET_NS.

	Cover     bool // use kcov coverage (default: true)
	Leak      bool // do memory leak checking
	Reproduce bool // reproduce, localize and minimize crashers (on by default)

	Enable_Syscalls  []string
	Disable_Syscalls []string
	Suppressions     []string // don't save reports matching these regexps, but reboot VM after them
	Ignores          []string // completely ignore reports matching these regexps (don't save nor reboot)

	Type string          // VM type (qemu, kvm, local)
	VM   json.RawMessage // VM-type-specific config

	// Implementation details beyond this point.
	ParsedSuppressions []*regexp.Regexp `json:"-"`
	ParsedIgnores      []*regexp.Regexp `json:"-"`
	// Parsed Target:
	TargetOS     string `json:"-"`
	TargetArch   string `json:"-"`
	TargetVMArch string `json:"-"`
	// Syzkaller binaries that we are going to use:
	SyzFuzzerBin   string `json:"-"`
	SyzExecprogBin string `json:"-"`
	SyzExecutorBin string `json:"-"`
}
// LoadData parses, defaults, and validates a manager config from raw bytes.
func LoadData(data []byte) (*Config, error) {
	return load(data, "")
}
// LoadFile parses, defaults, and validates a manager config from a file.
func LoadFile(filename string) (*Config, error) {
	return load(nil, filename)
}
// DefaultValues returns a Config pre-populated with the defaults that apply
// before the user's JSON is merged on top (see load).
func DefaultValues() *Config {
	return &Config{
		Ssh_User:  "root",
		Cover:     true,
		Reproduce: true,
		Sandbox:   "setuid",
		Rpc:       ":0",
		Procs:     1,
	}
}
// load parses a Config from data (when non-nil) or from filename, applies
// defaults, derives target/binary paths, and validates all parameters.
// Returns an error on the first invalid or missing parameter.
func load(data []byte, filename string) (*Config, error) {
	cfg := DefaultValues()
	if data != nil {
		if err := config.LoadData(data, cfg); err != nil {
			return nil, err
		}
	} else {
		if err := config.LoadFile(filename, cfg); err != nil {
			return nil, err
		}
	}
	var err error
	cfg.TargetOS, cfg.TargetVMArch, cfg.TargetArch, err = SplitTarget(cfg.Target)
	if err != nil {
		return nil, err
	}
	// Resolve a per-target binary path inside the syzkaller checkout.
	targetBin := func(name, arch string) string {
		exe := ""
		if cfg.TargetOS == "windows" {
			exe = ".exe"
		}
		return filepath.Join(cfg.Syzkaller, "bin", cfg.TargetOS+"_"+arch, name+exe)
	}
	// fuzzer/execprog run inside the VM (vmarch); executor uses the test arch.
	cfg.SyzFuzzerBin = targetBin("syz-fuzzer", cfg.TargetVMArch)
	cfg.SyzExecprogBin = targetBin("syz-execprog", cfg.TargetVMArch)
	cfg.SyzExecutorBin = targetBin("syz-executor", cfg.TargetArch)
	if !osutil.IsExist(cfg.SyzFuzzerBin) {
		return nil, fmt.Errorf("bad config syzkaller param: can't find %v", cfg.SyzFuzzerBin)
	}
	if !osutil.IsExist(cfg.SyzExecprogBin) {
		return nil, fmt.Errorf("bad config syzkaller param: can't find %v", cfg.SyzExecprogBin)
	}
	if !osutil.IsExist(cfg.SyzExecutorBin) {
		return nil, fmt.Errorf("bad config syzkaller param: can't find %v", cfg.SyzExecutorBin)
	}
	if cfg.Http == "" {
		return nil, fmt.Errorf("config param http is empty")
	}
	if cfg.Workdir == "" {
		return nil, fmt.Errorf("config param workdir is empty")
	}
	if cfg.Type == "" {
		return nil, fmt.Errorf("config param type is empty")
	}
	if cfg.Procs < 1 || cfg.Procs > 32 {
		return nil, fmt.Errorf("bad config param procs: '%v', want [1, 32]", cfg.Procs)
	}
	switch cfg.Sandbox {
	case "none", "setuid", "namespace":
	default:
		return nil, fmt.Errorf("config param sandbox must contain one of none/setuid/namespace")
	}
	// Normalize all paths to absolute before anything captures them.
	cfg.Workdir = osutil.Abs(cfg.Workdir)
	cfg.Vmlinux = osutil.Abs(cfg.Vmlinux)
	cfg.Syzkaller = osutil.Abs(cfg.Syzkaller)
	if cfg.Kernel_Src == "" {
		cfg.Kernel_Src = filepath.Dir(cfg.Vmlinux) // assume in-tree build by default
	}
	if err := parseSuppressions(cfg); err != nil {
		return nil, err
	}
	if cfg.Hub_Client != "" && (cfg.Name == "" || cfg.Hub_Addr == "" || cfg.Hub_Key == "") {
		return nil, fmt.Errorf("hub_client is set, but name/hub_addr/hub_key is empty")
	}
	if cfg.Dashboard_Client != "" && (cfg.Name == "" ||
		cfg.Dashboard_Addr == "" ||
		cfg.Dashboard_Key == "") {
		return nil, fmt.Errorf("dashboard_client is set, but name/dashboard_addr/dashboard_key is empty")
	}
	return cfg, nil
}
// SplitTarget parses a target spec of the form "os/vmarch[/arch]" and
// returns (os, vmarch, arch, err). With only two components, arch defaults
// to vmarch.
func SplitTarget(target string) (string, string, string, error) {
	if target == "" {
		return "", "", "", fmt.Errorf("target is empty")
	}
	targetParts := strings.Split(target, "/")
	if len(targetParts) != 2 && len(targetParts) != 3 {
		// Fix: include the offending value so the user can see what's wrong.
		return "", "", "", fmt.Errorf("bad config param target: %q", target)
	}
	os := targetParts[0]
	vmarch := targetParts[1]
	arch := targetParts[1]
	if len(targetParts) == 3 {
		arch = targetParts[2]
	}
	return os, vmarch, arch, nil
}
// ParseEnabledSyscalls resolves cfg.Enable_Syscalls/Disable_Syscalls into a
// set of syscall IDs for the configured target. Entries match a syscall by
// CallName, full Name, or a trailing-'*' prefix pattern. An entry that
// matches nothing is an error.
func ParseEnabledSyscalls(cfg *Config) (map[int]bool, error) {
	match := func(call *prog.Syscall, str string) bool {
		if str == call.CallName || str == call.Name {
			return true
		}
		if len(str) > 1 && str[len(str)-1] == '*' && strings.HasPrefix(call.Name, str[:len(str)-1]) {
			return true
		}
		return false
	}
	target, err := prog.GetTarget(cfg.TargetOS, cfg.TargetArch)
	if err != nil {
		return nil, err
	}
	syscalls := make(map[int]bool)
	if len(cfg.Enable_Syscalls) != 0 {
		for _, c := range cfg.Enable_Syscalls {
			n := 0
			for _, call := range target.Syscalls {
				if match(call, c) {
					syscalls[call.ID] = true
					n++
				}
			}
			if n == 0 {
				return nil, fmt.Errorf("unknown enabled syscall: %v", c)
			}
		}
	} else {
		// No explicit enable list: start from all syscalls.
		for _, call := range target.Syscalls {
			syscalls[call.ID] = true
		}
	}
	// Disables are applied after enables and therefore win on overlap.
	for _, c := range cfg.Disable_Syscalls {
		n := 0
		for _, call := range target.Syscalls {
			if match(call, c) {
				delete(syscalls, call.ID)
				n++
			}
		}
		if n == 0 {
			return nil, fmt.Errorf("unknown disabled syscall: %v", c)
		}
	}
	return syscalls, nil
}
// parseSuppressions compiles cfg.Suppressions plus a builtin set (executor
// startup failures and OOM-induced crashes) into cfg.ParsedSuppressions,
// and cfg.Ignores into cfg.ParsedIgnores. Returns an error for any regexp
// that fails to compile.
func parseSuppressions(cfg *Config) error {
	// Add some builtin suppressions.
	supp := append(cfg.Suppressions, []string{
		"panic: failed to start executor binary",
		"panic: executor failed: pthread_create failed",
		"panic: failed to create temp dir",
		"fatal error: runtime: out of memory",
		"fatal error: runtime: cannot allocate memory",
		"fatal error: unexpected signal during runtime execution", // presumably OOM turned into SIGBUS
		"signal SIGBUS: bus error",                                // presumably OOM turned into SIGBUS
		"Out of memory: Kill process .* \\(syz-fuzzer\\)",
		"Out of memory: Kill process .* \\(sshd\\)",
		// Fix: kernels also log OOM kills as "Killed process ..."; suppress
		// that flavour too so fuzzer/sshd OOMs aren't reported as crashes.
		"Killed process .* \\(syz-fuzzer\\)",
		"Killed process .* \\(sshd\\)",
		"lowmemorykiller: Killing 'syz-fuzzer'",
		"lowmemorykiller: Killing 'sshd'",
	}...)
	for _, s := range supp {
		re, err := regexp.Compile(s)
		if err != nil {
			return fmt.Errorf("failed to compile suppression '%v': %v", s, err)
		}
		cfg.ParsedSuppressions = append(cfg.ParsedSuppressions, re)
	}
	for _, ignore := range cfg.Ignores {
		re, err := regexp.Compile(ignore)
		if err != nil {
			return fmt.Errorf("failed to compile ignore '%v': %v", ignore, err)
		}
		cfg.ParsedIgnores = append(cfg.ParsedIgnores, re)
	}
	return nil
}
// CreateVMEnv builds the vm.Env passed to the VM backend from the parsed
// config; the VM sees the vmarch (TargetVMArch), not the test arch.
func CreateVMEnv(cfg *Config, debug bool) *vm.Env {
	return &vm.Env{
		Name:    cfg.Name,
		OS:      cfg.TargetOS,
		Arch:    cfg.TargetVMArch,
		Workdir: cfg.Workdir,
		Image:   cfg.Image,
		SshKey:  cfg.Sshkey,
		SshUser: cfg.Ssh_User,
		Debug:   debug,
		Config:  cfg.VM,
	}
}
syz-manager/mgrconfig: add another flavour of OOM messages
// Copyright 2015 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package mgrconfig
import (
"encoding/json"
"fmt"
"path/filepath"
"regexp"
"strings"
"github.com/google/syzkaller/pkg/config"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/prog"
_ "github.com/google/syzkaller/sys"
"github.com/google/syzkaller/vm"
)
// Config is the syz-manager configuration, parsed from JSON by LoadData /
// LoadFile and validated by load. Underscored fields mirror config-file keys.
type Config struct {
	Name   string // Instance name (used for identification and as GCE instance prefix)
	Target string // Target OS/arch, e.g. "linux/arm64" or "linux/amd64/386" (amd64 OS with 386 test process)
	Http   string // TCP address to serve HTTP stats page (e.g. "localhost:50000")
	Rpc    string // TCP address to serve RPC for fuzzer processes (optional)

	Workdir    string // manager working directory (made absolute by load)
	Vmlinux    string // vmlinux path (made absolute; default Kernel_Src derives from it)
	Kernel_Src string // kernel source directory
	Tag        string // arbitrary optional tag that is saved along with crash reports (e.g. branch/commit)
	Image      string // linux image for VMs
	Sshkey     string // ssh key for the image (may be empty for some VM types)
	Ssh_User   string // ssh user ("root" by default)

	// Hub connection; if Hub_Client is set, Name/Hub_Addr/Hub_Key must be too.
	Hub_Client string
	Hub_Addr   string
	Hub_Key    string

	// Dashboard connection; if Dashboard_Client is set, Name/Dashboard_Addr/
	// Dashboard_Key must be too.
	Dashboard_Client string
	Dashboard_Addr   string
	Dashboard_Key    string

	Syzkaller string // path to syzkaller checkout (syz-manager will look for binaries in bin subdir)
	Procs     int    // number of parallel processes inside of every VM
	Sandbox   string // type of sandbox to use during fuzzing:
	// "none": don't do anything special (has false positives, e.g. due to killing init)
	// "setuid": impersonate into user nobody (65534), default
	// "namespace": create a new namespace for fuzzer using CLONE_NEWNS/CLONE_NEWNET/CLONE_NEWPID/etc,
	// requires building kernel with CONFIG_NAMESPACES, CONFIG_UTS_NS, CONFIG_USER_NS, CONFIG_PID_NS and CONFIG_NET_NS.

	Cover     bool // use kcov coverage (default: true)
	Leak      bool // do memory leak checking
	Reproduce bool // reproduce, localize and minimize crashers (on by default)

	Enable_Syscalls  []string
	Disable_Syscalls []string
	Suppressions     []string // don't save reports matching these regexps, but reboot VM after them
	Ignores          []string // completely ignore reports matching these regexps (don't save nor reboot)

	Type string          // VM type (qemu, kvm, local)
	VM   json.RawMessage // VM-type-specific config

	// Implementation details beyond this point.
	ParsedSuppressions []*regexp.Regexp `json:"-"`
	ParsedIgnores      []*regexp.Regexp `json:"-"`
	// Parsed Target:
	TargetOS     string `json:"-"`
	TargetArch   string `json:"-"`
	TargetVMArch string `json:"-"`
	// Syzkaller binaries that we are going to use:
	SyzFuzzerBin   string `json:"-"`
	SyzExecprogBin string `json:"-"`
	SyzExecutorBin string `json:"-"`
}
// LoadData parses, defaults, and validates a manager config from raw bytes.
func LoadData(data []byte) (*Config, error) {
	return load(data, "")
}
// LoadFile parses, defaults, and validates a manager config from a file.
func LoadFile(filename string) (*Config, error) {
	return load(nil, filename)
}
// DefaultValues returns a Config carrying the defaults that apply before the
// user-provided JSON is merged on top (see load).
func DefaultValues() *Config {
	cfg := new(Config)
	cfg.Ssh_User = "root"
	cfg.Cover = true
	cfg.Reproduce = true
	cfg.Sandbox = "setuid"
	cfg.Rpc = ":0"
	cfg.Procs = 1
	return cfg
}
// load parses a Config from data (when non-nil) or from filename, applies
// defaults, derives target/binary paths, and validates every parameter,
// failing on the first invalid or missing one.
func load(data []byte, filename string) (*Config, error) {
	cfg := DefaultValues()
	if data != nil {
		if err := config.LoadData(data, cfg); err != nil {
			return nil, err
		}
	} else {
		if err := config.LoadFile(filename, cfg); err != nil {
			return nil, err
		}
	}
	var err error
	cfg.TargetOS, cfg.TargetVMArch, cfg.TargetArch, err = SplitTarget(cfg.Target)
	if err != nil {
		return nil, err
	}
	// Resolve a per-target binary path inside the syzkaller checkout.
	targetBin := func(name, arch string) string {
		exe := ""
		if cfg.TargetOS == "windows" {
			exe = ".exe"
		}
		return filepath.Join(cfg.Syzkaller, "bin", cfg.TargetOS+"_"+arch, name+exe)
	}
	// fuzzer/execprog run inside the VM (vmarch); executor uses the test arch.
	cfg.SyzFuzzerBin = targetBin("syz-fuzzer", cfg.TargetVMArch)
	cfg.SyzExecprogBin = targetBin("syz-execprog", cfg.TargetVMArch)
	cfg.SyzExecutorBin = targetBin("syz-executor", cfg.TargetArch)
	if !osutil.IsExist(cfg.SyzFuzzerBin) {
		return nil, fmt.Errorf("bad config syzkaller param: can't find %v", cfg.SyzFuzzerBin)
	}
	if !osutil.IsExist(cfg.SyzExecprogBin) {
		return nil, fmt.Errorf("bad config syzkaller param: can't find %v", cfg.SyzExecprogBin)
	}
	if !osutil.IsExist(cfg.SyzExecutorBin) {
		return nil, fmt.Errorf("bad config syzkaller param: can't find %v", cfg.SyzExecutorBin)
	}
	if cfg.Http == "" {
		return nil, fmt.Errorf("config param http is empty")
	}
	if cfg.Workdir == "" {
		return nil, fmt.Errorf("config param workdir is empty")
	}
	if cfg.Type == "" {
		return nil, fmt.Errorf("config param type is empty")
	}
	if cfg.Procs < 1 || cfg.Procs > 32 {
		return nil, fmt.Errorf("bad config param procs: '%v', want [1, 32]", cfg.Procs)
	}
	switch cfg.Sandbox {
	case "none", "setuid", "namespace":
	default:
		return nil, fmt.Errorf("config param sandbox must contain one of none/setuid/namespace")
	}
	// Normalize paths to absolute before anything captures them.
	cfg.Workdir = osutil.Abs(cfg.Workdir)
	cfg.Vmlinux = osutil.Abs(cfg.Vmlinux)
	cfg.Syzkaller = osutil.Abs(cfg.Syzkaller)
	if cfg.Kernel_Src == "" {
		cfg.Kernel_Src = filepath.Dir(cfg.Vmlinux) // assume in-tree build by default
	}
	if err := parseSuppressions(cfg); err != nil {
		return nil, err
	}
	if cfg.Hub_Client != "" && (cfg.Name == "" || cfg.Hub_Addr == "" || cfg.Hub_Key == "") {
		return nil, fmt.Errorf("hub_client is set, but name/hub_addr/hub_key is empty")
	}
	if cfg.Dashboard_Client != "" && (cfg.Name == "" ||
		cfg.Dashboard_Addr == "" ||
		cfg.Dashboard_Key == "") {
		return nil, fmt.Errorf("dashboard_client is set, but name/dashboard_addr/dashboard_key is empty")
	}
	return cfg, nil
}
// SplitTarget parses a target spec of the form "os/vmarch[/arch]" and
// returns (os, vmarch, arch, err). When only two components are given,
// arch defaults to vmarch.
func SplitTarget(target string) (string, string, string, error) {
	if target == "" {
		return "", "", "", fmt.Errorf("target is empty")
	}
	parts := strings.Split(target, "/")
	switch len(parts) {
	case 2:
		return parts[0], parts[1], parts[1], nil
	case 3:
		return parts[0], parts[1], parts[2], nil
	default:
		return "", "", "", fmt.Errorf("bad config param target")
	}
}
// ParseEnabledSyscalls resolves cfg.Enable_Syscalls/Disable_Syscalls into a
// set of syscall IDs for the configured target. An entry matches a syscall
// by CallName, full Name, or a trailing-'*' prefix pattern; an entry that
// matches nothing is an error. Disables are applied after enables.
func ParseEnabledSyscalls(cfg *Config) (map[int]bool, error) {
	match := func(call *prog.Syscall, str string) bool {
		if str == call.CallName || str == call.Name {
			return true
		}
		if len(str) > 1 && str[len(str)-1] == '*' && strings.HasPrefix(call.Name, str[:len(str)-1]) {
			return true
		}
		return false
	}
	target, err := prog.GetTarget(cfg.TargetOS, cfg.TargetArch)
	if err != nil {
		return nil, err
	}
	syscalls := make(map[int]bool)
	if len(cfg.Enable_Syscalls) != 0 {
		for _, c := range cfg.Enable_Syscalls {
			n := 0
			for _, call := range target.Syscalls {
				if match(call, c) {
					syscalls[call.ID] = true
					n++
				}
			}
			if n == 0 {
				return nil, fmt.Errorf("unknown enabled syscall: %v", c)
			}
		}
	} else {
		// No explicit enable list: start from all syscalls.
		for _, call := range target.Syscalls {
			syscalls[call.ID] = true
		}
	}
	for _, c := range cfg.Disable_Syscalls {
		n := 0
		for _, call := range target.Syscalls {
			if match(call, c) {
				delete(syscalls, call.ID)
				n++
			}
		}
		if n == 0 {
			return nil, fmt.Errorf("unknown disabled syscall: %v", c)
		}
	}
	return syscalls, nil
}
// parseSuppressions compiles cfg.Suppressions plus a builtin set (executor
// startup failures and OOM-induced kernel/runtime crashes) into
// cfg.ParsedSuppressions, and cfg.Ignores into cfg.ParsedIgnores.
// Returns an error for any regexp that fails to compile.
func parseSuppressions(cfg *Config) error {
	// Add some builtin suppressions.
	supp := append(cfg.Suppressions, []string{
		"panic: failed to start executor binary",
		"panic: executor failed: pthread_create failed",
		"panic: failed to create temp dir",
		"fatal error: runtime: out of memory",
		"fatal error: runtime: cannot allocate memory",
		"fatal error: unexpected signal during runtime execution", // presumably OOM turned into SIGBUS
		"signal SIGBUS: bus error",                                // presumably OOM turned into SIGBUS
		"Out of memory: Kill process .* \\(syz-fuzzer\\)",
		"Out of memory: Kill process .* \\(sshd\\)",
		"Killed process .* \\(syz-fuzzer\\)",
		"Killed process .* \\(sshd\\)",
		"lowmemorykiller: Killing 'syz-fuzzer'",
		"lowmemorykiller: Killing 'sshd'",
	}...)
	for _, s := range supp {
		re, err := regexp.Compile(s)
		if err != nil {
			return fmt.Errorf("failed to compile suppression '%v': %v", s, err)
		}
		cfg.ParsedSuppressions = append(cfg.ParsedSuppressions, re)
	}
	for _, ignore := range cfg.Ignores {
		re, err := regexp.Compile(ignore)
		if err != nil {
			return fmt.Errorf("failed to compile ignore '%v': %v", ignore, err)
		}
		cfg.ParsedIgnores = append(cfg.ParsedIgnores, re)
	}
	return nil
}
// CreateVMEnv builds the vm.Env handed to the VM backend from the parsed
// config; note the VM gets the vmarch (TargetVMArch), not the test arch.
func CreateVMEnv(cfg *Config, debug bool) *vm.Env {
	return &vm.Env{
		Name:    cfg.Name,
		OS:      cfg.TargetOS,
		Arch:    cfg.TargetVMArch,
		Workdir: cfg.Workdir,
		Image:   cfg.Image,
		SshKey:  cfg.Sshkey,
		SshUser: cfg.Ssh_User,
		Debug:   debug,
		Config:  cfg.VM,
	}
}
|
package middleware
import (
"net/http"
"time"
"github.com/rs/zerolog/log"
)
// Timer gets the time taken to process the request and form the response
// Timer is not the real time between writes, but is accurate enough for me (for now)
// Timer gets the time taken to process the request and form the response.
// Timer is not the real time between writes, but is accurate enough for me (for now).
// It stamps aud.TimeStarted/TimeFinished (UTC) around the wrapped handler
// and records the elapsed duration in aud.TimeInMillis.
func Timer(aud *APIAudit) Adapter {
	return func(h http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			log.Print("Start Timer")
			defer log.Print("Finish Timer")

			// Fix: use Time.UTC() instead of calling time.LoadLocation("UTC")
			// on every request and silently ignoring its error.
			aud.TimeStarted = time.Now().UTC()
			log.Printf("aud.TimeStarted = %s\n", aud.TimeStarted)

			h.ServeHTTP(w, r)

			aud.TimeFinished = time.Now().UTC()
			// NOTE(review): the field is named TimeInMillis but stores a
			// time.Duration (nanoseconds) — confirm consumers expect that.
			aud.TimeInMillis = aud.TimeFinished.Sub(aud.TimeStarted)
			log.Printf("aud.TimeFinished = %s\n", aud.TimeFinished)
			log.Printf("aud.TimeInMillis = %s\n", aud.TimeInMillis)
		})
	}
}
Moved into LogResponse
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"net"
"strings"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/model/components"
"k8s.io/kops/pkg/util/subnet"
"k8s.io/kops/upup/pkg/fi"
"github.com/blang/semver"
)
// legacy contains validation functions that don't match the apimachinery style
// ValidateCluster is responsible for checking the validity of the Cluster spec
func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
fieldSpec := field.NewPath("spec")
allErrs := field.ErrorList{}
// kubernetesRelease is the version with only major & minor fields
// We initialize to an arbitrary value, preferably in the supported range,
// in case the value in c.Spec.KubernetesVersion is blank or unparseable.
kubernetesRelease := semver.Version{Major: 1, Minor: 15}
// KubernetesVersion
if c.Spec.KubernetesVersion == "" {
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubernetesVersion"), ""))
} else {
sv, err := util.ParseKubernetesVersion(c.Spec.KubernetesVersion)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubernetesVersion"), c.Spec.KubernetesVersion, "unable to determine kubernetes version"))
} else {
kubernetesRelease = semver.Version{Major: sv.Major, Minor: sv.Minor}
}
}
if c.ObjectMeta.Name == "" {
allErrs = append(allErrs, field.Required(field.NewPath("objectMeta", "name"), "Cluster Name is required (e.g. --name=mycluster.myzone.com)"))
} else {
// Must be a dns name
errs := validation.IsDNS1123Subdomain(c.ObjectMeta.Name)
if len(errs) != 0 {
allErrs = append(allErrs, field.Invalid(field.NewPath("objectMeta", "name"), c.ObjectMeta.Name, fmt.Sprintf("Cluster Name must be a valid DNS name (e.g. --name=mycluster.myzone.com) errors: %s", strings.Join(errs, ", "))))
} else if !strings.Contains(c.ObjectMeta.Name, ".") {
// Tolerate if this is a cluster we are importing for upgrade
if c.ObjectMeta.Annotations[kops.AnnotationNameManagement] != kops.AnnotationValueManagementImported {
allErrs = append(allErrs, field.Invalid(field.NewPath("objectMeta", "name"), c.ObjectMeta.Name, "Cluster Name must be a fully-qualified DNS name (e.g. --name=mycluster.myzone.com)"))
}
}
}
if c.Spec.Assets != nil && c.Spec.Assets.ContainerProxy != nil && c.Spec.Assets.ContainerRegistry != nil {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("Assets", "ContainerProxy"), "ContainerProxy cannot be used in conjunction with ContainerRegistry as represent mutually exclusive concepts. Please consult the documentation for details."))
}
requiresSubnets := true
requiresNetworkCIDR := true
requiresSubnetCIDR := true
switch kops.CloudProviderID(c.Spec.CloudProvider) {
case "":
allErrs = append(allErrs, field.Required(fieldSpec.Child("cloudProvider"), ""))
requiresSubnets = false
requiresSubnetCIDR = false
requiresNetworkCIDR = false
case kops.CloudProviderBareMetal:
requiresSubnets = false
requiresSubnetCIDR = false
requiresNetworkCIDR = false
if c.Spec.NetworkCIDR != "" {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("networkCIDR"), "networkCIDR should not be set on bare metal"))
}
case kops.CloudProviderGCE:
requiresNetworkCIDR = false
if c.Spec.NetworkCIDR != "" {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("networkCIDR"), "networkCIDR should not be set on GCE"))
}
requiresSubnetCIDR = false
case kops.CloudProviderDO:
requiresSubnets = false
requiresSubnetCIDR = false
requiresNetworkCIDR = false
if c.Spec.NetworkCIDR != "" {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("networkCIDR"), "networkCIDR should not be set on DigitalOcean"))
}
case kops.CloudProviderALI:
requiresSubnets = false
requiresSubnetCIDR = false
requiresNetworkCIDR = false
case kops.CloudProviderAWS:
case kops.CloudProviderVSphere:
case kops.CloudProviderOpenstack:
requiresNetworkCIDR = false
requiresSubnetCIDR = false
default:
allErrs = append(allErrs, field.NotSupported(fieldSpec.Child("cloudProvider"), c.Spec.CloudProvider, []string{
string(kops.CloudProviderBareMetal),
string(kops.CloudProviderGCE),
string(kops.CloudProviderDO),
string(kops.CloudProviderALI),
string(kops.CloudProviderAWS),
string(kops.CloudProviderVSphere),
string(kops.CloudProviderOpenstack),
}))
}
if requiresSubnets && len(c.Spec.Subnets) == 0 {
// TODO: Auto choose zones from region?
allErrs = append(allErrs, field.Required(fieldSpec.Child("subnets"), "must configure at least one subnet (use --zones)"))
}
if strict && c.Spec.Kubelet == nil {
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubelet"), "kubelet not configured"))
}
if strict && c.Spec.MasterKubelet == nil {
allErrs = append(allErrs, field.Required(fieldSpec.Child("masterKubelet"), "masterKubelet not configured"))
}
if strict && c.Spec.KubeControllerManager == nil {
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeControllerManager"), "kubeControllerManager not configured"))
}
if strict && c.Spec.KubeDNS == nil {
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeDNS"), "kubeDNS not configured"))
}
if strict && c.Spec.KubeScheduler == nil {
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeScheduler"), "kubeScheduler not configured"))
}
if strict && c.Spec.KubeAPIServer == nil {
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeAPIServer"), "kubeAPIServer not configured"))
}
if strict && c.Spec.KubeProxy == nil {
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeProxy"), "kubeProxy not configured"))
}
if strict && c.Spec.Docker == nil {
allErrs = append(allErrs, field.Required(fieldSpec.Child("docker"), "docker not configured"))
}
// Check NetworkCIDR
var networkCIDR *net.IPNet
var err error
{
if c.Spec.NetworkCIDR == "" {
if requiresNetworkCIDR {
allErrs = append(allErrs, field.Required(fieldSpec.Child("networkCIDR"), "Cluster did not have networkCIDR set"))
}
} else {
_, networkCIDR, err = net.ParseCIDR(c.Spec.NetworkCIDR)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("networkCIDR"), c.Spec.NetworkCIDR, fmt.Sprintf("Cluster had an invalid networkCIDR")))
}
}
}
// Check AdditionalNetworkCIDRs
var additionalNetworkCIDRs []*net.IPNet
{
if len(c.Spec.AdditionalNetworkCIDRs) > 0 {
for _, AdditionalNetworkCIDR := range c.Spec.AdditionalNetworkCIDRs {
_, IPNetAdditionalNetworkCIDR, err := net.ParseCIDR(AdditionalNetworkCIDR)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("additionalNetworkCIDRs"), AdditionalNetworkCIDR, fmt.Sprintf("Cluster had an invalid additionalNetworkCIDRs")))
}
additionalNetworkCIDRs = append(additionalNetworkCIDRs, IPNetAdditionalNetworkCIDR)
}
}
}
// nonMasqueradeCIDR is essentially deprecated, and we're moving to cluster-cidr instead (which is better named pod-cidr)
nonMasqueradeCIDRRequired := true
serviceClusterMustBeSubnetOfNonMasqueradeCIDR := true
if c.Spec.Networking != nil && c.Spec.Networking.GCE != nil {
nonMasqueradeCIDRRequired = false
serviceClusterMustBeSubnetOfNonMasqueradeCIDR = false
}
// Check NonMasqueradeCIDR
var nonMasqueradeCIDR *net.IPNet
{
nonMasqueradeCIDRString := c.Spec.NonMasqueradeCIDR
if nonMasqueradeCIDRString == "" {
if nonMasqueradeCIDRRequired {
allErrs = append(allErrs, field.Required(fieldSpec.Child("nonMasqueradeCIDR"), "Cluster did not have nonMasqueradeCIDR set"))
}
} else {
_, nonMasqueradeCIDR, err = net.ParseCIDR(nonMasqueradeCIDRString)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("nonMasqueradeCIDR"), nonMasqueradeCIDRString, "Cluster had an invalid nonMasqueradeCIDR"))
}
if networkCIDR != nil && subnet.Overlap(nonMasqueradeCIDR, networkCIDR) && c.Spec.Networking != nil && c.Spec.Networking.AmazonVPC == nil && c.Spec.Networking.LyftVPC == nil && (c.Spec.Networking.Cilium == nil || c.Spec.Networking.Cilium.Ipam != kops.CiliumIpamEni) {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("nonMasqueradeCIDR"), fmt.Sprintf("nonMasqueradeCIDR %q cannot overlap with networkCIDR %q", nonMasqueradeCIDRString, c.Spec.NetworkCIDR)))
}
if c.Spec.Kubelet != nil && c.Spec.Kubelet.NonMasqueradeCIDR != nonMasqueradeCIDRString {
// TODO Remove the Spec.Kubelet.NonMasqueradeCIDR field?
if strict || c.Spec.Kubelet.NonMasqueradeCIDR != "" {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubelet", "nonMasqueradeCIDR"), "kubelet nonMasqueradeCIDR did not match cluster nonMasqueradeCIDR"))
}
}
if c.Spec.MasterKubelet != nil && c.Spec.MasterKubelet.NonMasqueradeCIDR != nonMasqueradeCIDRString {
// TODO remove the Spec.MasterKubelet.NonMasqueradeCIDR field?
if strict || c.Spec.MasterKubelet.NonMasqueradeCIDR != "" {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("masterKubelet", "nonMasqueradeCIDR"), "masterKubelet nonMasqueradeCIDR did not match cluster nonMasqueradeCIDR"))
}
}
}
}
// Check ServiceClusterIPRange
var serviceClusterIPRange *net.IPNet
{
serviceClusterIPRangeString := c.Spec.ServiceClusterIPRange
if serviceClusterIPRangeString == "" {
if strict {
allErrs = append(allErrs, field.Required(fieldSpec.Child("serviceClusterIPRange"), "Cluster did not have serviceClusterIPRange set"))
}
} else {
_, serviceClusterIPRange, err = net.ParseCIDR(serviceClusterIPRangeString)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("serviceClusterIPRange"), serviceClusterIPRangeString, "Cluster had an invalid serviceClusterIPRange"))
} else {
if nonMasqueradeCIDR != nil && serviceClusterMustBeSubnetOfNonMasqueradeCIDR && !subnet.BelongsTo(nonMasqueradeCIDR, serviceClusterIPRange) {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("serviceClusterIPRange"), fmt.Sprintf("serviceClusterIPRange %q must be a subnet of nonMasqueradeCIDR %q", serviceClusterIPRangeString, c.Spec.NonMasqueradeCIDR)))
}
if c.Spec.KubeAPIServer != nil && c.Spec.KubeAPIServer.ServiceClusterIPRange != serviceClusterIPRangeString {
if strict || c.Spec.KubeAPIServer.ServiceClusterIPRange != "" {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeAPIServer", "serviceClusterIPRange"), "kubeAPIServer serviceClusterIPRange did not match cluster serviceClusterIPRange"))
}
}
}
}
}
// Check ClusterCIDR
if c.Spec.KubeControllerManager != nil {
var clusterCIDR *net.IPNet
clusterCIDRString := c.Spec.KubeControllerManager.ClusterCIDR
if clusterCIDRString != "" {
_, clusterCIDR, err = net.ParseCIDR(clusterCIDRString)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeControllerManager", "clusterCIDR"), clusterCIDRString, "cluster had an invalid kubeControllerManager.clusterCIDR"))
} else if nonMasqueradeCIDR != nil && !subnet.BelongsTo(nonMasqueradeCIDR, clusterCIDR) {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeControllerManager", "clusterCIDR"), fmt.Sprintf("kubeControllerManager.clusterCIDR %q must be a subnet of nonMasqueradeCIDR %q", clusterCIDRString, c.Spec.NonMasqueradeCIDR)))
}
}
}
// @check the custom kubedns options are valid
if c.Spec.KubeDNS != nil {
if c.Spec.KubeDNS.ServerIP != "" {
address := c.Spec.KubeDNS.ServerIP
ip := net.ParseIP(address)
if ip == nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeDNS", "serverIP"), address, "Cluster had an invalid kubeDNS.serverIP"))
} else {
if serviceClusterIPRange != nil && !serviceClusterIPRange.Contains(ip) {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeDNS", "serverIP"), fmt.Sprintf("ServiceClusterIPRange %q must contain the DNS Server IP %q", c.Spec.ServiceClusterIPRange, address)))
}
if !featureflag.ExperimentalClusterDNS.Enabled() {
if c.Spec.Kubelet != nil && c.Spec.Kubelet.ClusterDNS != c.Spec.KubeDNS.ServerIP {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeDNS", "serverIP"), "Kubelet ClusterDNS did not match cluster kubeDNS.serverIP"))
}
if c.Spec.MasterKubelet != nil && c.Spec.MasterKubelet.ClusterDNS != c.Spec.KubeDNS.ServerIP {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeDNS", "serverIP"), "MasterKubelet ClusterDNS did not match cluster kubeDNS.serverIP"))
}
}
}
}
// @check the nameservers are valid
for i, x := range c.Spec.KubeDNS.UpstreamNameservers {
if ip := net.ParseIP(x); ip == nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeDNS", "upstreamNameservers").Index(i), x, "Invalid nameserver given, should be a valid ip address"))
}
}
// @check the stubdomain if any
for domain, nameservers := range c.Spec.KubeDNS.StubDomains {
if len(nameservers) <= 0 {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeDNS", "stubDomains").Key(domain), domain, "No nameservers specified for the stub domain"))
}
for i, x := range nameservers {
if ip := net.ParseIP(x); ip == nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeDNS", "stubDomains").Key(domain).Index(i), x, "Invalid nameserver given, should be a valid ip address"))
}
}
}
}
// Check CloudProvider
{
var k8sCloudProvider string
switch kops.CloudProviderID(c.Spec.CloudProvider) {
case kops.CloudProviderAWS:
k8sCloudProvider = "aws"
case kops.CloudProviderGCE:
k8sCloudProvider = "gce"
case kops.CloudProviderDO:
k8sCloudProvider = "external"
case kops.CloudProviderVSphere:
k8sCloudProvider = "vsphere"
case kops.CloudProviderBareMetal:
k8sCloudProvider = ""
case kops.CloudProviderOpenstack:
k8sCloudProvider = "openstack"
case kops.CloudProviderALI:
k8sCloudProvider = "alicloud"
default:
// We already added an error above
k8sCloudProvider = "ignore"
}
if k8sCloudProvider != "ignore" {
if c.Spec.Kubelet != nil && (strict || c.Spec.Kubelet.CloudProvider != "") {
if c.Spec.Kubelet.CloudProvider != "external" && k8sCloudProvider != c.Spec.Kubelet.CloudProvider {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubelet", "cloudProvider"), "Did not match cluster cloudProvider"))
}
}
if c.Spec.MasterKubelet != nil && (strict || c.Spec.MasterKubelet.CloudProvider != "") {
if c.Spec.MasterKubelet.CloudProvider != "external" && k8sCloudProvider != c.Spec.MasterKubelet.CloudProvider {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("masterKubelet", "cloudProvider"), "Did not match cluster cloudProvider"))
}
}
if c.Spec.KubeAPIServer != nil && (strict || c.Spec.KubeAPIServer.CloudProvider != "") {
if c.Spec.KubeAPIServer.CloudProvider != "external" && k8sCloudProvider != c.Spec.KubeAPIServer.CloudProvider {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeAPIServer", "cloudProvider"), "Did not match cluster cloudProvider"))
}
}
if c.Spec.KubeControllerManager != nil && (strict || c.Spec.KubeControllerManager.CloudProvider != "") {
if c.Spec.KubeControllerManager.CloudProvider != "external" && k8sCloudProvider != c.Spec.KubeControllerManager.CloudProvider {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeControllerManager", "cloudProvider"), "Did not match cluster cloudProvider"))
}
}
}
}
// Check that the subnet CIDRs are all consistent
{
for i, s := range c.Spec.Subnets {
fieldSubnet := fieldSpec.Child("subnets").Index(i)
if s.CIDR == "" {
if requiresSubnetCIDR && strict {
allErrs = append(allErrs, field.Required(fieldSubnet.Child("cidr"), "subnet did not have a cidr set"))
}
} else {
_, subnetCIDR, err := net.ParseCIDR(s.CIDR)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSubnet.Child("cidr"), s.CIDR, "subnet had an invalid cidr"))
} else if networkCIDR != nil && !validateSubnetCIDR(networkCIDR, additionalNetworkCIDRs, subnetCIDR) {
allErrs = append(allErrs, field.Forbidden(fieldSubnet.Child("cidr"), fmt.Sprintf("subnet %q had a cidr %q that was not a subnet of the networkCIDR %q", s.Name, s.CIDR, c.Spec.NetworkCIDR)))
}
}
}
}
// NodeAuthorization
if c.Spec.NodeAuthorization != nil {
// @check the feature gate is enabled for this
if !featureflag.EnableNodeAuthorization.Enabled() {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "nodeAuthorization"), "node authorization is experimental feature; set `export KOPS_FEATURE_FLAGS=EnableNodeAuthorization`"))
} else {
if c.Spec.NodeAuthorization.NodeAuthorizer == nil {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "nodeAuthorization"), "no node authorization policy has been set"))
} else {
path := field.NewPath("spec", "nodeAuthorization").Child("nodeAuthorizer")
if c.Spec.NodeAuthorization.NodeAuthorizer.Port < 0 || c.Spec.NodeAuthorization.NodeAuthorizer.Port >= 65535 {
allErrs = append(allErrs, field.Invalid(path.Child("port"), c.Spec.NodeAuthorization.NodeAuthorizer.Port, "invalid port"))
}
if c.Spec.NodeAuthorization.NodeAuthorizer.Timeout != nil && c.Spec.NodeAuthorization.NodeAuthorizer.Timeout.Duration <= 0 {
allErrs = append(allErrs, field.Invalid(path.Child("timeout"), c.Spec.NodeAuthorization.NodeAuthorizer.Timeout, "must be greater than zero"))
}
if c.Spec.NodeAuthorization.NodeAuthorizer.TokenTTL != nil && c.Spec.NodeAuthorization.NodeAuthorizer.TokenTTL.Duration < 0 {
allErrs = append(allErrs, field.Invalid(path.Child("tokenTTL"), c.Spec.NodeAuthorization.NodeAuthorizer.TokenTTL, "must be greater than or equal to zero"))
}
// @question: we could probably just default these settings in the model when the node-authorizer is enabled??
if c.Spec.KubeAPIServer == nil {
allErrs = append(allErrs, field.Required(field.NewPath("spec", "kubeAPIServer"), "bootstrap token authentication is not enabled in the kube-apiserver"))
} else if c.Spec.KubeAPIServer.EnableBootstrapAuthToken == nil {
allErrs = append(allErrs, field.Required(field.NewPath("spec", "kubeAPIServer", "enableBootstrapAuthToken"), "kube-apiserver has not been configured to use bootstrap tokens"))
} else if !fi.BoolValue(c.Spec.KubeAPIServer.EnableBootstrapAuthToken) {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "kubeAPIServer", "enableBootstrapAuthToken"), "bootstrap tokens in the kube-apiserver has been disabled"))
}
}
}
}
// UpdatePolicy
if c.Spec.UpdatePolicy != nil {
switch *c.Spec.UpdatePolicy {
case kops.UpdatePolicyExternal:
// Valid
default:
allErrs = append(allErrs, field.NotSupported(fieldSpec.Child("updatePolicy"), *c.Spec.UpdatePolicy, []string{kops.UpdatePolicyExternal}))
}
}
// KubeProxy
if c.Spec.KubeProxy != nil {
kubeProxyPath := fieldSpec.Child("kubeProxy")
master := c.Spec.KubeProxy.Master
for i, x := range c.Spec.KubeProxy.IPVSExcludeCIDRS {
if _, _, err := net.ParseCIDR(x); err != nil {
allErrs = append(allErrs, field.Invalid(kubeProxyPath.Child("ipvsExcludeCidrs").Index(i), x, "Invalid network CIDR"))
}
}
if master != "" && !isValidAPIServersURL(master) {
allErrs = append(allErrs, field.Invalid(kubeProxyPath.Child("master"), master, "Not a valid APIServer URL"))
}
}
// KubeAPIServer
if c.Spec.KubeAPIServer != nil {
if kubernetesRelease.GTE(semver.MustParse("1.10.0")) {
if len(c.Spec.KubeAPIServer.AdmissionControl) > 0 {
if len(c.Spec.KubeAPIServer.DisableAdmissionPlugins) > 0 {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeAPIServer", "disableAdmissionPlugins"),
"disableAdmissionPlugins is mutually exclusive, you cannot use both admissionControl and disableAdmissionPlugins together"))
}
}
}
}
// Kubelet
allErrs = append(allErrs, validateKubelet(c.Spec.Kubelet, c, fieldSpec.Child("kubelet"))...)
allErrs = append(allErrs, validateKubelet(c.Spec.MasterKubelet, c, fieldSpec.Child("masterKubelet"))...)
// Topology support
if c.Spec.Topology != nil {
if c.Spec.Topology.Masters != "" && c.Spec.Topology.Nodes != "" {
allErrs = append(allErrs, IsValidValue(fieldSpec.Child("topology", "masters"), &c.Spec.Topology.Masters, kops.SupportedTopologies)...)
allErrs = append(allErrs, IsValidValue(fieldSpec.Child("topology", "nodes"), &c.Spec.Topology.Nodes, kops.SupportedTopologies)...)
} else {
allErrs = append(allErrs, field.Required(fieldSpec.Child("masters"), "topology requires non-nil values for masters and nodes"))
}
if c.Spec.Topology.Bastion != nil {
bastion := c.Spec.Topology.Bastion
if c.Spec.Topology.Masters == kops.TopologyPublic || c.Spec.Topology.Nodes == kops.TopologyPublic {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("topology", "bastion"), "bastion requires masters and nodes to have private topology"))
}
if bastion.IdleTimeoutSeconds != nil && *bastion.IdleTimeoutSeconds <= 0 {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("topology", "bastion", "idleTimeoutSeconds"), *bastion.IdleTimeoutSeconds, "bastion idleTimeoutSeconds should be greater than zero"))
}
if bastion.IdleTimeoutSeconds != nil && *bastion.IdleTimeoutSeconds > 3600 {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("topology", "bastion", "idleTimeoutSeconds"), *bastion.IdleTimeoutSeconds, "bastion idleTimeoutSeconds cannot be greater than one hour"))
}
}
}
// Egress specification support
{
for i, s := range c.Spec.Subnets {
if s.Egress == "" {
continue
}
fieldSubnet := fieldSpec.Child("subnets").Index(i)
if !strings.HasPrefix(s.Egress, "nat-") && !strings.HasPrefix(s.Egress, "i-") && s.Egress != kops.EgressExternal {
allErrs = append(allErrs, field.Invalid(fieldSubnet.Child("egress"), s.Egress, "egress must be of type NAT Gateway or NAT EC2 Instance or 'External'"))
}
if s.Egress != kops.EgressExternal && s.Type != "Private" {
allErrs = append(allErrs, field.Forbidden(fieldSubnet.Child("egress"), "egress can only be specified for private subnets"))
}
}
}
// Etcd
{
fieldEtcdClusters := fieldSpec.Child("etcdClusters")
if len(c.Spec.EtcdClusters) == 0 {
allErrs = append(allErrs, field.Required(fieldEtcdClusters, ""))
} else {
for i, x := range c.Spec.EtcdClusters {
allErrs = append(allErrs, validateEtcdClusterSpecLegacy(x, fieldEtcdClusters.Index(i))...)
}
allErrs = append(allErrs, validateEtcdTLS(c.Spec.EtcdClusters, fieldEtcdClusters)...)
allErrs = append(allErrs, validateEtcdStorage(c.Spec.EtcdClusters, fieldEtcdClusters)...)
}
}
allErrs = append(allErrs, newValidateCluster(c)...)
return allErrs
}
// validateSubnetCIDR reports whether subnetCIDR lies within the primary
// networkCIDR or within any of the additionalNetworkCIDRs assigned to the
// cluster.
func validateSubnetCIDR(networkCIDR *net.IPNet, additionalNetworkCIDRs []*net.IPNet, subnetCIDR *net.IPNet) bool {
	// Check the primary CIDR first, then each additional CIDR in turn.
	candidates := make([]*net.IPNet, 0, len(additionalNetworkCIDRs)+1)
	candidates = append(candidates, networkCIDR)
	candidates = append(candidates, additionalNetworkCIDRs...)
	for _, candidate := range candidates {
		if subnet.BelongsTo(candidate, subnetCIDR) {
			return true
		}
	}
	return false
}
// validateEtcdClusterSpecLegacy checks a single etcd cluster spec: it must
// be named and must have at least one member, and the member count should be
// odd so that quorum is well-defined. It also validates the etcd version and
// each individual member.
func validateEtcdClusterSpecLegacy(spec *kops.EtcdClusterSpec, fieldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if spec.Name == "" {
		allErrs = append(allErrs, field.Required(fieldPath.Child("name"), "etcdCluster did not have name"))
	}
	switch memberCount := len(spec.Members); {
	case memberCount == 0:
		allErrs = append(allErrs, field.Required(fieldPath.Child("members"), "No members defined in etcd cluster"))
	case memberCount%2 == 0:
		// Not technically a requirement, but doesn't really make sense to allow
		allErrs = append(allErrs, field.Invalid(fieldPath.Child("members"), memberCount, "Should be an odd number of master-zones for quorum. Use --zones and --master-zones to declare node zones and master zones separately"))
	}
	allErrs = append(allErrs, validateEtcdVersion(spec, fieldPath, nil)...)
	for _, member := range spec.Members {
		allErrs = append(allErrs, validateEtcdMemberSpec(member, fieldPath)...)
	}
	return allErrs
}
// validateEtcdTLS checks the TLS settings for etcd are valid: TLS must be
// enabled on either all of the etcd clusters or none of them.
func validateEtcdTLS(specs []*kops.EtcdClusterSpec, fieldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	enabled := 0
	for _, cluster := range specs {
		if cluster.EnableEtcdTLS {
			enabled++
		}
	}
	// A partial TLS rollout (some clusters on, some off) is forbidden.
	if enabled != 0 && enabled < len(specs) {
		allErrs = append(allErrs, field.Forbidden(fieldPath.Index(0).Child("enableEtcdTLS"), "both etcd clusters must have TLS enabled or none at all"))
	}
	return allErrs
}
// validateEtcdStorage is responsible for checking that every etcd cluster
// declaring a storage version declares the same one as the first cluster in
// the list. Each mismatching cluster yields a Forbidden error.
func validateEtcdStorage(specs []*kops.EtcdClusterSpec, fieldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	// Guard against an empty slice: the previous implementation indexed
	// specs[0] unconditionally, which panics when no etcd clusters are
	// configured. With no clusters there is nothing to compare.
	if len(specs) == 0 {
		return allErrs
	}
	version := specs[0].Version
	for i, x := range specs {
		if x.Version != "" && x.Version != version {
			allErrs = append(allErrs, field.Forbidden(fieldPath.Index(i).Child("version"), fmt.Sprintf("cluster: %q, has a different storage version: %q, both must be the same", x.Name, x.Version)))
		}
	}
	return allErrs
}
// validateEtcdVersion is responsible for validating the storage version of etcd.
// Only major versions 2 and 3 are supported, and the parsed version must not be
// below minimalVersion (which defaults to 0.0.0 when nil).
// @TODO semvar package doesn't appear to ignore a 'v' in v1.1.1; could be a problem later down the line
func validateEtcdVersion(spec *kops.EtcdClusterSpec, fieldPath *field.Path, minimalVersion *semver.Version) field.ErrorList {
	// Default the version floor when the caller does not impose one.
	if minimalVersion == nil {
		zero := semver.MustParse("0.0.0")
		minimalVersion = &zero
	}
	// An unset spec version falls back to the default etcd2 version.
	version := spec.Version
	if version == "" {
		version = components.DefaultEtcd2Version
	}
	sem, err := semver.Parse(strings.TrimPrefix(version, "v"))
	if err != nil {
		return field.ErrorList{field.Invalid(fieldPath.Child("version"), version, "the storage version is invalid")}
	}
	// we only support v3 and v2 for now
	if sem.Major != 2 && sem.Major != 3 {
		return field.ErrorList{field.Invalid(fieldPath.Child("version"), version, "unsupported storage version, we only support major versions 2 and 3")}
	}
	if sem.LT(*minimalVersion) {
		return field.ErrorList{field.Invalid(fieldPath.Child("version"), version, fmt.Sprintf("minimum version required is %s", minimalVersion.String()))}
	}
	return nil
}
// validateEtcdMemberSpec is responsible for validating a single etcd cluster
// member: both its name and its instance group must be set.
func validateEtcdMemberSpec(spec *kops.EtcdMemberSpec, fieldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	// Table of required fields and the message emitted when each is absent.
	for _, required := range []struct {
		missing bool
		child   string
		message string
	}{
		{spec.Name == "", "name", "etcdMember did not have name"},
		{fi.StringValue(spec.InstanceGroup) == "", "instanceGroup", "etcdMember did not have instanceGroup"},
	} {
		if required.missing {
			allErrs = append(allErrs, field.Required(fieldPath.Child(required.child), required.message))
		}
	}
	return allErrs
}
// DeepValidate is responsible for validating the instancegroups within the
// cluster spec: the cluster itself must validate, and there must be at least
// one master group and one node group. Each group is then cross-validated
// against the cluster, with extra cloud-specific checks for AWS.
func DeepValidate(c *kops.Cluster, groups []*kops.InstanceGroup, strict bool) error {
	if errs := ValidateCluster(c, strict); len(errs) != 0 {
		return errs.ToAggregate()
	}
	if len(groups) == 0 {
		return fmt.Errorf("must configure at least one InstanceGroup")
	}
	// Tally masters vs nodes so we can require at least one of each.
	var masters, nodes int
	for _, group := range groups {
		if group.IsMaster() {
			masters++
		} else {
			nodes++
		}
	}
	switch {
	case masters == 0:
		return fmt.Errorf("must configure at least one Master InstanceGroup")
	case nodes == 0:
		return fmt.Errorf("must configure at least one Node InstanceGroup")
	}
	for _, group := range groups {
		errs := CrossValidateInstanceGroup(group, c, strict)
		// Additional cloud-specific validation rules,
		// such as making sure that identifiers match the expected formats for the given cloud
		if kops.CloudProviderID(c.Spec.CloudProvider) == kops.CloudProviderAWS {
			errs = append(errs, awsValidateInstanceGroup(group)...)
		} else if len(group.Spec.Volumes) > 0 {
			errs = append(errs, field.Forbidden(field.NewPath("spec", "volumes"), "instancegroup volumes are only available with aws at present"))
		}
		if len(errs) != 0 {
			return errs.ToAggregate()
		}
	}
	return nil
}
// validateKubelet checks a kubelet config spec (either the node or master
// kubelet) for flags removed from Kubernetes, bootstrap-token prerequisites,
// and topology-manager-policy support. A nil spec yields no errors.
func validateKubelet(k *kops.KubeletConfigSpec, c *kops.Cluster, kubeletPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if k == nil {
		return allErrs
	}
	// The --api-servers flag was removed in Kubernetes 1.6.
	if k.APIServers != "" {
		allErrs = append(allErrs, field.Forbidden(
			kubeletPath.Child("apiServers"),
			"api-servers flag was removed in 1.6"))
	}
	// The --require-kubeconfig flag was removed in Kubernetes 1.10.
	if c.IsKubernetesGTE("1.10") && k.RequireKubeconfig != nil {
		allErrs = append(allErrs, field.Forbidden(
			kubeletPath.Child("requireKubeconfig"),
			"require-kubeconfig flag was removed in 1.10. (Please be sure you are not using a cluster config from `kops get cluster --full`)"))
	}
	// Bootstrap tokens only make sense with a configured kube-apiserver.
	if k.BootstrapKubeconfig != "" && c.Spec.KubeAPIServer == nil {
		allErrs = append(allErrs, field.Required(kubeletPath.Root().Child("spec").Child("kubeAPIServer"), "bootstrap token require the NodeRestriction admissions controller"))
	}
	if k.TopologyManagerPolicy != "" {
		allErrs = append(allErrs, IsValidValue(kubeletPath.Child("topologyManagerPolicy"), &k.TopologyManagerPolicy, []string{"none", "best-effort", "restricted", "single-numa-node"})...)
		if !c.IsKubernetesGTE("1.18") {
			allErrs = append(allErrs, field.Forbidden(kubeletPath.Child("topologyManagerPolicy"), "topologyManagerPolicy requires at least Kubernetes 1.18"))
		}
	}
	return allErrs
}
Return errors early if kubernetesVersion cannot be determined
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"net"
"strings"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/model/components"
"k8s.io/kops/pkg/util/subnet"
"k8s.io/kops/upup/pkg/fi"
"github.com/blang/semver"
)
// legacy contains validation functions that don't match the apimachinery style
// ValidateCluster is responsible for checking the validity of the Cluster spec
func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
fieldSpec := field.NewPath("spec")
allErrs := field.ErrorList{}
// KubernetesVersion
// This is one case we return error because a large part of the rest of the validation logic depend on a valid kubernetes version.
if c.Spec.KubernetesVersion == "" {
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubernetesVersion"), ""))
return allErrs
} else if _, err := util.ParseKubernetesVersion(c.Spec.KubernetesVersion); err != nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubernetesVersion"), c.Spec.KubernetesVersion, "unable to determine kubernetes version"))
return allErrs
}
if c.ObjectMeta.Name == "" {
allErrs = append(allErrs, field.Required(field.NewPath("objectMeta", "name"), "Cluster Name is required (e.g. --name=mycluster.myzone.com)"))
} else {
// Must be a dns name
errs := validation.IsDNS1123Subdomain(c.ObjectMeta.Name)
if len(errs) != 0 {
allErrs = append(allErrs, field.Invalid(field.NewPath("objectMeta", "name"), c.ObjectMeta.Name, fmt.Sprintf("Cluster Name must be a valid DNS name (e.g. --name=mycluster.myzone.com) errors: %s", strings.Join(errs, ", "))))
} else if !strings.Contains(c.ObjectMeta.Name, ".") {
// Tolerate if this is a cluster we are importing for upgrade
if c.ObjectMeta.Annotations[kops.AnnotationNameManagement] != kops.AnnotationValueManagementImported {
allErrs = append(allErrs, field.Invalid(field.NewPath("objectMeta", "name"), c.ObjectMeta.Name, "Cluster Name must be a fully-qualified DNS name (e.g. --name=mycluster.myzone.com)"))
}
}
}
if c.Spec.Assets != nil && c.Spec.Assets.ContainerProxy != nil && c.Spec.Assets.ContainerRegistry != nil {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("Assets", "ContainerProxy"), "ContainerProxy cannot be used in conjunction with ContainerRegistry as represent mutually exclusive concepts. Please consult the documentation for details."))
}
requiresSubnets := true
requiresNetworkCIDR := true
requiresSubnetCIDR := true
switch kops.CloudProviderID(c.Spec.CloudProvider) {
case "":
allErrs = append(allErrs, field.Required(fieldSpec.Child("cloudProvider"), ""))
requiresSubnets = false
requiresSubnetCIDR = false
requiresNetworkCIDR = false
case kops.CloudProviderBareMetal:
requiresSubnets = false
requiresSubnetCIDR = false
requiresNetworkCIDR = false
if c.Spec.NetworkCIDR != "" {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("networkCIDR"), "networkCIDR should not be set on bare metal"))
}
case kops.CloudProviderGCE:
requiresNetworkCIDR = false
if c.Spec.NetworkCIDR != "" {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("networkCIDR"), "networkCIDR should not be set on GCE"))
}
requiresSubnetCIDR = false
case kops.CloudProviderDO:
requiresSubnets = false
requiresSubnetCIDR = false
requiresNetworkCIDR = false
if c.Spec.NetworkCIDR != "" {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("networkCIDR"), "networkCIDR should not be set on DigitalOcean"))
}
case kops.CloudProviderALI:
requiresSubnets = false
requiresSubnetCIDR = false
requiresNetworkCIDR = false
case kops.CloudProviderAWS:
case kops.CloudProviderVSphere:
case kops.CloudProviderOpenstack:
requiresNetworkCIDR = false
requiresSubnetCIDR = false
default:
allErrs = append(allErrs, field.NotSupported(fieldSpec.Child("cloudProvider"), c.Spec.CloudProvider, []string{
string(kops.CloudProviderBareMetal),
string(kops.CloudProviderGCE),
string(kops.CloudProviderDO),
string(kops.CloudProviderALI),
string(kops.CloudProviderAWS),
string(kops.CloudProviderVSphere),
string(kops.CloudProviderOpenstack),
}))
}
if requiresSubnets && len(c.Spec.Subnets) == 0 {
// TODO: Auto choose zones from region?
allErrs = append(allErrs, field.Required(fieldSpec.Child("subnets"), "must configure at least one subnet (use --zones)"))
}
if strict && c.Spec.Kubelet == nil {
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubelet"), "kubelet not configured"))
}
if strict && c.Spec.MasterKubelet == nil {
allErrs = append(allErrs, field.Required(fieldSpec.Child("masterKubelet"), "masterKubelet not configured"))
}
if strict && c.Spec.KubeControllerManager == nil {
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeControllerManager"), "kubeControllerManager not configured"))
}
if strict && c.Spec.KubeDNS == nil {
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeDNS"), "kubeDNS not configured"))
}
if strict && c.Spec.KubeScheduler == nil {
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeScheduler"), "kubeScheduler not configured"))
}
if strict && c.Spec.KubeAPIServer == nil {
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeAPIServer"), "kubeAPIServer not configured"))
}
if strict && c.Spec.KubeProxy == nil {
allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeProxy"), "kubeProxy not configured"))
}
if strict && c.Spec.Docker == nil {
allErrs = append(allErrs, field.Required(fieldSpec.Child("docker"), "docker not configured"))
}
// Check NetworkCIDR
var networkCIDR *net.IPNet
var err error
{
if c.Spec.NetworkCIDR == "" {
if requiresNetworkCIDR {
allErrs = append(allErrs, field.Required(fieldSpec.Child("networkCIDR"), "Cluster did not have networkCIDR set"))
}
} else {
_, networkCIDR, err = net.ParseCIDR(c.Spec.NetworkCIDR)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("networkCIDR"), c.Spec.NetworkCIDR, fmt.Sprintf("Cluster had an invalid networkCIDR")))
}
}
}
// Check AdditionalNetworkCIDRs
var additionalNetworkCIDRs []*net.IPNet
{
if len(c.Spec.AdditionalNetworkCIDRs) > 0 {
for _, AdditionalNetworkCIDR := range c.Spec.AdditionalNetworkCIDRs {
_, IPNetAdditionalNetworkCIDR, err := net.ParseCIDR(AdditionalNetworkCIDR)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("additionalNetworkCIDRs"), AdditionalNetworkCIDR, fmt.Sprintf("Cluster had an invalid additionalNetworkCIDRs")))
}
additionalNetworkCIDRs = append(additionalNetworkCIDRs, IPNetAdditionalNetworkCIDR)
}
}
}
// nonMasqueradeCIDR is essentially deprecated, and we're moving to cluster-cidr instead (which is better named pod-cidr)
nonMasqueradeCIDRRequired := true
serviceClusterMustBeSubnetOfNonMasqueradeCIDR := true
if c.Spec.Networking != nil && c.Spec.Networking.GCE != nil {
nonMasqueradeCIDRRequired = false
serviceClusterMustBeSubnetOfNonMasqueradeCIDR = false
}
// Check NonMasqueradeCIDR
var nonMasqueradeCIDR *net.IPNet
{
nonMasqueradeCIDRString := c.Spec.NonMasqueradeCIDR
if nonMasqueradeCIDRString == "" {
if nonMasqueradeCIDRRequired {
allErrs = append(allErrs, field.Required(fieldSpec.Child("nonMasqueradeCIDR"), "Cluster did not have nonMasqueradeCIDR set"))
}
} else {
_, nonMasqueradeCIDR, err = net.ParseCIDR(nonMasqueradeCIDRString)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("nonMasqueradeCIDR"), nonMasqueradeCIDRString, "Cluster had an invalid nonMasqueradeCIDR"))
}
if networkCIDR != nil && subnet.Overlap(nonMasqueradeCIDR, networkCIDR) && c.Spec.Networking != nil && c.Spec.Networking.AmazonVPC == nil && c.Spec.Networking.LyftVPC == nil && (c.Spec.Networking.Cilium == nil || c.Spec.Networking.Cilium.Ipam != kops.CiliumIpamEni) {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("nonMasqueradeCIDR"), fmt.Sprintf("nonMasqueradeCIDR %q cannot overlap with networkCIDR %q", nonMasqueradeCIDRString, c.Spec.NetworkCIDR)))
}
if c.Spec.Kubelet != nil && c.Spec.Kubelet.NonMasqueradeCIDR != nonMasqueradeCIDRString {
// TODO Remove the Spec.Kubelet.NonMasqueradeCIDR field?
if strict || c.Spec.Kubelet.NonMasqueradeCIDR != "" {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubelet", "nonMasqueradeCIDR"), "kubelet nonMasqueradeCIDR did not match cluster nonMasqueradeCIDR"))
}
}
if c.Spec.MasterKubelet != nil && c.Spec.MasterKubelet.NonMasqueradeCIDR != nonMasqueradeCIDRString {
// TODO remove the Spec.MasterKubelet.NonMasqueradeCIDR field?
if strict || c.Spec.MasterKubelet.NonMasqueradeCIDR != "" {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("masterKubelet", "nonMasqueradeCIDR"), "masterKubelet nonMasqueradeCIDR did not match cluster nonMasqueradeCIDR"))
}
}
}
}
// Check ServiceClusterIPRange
var serviceClusterIPRange *net.IPNet
{
serviceClusterIPRangeString := c.Spec.ServiceClusterIPRange
if serviceClusterIPRangeString == "" {
if strict {
allErrs = append(allErrs, field.Required(fieldSpec.Child("serviceClusterIPRange"), "Cluster did not have serviceClusterIPRange set"))
}
} else {
_, serviceClusterIPRange, err = net.ParseCIDR(serviceClusterIPRangeString)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("serviceClusterIPRange"), serviceClusterIPRangeString, "Cluster had an invalid serviceClusterIPRange"))
} else {
if nonMasqueradeCIDR != nil && serviceClusterMustBeSubnetOfNonMasqueradeCIDR && !subnet.BelongsTo(nonMasqueradeCIDR, serviceClusterIPRange) {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("serviceClusterIPRange"), fmt.Sprintf("serviceClusterIPRange %q must be a subnet of nonMasqueradeCIDR %q", serviceClusterIPRangeString, c.Spec.NonMasqueradeCIDR)))
}
if c.Spec.KubeAPIServer != nil && c.Spec.KubeAPIServer.ServiceClusterIPRange != serviceClusterIPRangeString {
if strict || c.Spec.KubeAPIServer.ServiceClusterIPRange != "" {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeAPIServer", "serviceClusterIPRange"), "kubeAPIServer serviceClusterIPRange did not match cluster serviceClusterIPRange"))
}
}
}
}
}
// Check ClusterCIDR
if c.Spec.KubeControllerManager != nil {
var clusterCIDR *net.IPNet
clusterCIDRString := c.Spec.KubeControllerManager.ClusterCIDR
if clusterCIDRString != "" {
_, clusterCIDR, err = net.ParseCIDR(clusterCIDRString)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeControllerManager", "clusterCIDR"), clusterCIDRString, "cluster had an invalid kubeControllerManager.clusterCIDR"))
} else if nonMasqueradeCIDR != nil && !subnet.BelongsTo(nonMasqueradeCIDR, clusterCIDR) {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeControllerManager", "clusterCIDR"), fmt.Sprintf("kubeControllerManager.clusterCIDR %q must be a subnet of nonMasqueradeCIDR %q", clusterCIDRString, c.Spec.NonMasqueradeCIDR)))
}
}
}
// @check the custom kubedns options are valid
if c.Spec.KubeDNS != nil {
if c.Spec.KubeDNS.ServerIP != "" {
address := c.Spec.KubeDNS.ServerIP
ip := net.ParseIP(address)
if ip == nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeDNS", "serverIP"), address, "Cluster had an invalid kubeDNS.serverIP"))
} else {
if serviceClusterIPRange != nil && !serviceClusterIPRange.Contains(ip) {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeDNS", "serverIP"), fmt.Sprintf("ServiceClusterIPRange %q must contain the DNS Server IP %q", c.Spec.ServiceClusterIPRange, address)))
}
if !featureflag.ExperimentalClusterDNS.Enabled() {
if c.Spec.Kubelet != nil && c.Spec.Kubelet.ClusterDNS != c.Spec.KubeDNS.ServerIP {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeDNS", "serverIP"), "Kubelet ClusterDNS did not match cluster kubeDNS.serverIP"))
}
if c.Spec.MasterKubelet != nil && c.Spec.MasterKubelet.ClusterDNS != c.Spec.KubeDNS.ServerIP {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeDNS", "serverIP"), "MasterKubelet ClusterDNS did not match cluster kubeDNS.serverIP"))
}
}
}
}
// @check the nameservers are valid
for i, x := range c.Spec.KubeDNS.UpstreamNameservers {
if ip := net.ParseIP(x); ip == nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeDNS", "upstreamNameservers").Index(i), x, "Invalid nameserver given, should be a valid ip address"))
}
}
// @check the stubdomain if any
for domain, nameservers := range c.Spec.KubeDNS.StubDomains {
if len(nameservers) <= 0 {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeDNS", "stubDomains").Key(domain), domain, "No nameservers specified for the stub domain"))
}
for i, x := range nameservers {
if ip := net.ParseIP(x); ip == nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeDNS", "stubDomains").Key(domain).Index(i), x, "Invalid nameserver given, should be a valid ip address"))
}
}
}
}
// Check CloudProvider
{
var k8sCloudProvider string
switch kops.CloudProviderID(c.Spec.CloudProvider) {
case kops.CloudProviderAWS:
k8sCloudProvider = "aws"
case kops.CloudProviderGCE:
k8sCloudProvider = "gce"
case kops.CloudProviderDO:
k8sCloudProvider = "external"
case kops.CloudProviderVSphere:
k8sCloudProvider = "vsphere"
case kops.CloudProviderBareMetal:
k8sCloudProvider = ""
case kops.CloudProviderOpenstack:
k8sCloudProvider = "openstack"
case kops.CloudProviderALI:
k8sCloudProvider = "alicloud"
default:
// We already added an error above
k8sCloudProvider = "ignore"
}
if k8sCloudProvider != "ignore" {
if c.Spec.Kubelet != nil && (strict || c.Spec.Kubelet.CloudProvider != "") {
if c.Spec.Kubelet.CloudProvider != "external" && k8sCloudProvider != c.Spec.Kubelet.CloudProvider {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubelet", "cloudProvider"), "Did not match cluster cloudProvider"))
}
}
if c.Spec.MasterKubelet != nil && (strict || c.Spec.MasterKubelet.CloudProvider != "") {
if c.Spec.MasterKubelet.CloudProvider != "external" && k8sCloudProvider != c.Spec.MasterKubelet.CloudProvider {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("masterKubelet", "cloudProvider"), "Did not match cluster cloudProvider"))
}
}
if c.Spec.KubeAPIServer != nil && (strict || c.Spec.KubeAPIServer.CloudProvider != "") {
if c.Spec.KubeAPIServer.CloudProvider != "external" && k8sCloudProvider != c.Spec.KubeAPIServer.CloudProvider {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeAPIServer", "cloudProvider"), "Did not match cluster cloudProvider"))
}
}
if c.Spec.KubeControllerManager != nil && (strict || c.Spec.KubeControllerManager.CloudProvider != "") {
if c.Spec.KubeControllerManager.CloudProvider != "external" && k8sCloudProvider != c.Spec.KubeControllerManager.CloudProvider {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeControllerManager", "cloudProvider"), "Did not match cluster cloudProvider"))
}
}
}
}
// Check that the subnet CIDRs are all consistent
{
for i, s := range c.Spec.Subnets {
fieldSubnet := fieldSpec.Child("subnets").Index(i)
if s.CIDR == "" {
if requiresSubnetCIDR && strict {
allErrs = append(allErrs, field.Required(fieldSubnet.Child("cidr"), "subnet did not have a cidr set"))
}
} else {
_, subnetCIDR, err := net.ParseCIDR(s.CIDR)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSubnet.Child("cidr"), s.CIDR, "subnet had an invalid cidr"))
} else if networkCIDR != nil && !validateSubnetCIDR(networkCIDR, additionalNetworkCIDRs, subnetCIDR) {
allErrs = append(allErrs, field.Forbidden(fieldSubnet.Child("cidr"), fmt.Sprintf("subnet %q had a cidr %q that was not a subnet of the networkCIDR %q", s.Name, s.CIDR, c.Spec.NetworkCIDR)))
}
}
}
}
// NodeAuthorization
if c.Spec.NodeAuthorization != nil {
// @check the feature gate is enabled for this
if !featureflag.EnableNodeAuthorization.Enabled() {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "nodeAuthorization"), "node authorization is experimental feature; set `export KOPS_FEATURE_FLAGS=EnableNodeAuthorization`"))
} else {
if c.Spec.NodeAuthorization.NodeAuthorizer == nil {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "nodeAuthorization"), "no node authorization policy has been set"))
} else {
path := field.NewPath("spec", "nodeAuthorization").Child("nodeAuthorizer")
if c.Spec.NodeAuthorization.NodeAuthorizer.Port < 0 || c.Spec.NodeAuthorization.NodeAuthorizer.Port >= 65535 {
allErrs = append(allErrs, field.Invalid(path.Child("port"), c.Spec.NodeAuthorization.NodeAuthorizer.Port, "invalid port"))
}
if c.Spec.NodeAuthorization.NodeAuthorizer.Timeout != nil && c.Spec.NodeAuthorization.NodeAuthorizer.Timeout.Duration <= 0 {
allErrs = append(allErrs, field.Invalid(path.Child("timeout"), c.Spec.NodeAuthorization.NodeAuthorizer.Timeout, "must be greater than zero"))
}
if c.Spec.NodeAuthorization.NodeAuthorizer.TokenTTL != nil && c.Spec.NodeAuthorization.NodeAuthorizer.TokenTTL.Duration < 0 {
allErrs = append(allErrs, field.Invalid(path.Child("tokenTTL"), c.Spec.NodeAuthorization.NodeAuthorizer.TokenTTL, "must be greater than or equal to zero"))
}
// @question: we could probably just default these settings in the model when the node-authorizer is enabled??
if c.Spec.KubeAPIServer == nil {
allErrs = append(allErrs, field.Required(field.NewPath("spec", "kubeAPIServer"), "bootstrap token authentication is not enabled in the kube-apiserver"))
} else if c.Spec.KubeAPIServer.EnableBootstrapAuthToken == nil {
allErrs = append(allErrs, field.Required(field.NewPath("spec", "kubeAPIServer", "enableBootstrapAuthToken"), "kube-apiserver has not been configured to use bootstrap tokens"))
} else if !fi.BoolValue(c.Spec.KubeAPIServer.EnableBootstrapAuthToken) {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "kubeAPIServer", "enableBootstrapAuthToken"), "bootstrap tokens in the kube-apiserver has been disabled"))
}
}
}
}
// UpdatePolicy
if c.Spec.UpdatePolicy != nil {
switch *c.Spec.UpdatePolicy {
case kops.UpdatePolicyExternal:
// Valid
default:
allErrs = append(allErrs, field.NotSupported(fieldSpec.Child("updatePolicy"), *c.Spec.UpdatePolicy, []string{kops.UpdatePolicyExternal}))
}
}
// KubeProxy
if c.Spec.KubeProxy != nil {
kubeProxyPath := fieldSpec.Child("kubeProxy")
master := c.Spec.KubeProxy.Master
for i, x := range c.Spec.KubeProxy.IPVSExcludeCIDRS {
if _, _, err := net.ParseCIDR(x); err != nil {
allErrs = append(allErrs, field.Invalid(kubeProxyPath.Child("ipvsExcludeCidrs").Index(i), x, "Invalid network CIDR"))
}
}
if master != "" && !isValidAPIServersURL(master) {
allErrs = append(allErrs, field.Invalid(kubeProxyPath.Child("master"), master, "Not a valid APIServer URL"))
}
}
// KubeAPIServer
if c.Spec.KubeAPIServer != nil {
if c.IsKubernetesGTE("1.10") {
if len(c.Spec.KubeAPIServer.AdmissionControl) > 0 {
if len(c.Spec.KubeAPIServer.DisableAdmissionPlugins) > 0 {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeAPIServer", "disableAdmissionPlugins"),
"disableAdmissionPlugins is mutually exclusive, you cannot use both admissionControl and disableAdmissionPlugins together"))
}
}
}
}
// Kubelet
allErrs = append(allErrs, validateKubelet(c.Spec.Kubelet, c, fieldSpec.Child("kubelet"))...)
allErrs = append(allErrs, validateKubelet(c.Spec.MasterKubelet, c, fieldSpec.Child("masterKubelet"))...)
// Topology support
if c.Spec.Topology != nil {
if c.Spec.Topology.Masters != "" && c.Spec.Topology.Nodes != "" {
allErrs = append(allErrs, IsValidValue(fieldSpec.Child("topology", "masters"), &c.Spec.Topology.Masters, kops.SupportedTopologies)...)
allErrs = append(allErrs, IsValidValue(fieldSpec.Child("topology", "nodes"), &c.Spec.Topology.Nodes, kops.SupportedTopologies)...)
} else {
allErrs = append(allErrs, field.Required(fieldSpec.Child("masters"), "topology requires non-nil values for masters and nodes"))
}
if c.Spec.Topology.Bastion != nil {
bastion := c.Spec.Topology.Bastion
if c.Spec.Topology.Masters == kops.TopologyPublic || c.Spec.Topology.Nodes == kops.TopologyPublic {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("topology", "bastion"), "bastion requires masters and nodes to have private topology"))
}
if bastion.IdleTimeoutSeconds != nil && *bastion.IdleTimeoutSeconds <= 0 {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("topology", "bastion", "idleTimeoutSeconds"), *bastion.IdleTimeoutSeconds, "bastion idleTimeoutSeconds should be greater than zero"))
}
if bastion.IdleTimeoutSeconds != nil && *bastion.IdleTimeoutSeconds > 3600 {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("topology", "bastion", "idleTimeoutSeconds"), *bastion.IdleTimeoutSeconds, "bastion idleTimeoutSeconds cannot be greater than one hour"))
}
}
}
// Egress specification support
{
for i, s := range c.Spec.Subnets {
if s.Egress == "" {
continue
}
fieldSubnet := fieldSpec.Child("subnets").Index(i)
if !strings.HasPrefix(s.Egress, "nat-") && !strings.HasPrefix(s.Egress, "i-") && s.Egress != kops.EgressExternal {
allErrs = append(allErrs, field.Invalid(fieldSubnet.Child("egress"), s.Egress, "egress must be of type NAT Gateway or NAT EC2 Instance or 'External'"))
}
if s.Egress != kops.EgressExternal && s.Type != "Private" {
allErrs = append(allErrs, field.Forbidden(fieldSubnet.Child("egress"), "egress can only be specified for private subnets"))
}
}
}
// Etcd
{
fieldEtcdClusters := fieldSpec.Child("etcdClusters")
if len(c.Spec.EtcdClusters) == 0 {
allErrs = append(allErrs, field.Required(fieldEtcdClusters, ""))
} else {
for i, x := range c.Spec.EtcdClusters {
allErrs = append(allErrs, validateEtcdClusterSpecLegacy(x, fieldEtcdClusters.Index(i))...)
}
allErrs = append(allErrs, validateEtcdTLS(c.Spec.EtcdClusters, fieldEtcdClusters)...)
allErrs = append(allErrs, validateEtcdStorage(c.Spec.EtcdClusters, fieldEtcdClusters)...)
}
}
allErrs = append(allErrs, newValidateCluster(c)...)
return allErrs
}
// validateSubnetCIDR reports whether subnetCIDR lies within the primary
// networkCIDR or any of the additional network CIDRs assigned to the cluster.
func validateSubnetCIDR(networkCIDR *net.IPNet, additionalNetworkCIDRs []*net.IPNet, subnetCIDR *net.IPNet) bool {
	// Treat the primary CIDR and the additional CIDRs uniformly.
	candidates := append([]*net.IPNet{networkCIDR}, additionalNetworkCIDRs...)
	for _, cidr := range candidates {
		if subnet.BelongsTo(cidr, subnetCIDR) {
			return true
		}
	}
	return false
}
// validateEtcdClusterSpecLegacy checks a single etcd cluster spec: it must be
// named, have a non-zero odd member count, a supported version, and valid members.
func validateEtcdClusterSpecLegacy(spec *kops.EtcdClusterSpec, fieldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if spec.Name == "" {
		allErrs = append(allErrs, field.Required(fieldPath.Child("name"), "etcdCluster did not have name"))
	}
	switch memberCount := len(spec.Members); {
	case memberCount == 0:
		allErrs = append(allErrs, field.Required(fieldPath.Child("members"), "No members defined in etcd cluster"))
	case memberCount%2 == 0:
		// Not technically a requirement, but an even-sized quorum offers no
		// additional fault tolerance, so reject it.
		allErrs = append(allErrs, field.Invalid(fieldPath.Child("members"), memberCount, "Should be an odd number of master-zones for quorum. Use --zones and --master-zones to declare node zones and master zones separately"))
	}
	allErrs = append(allErrs, validateEtcdVersion(spec, fieldPath, nil)...)
	for _, member := range spec.Members {
		allErrs = append(allErrs, validateEtcdMemberSpec(member, fieldPath)...)
	}
	return allErrs
}
// validateEtcdTLS checks the TLS settings for etcd are valid: either every
// etcd cluster has TLS enabled or none of them do.
func validateEtcdTLS(specs []*kops.EtcdClusterSpec, fieldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	tlsEnabled := 0
	for _, spec := range specs {
		if spec.EnableEtcdTLS {
			tlsEnabled++
		}
	}
	// A mixed configuration (some clusters with TLS, some without) is forbidden.
	if tlsEnabled > 0 && tlsEnabled < len(specs) {
		allErrs = append(allErrs, field.Forbidden(fieldPath.Index(0).Child("enableEtcdTLS"), "both etcd clusters must have TLS enabled or none at all"))
	}
	return allErrs
}
// validateEtcdStorage is responsible for checking that every etcd cluster
// declares the same storage version; the first cluster's version is used as
// the reference.
func validateEtcdStorage(specs []*kops.EtcdClusterSpec, fieldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	// Guard against an empty slice: indexing specs[0] below would panic.
	if len(specs) == 0 {
		return allErrs
	}
	version := specs[0].Version
	for i, x := range specs {
		if x.Version != "" && x.Version != version {
			allErrs = append(allErrs, field.Forbidden(fieldPath.Index(i).Child("version"), fmt.Sprintf("cluster: %q, has a different storage version: %q, both must be the same", x.Name, x.Version)))
		}
	}
	return allErrs
}
// validateEtcdVersion is responsible for validating the storage version of etcd.
// A nil minimalVersion means any parseable, supported version is accepted.
// @TODO semvar package doesn't appear to ignore a 'v' in v1.1.1; could be a problem later down the line
func validateEtcdVersion(spec *kops.EtcdClusterSpec, fieldPath *field.Path, minimalVersion *semver.Version) field.ErrorList {
	// @check if the storage is specified that it's valid
	if minimalVersion == nil {
		zero := semver.MustParse("0.0.0")
		minimalVersion = &zero
	}
	// An unset version falls back to the default etcd2 release.
	version := spec.Version
	if version == "" {
		version = components.DefaultEtcd2Version
	}
	parsed, err := semver.Parse(strings.TrimPrefix(version, "v"))
	if err != nil {
		return field.ErrorList{field.Invalid(fieldPath.Child("version"), version, "the storage version is invalid")}
	}
	// we only support v3 and v2 for now
	if parsed.Major != 2 && parsed.Major != 3 {
		return field.ErrorList{field.Invalid(fieldPath.Child("version"), version, "unsupported storage version, we only support major versions 2 and 3")}
	}
	if parsed.LT(*minimalVersion) {
		return field.ErrorList{field.Invalid(fieldPath.Child("version"), version, fmt.Sprintf("minimum version required is %s", minimalVersion.String()))}
	}
	return nil
}
// validateEtcdMemberSpec is responsible for validating a single etcd cluster
// member: both a name and an instance group must be present.
func validateEtcdMemberSpec(spec *kops.EtcdMemberSpec, fieldPath *field.Path) field.ErrorList {
	errs := field.ErrorList{}
	if spec.Name == "" {
		errs = append(errs, field.Required(fieldPath.Child("name"), "etcdMember did not have name"))
	}
	if fi.StringValue(spec.InstanceGroup) == "" {
		errs = append(errs, field.Required(fieldPath.Child("instanceGroup"), "etcdMember did not have instanceGroup"))
	}
	return errs
}
// DeepValidate is responsible for validating the instancegroups within the cluster spec
func DeepValidate(c *kops.Cluster, groups []*kops.InstanceGroup, strict bool) error {
	if errs := ValidateCluster(c, strict); len(errs) != 0 {
		return errs.ToAggregate()
	}
	if len(groups) == 0 {
		return fmt.Errorf("must configure at least one InstanceGroup")
	}
	// The cluster needs at least one master group and at least one node group.
	var hasMaster, hasNode bool
	for _, group := range groups {
		if group.IsMaster() {
			hasMaster = true
		} else {
			hasNode = true
		}
	}
	if !hasMaster {
		return fmt.Errorf("must configure at least one Master InstanceGroup")
	}
	if !hasNode {
		return fmt.Errorf("must configure at least one Node InstanceGroup")
	}
	for _, group := range groups {
		errs := CrossValidateInstanceGroup(group, c, strict)
		// Additional cloud-specific validation rules,
		// such as making sure that identifiers match the expected formats for the given cloud
		switch kops.CloudProviderID(c.Spec.CloudProvider) {
		case kops.CloudProviderAWS:
			errs = append(errs, awsValidateInstanceGroup(group)...)
		default:
			if len(group.Spec.Volumes) > 0 {
				errs = append(errs, field.Forbidden(field.NewPath("spec", "volumes"), "instancegroup volumes are only available with aws at present"))
			}
		}
		if len(errs) != 0 {
			return errs.ToAggregate()
		}
	}
	return nil
}
// validateKubelet checks a kubelet config section against flags that were
// removed in, or require, specific Kubernetes versions.
func validateKubelet(k *kops.KubeletConfigSpec, c *kops.Cluster, kubeletPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if k == nil {
		return allErrs
	}
	// --api-servers flag was removed in Kubernetes 1.6.
	if k.APIServers != "" {
		allErrs = append(allErrs, field.Forbidden(
			kubeletPath.Child("apiServers"),
			"api-servers flag was removed in 1.6"))
	}
	// --require-kubeconfig flag was removed in Kubernetes 1.10.
	if c.IsKubernetesGTE("1.10") && k.RequireKubeconfig != nil {
		allErrs = append(allErrs, field.Forbidden(
			kubeletPath.Child("requireKubeconfig"),
			"require-kubeconfig flag was removed in 1.10. (Please be sure you are not using a cluster config from `kops get cluster --full`)"))
	}
	// Bootstrap kubeconfigs depend on the apiserver configuration.
	if k.BootstrapKubeconfig != "" && c.Spec.KubeAPIServer == nil {
		allErrs = append(allErrs, field.Required(kubeletPath.Root().Child("spec").Child("kubeAPIServer"), "bootstrap token require the NodeRestriction admissions controller"))
	}
	if k.TopologyManagerPolicy != "" {
		allErrs = append(allErrs, IsValidValue(kubeletPath.Child("topologyManagerPolicy"), &k.TopologyManagerPolicy, []string{"none", "best-effort", "restricted", "single-numa-node"})...)
		if !c.IsKubernetesGTE("1.18") {
			allErrs = append(allErrs, field.Forbidden(kubeletPath.Child("topologyManagerPolicy"), "topologyManagerPolicy requires at least Kubernetes 1.18"))
		}
	}
	return allErrs
}
|
package ground
import (
"fmt"
"github.com/pkg/errors"
rbac "k8s.io/api/rbac/v1beta1"
storage "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
v1 "github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1"
openstack_project "github.com/sapcc/kubernikus/pkg/client/openstack/project"
"github.com/sapcc/kubernikus/pkg/controller/config"
"github.com/sapcc/kubernikus/pkg/controller/ground/bootstrap"
"github.com/sapcc/kubernikus/pkg/controller/ground/bootstrap/csi"
"github.com/sapcc/kubernikus/pkg/controller/ground/bootstrap/dns"
"github.com/sapcc/kubernikus/pkg/controller/ground/bootstrap/gpu"
"github.com/sapcc/kubernikus/pkg/util"
"github.com/sapcc/kubernikus/pkg/version"
)
// SeedKluster installs the baseline add-ons into a freshly created kluster:
// bootstrap-token RBAC, kubernikus admin/member bindings, cinder storage
// classes, DNS, GPU support, the cinder CSI plugin (for >= 1.20), and the
// OpenStack role bindings. Steps run in order; the first failure aborts
// seeding with a wrapped error.
func SeedKluster(clients config.Clients, factories config.Factories, images version.ImageRegistry, kluster *v1.Kluster) error {
	kubernetes, err := clients.Satellites.ClientFor(kluster)
	if err != nil {
		return err
	}
	if err := SeedAllowBootstrapTokensToPostCSRs(kubernetes); err != nil {
		return errors.Wrap(err, "seed allow bootstrap tokens to post CSRs")
	}
	if err := SeedAutoApproveNodeBootstrapTokens(kubernetes); err != nil {
		return errors.Wrap(err, "seed auto approve node bootstrap tokens")
	}
	if err := SeedAutoRenewalNodeCertificates(kubernetes); err != nil {
		return errors.Wrap(err, "seed auto renewal node certificates")
	}
	if err := SeedKubernikusAdmin(kubernetes); err != nil {
		return errors.Wrap(err, "seed kubernikus admin")
	}
	if err := SeedKubernikusMember(kubernetes); err != nil {
		return errors.Wrap(err, "seed kubernikus member")
	}
	if !kluster.Spec.NoCloud {
		openstack, err := factories.Openstack.ProjectAdminClientFor(kluster.Account())
		if err != nil {
			return err
		}
		// CSI replaces the in-tree cinder provisioner from 1.20 onwards.
		useCSI, _ := util.KlusterVersionConstraint(kluster, ">= 1.20")
		if err := SeedCinderStorageClasses(kubernetes, openstack, useCSI); err != nil {
			return errors.Wrap(err, "seed cinder storage classes")
		}
	}
	if err := SeedAllowApiserverToAccessKubeletAPI(kubernetes); err != nil {
		return errors.Wrap(err, "seed allow apiserver access to kubelet api")
	}
	// Hoist the per-version image info: it is consulted several times below.
	versionedImages := images.Versions[kluster.Spec.Version]
	coreDNSImage := ""
	if versionedImages.CoreDNS.Repository != "" && versionedImages.CoreDNS.Tag != "" {
		coreDNSImage = versionedImages.CoreDNS.Repository + ":" + versionedImages.CoreDNS.Tag
	}
	// Pick the DNS add-on matching the kluster's Kubernetes version.
	if ok, _ := util.KlusterVersionConstraint(kluster, ">= 1.16"); ok {
		if err := dns.SeedCoreDNS116(kubernetes, coreDNSImage, kluster.Spec.DNSDomain, kluster.Spec.DNSAddress); err != nil {
			return errors.Wrap(err, "seed coredns")
		}
	} else if ok, _ := util.KlusterVersionConstraint(kluster, ">= 1.13"); ok {
		if err := dns.SeedCoreDNS(kubernetes, coreDNSImage, kluster.Spec.DNSDomain, kluster.Spec.DNSAddress); err != nil {
			return errors.Wrap(err, "seed coredns")
		}
	} else {
		if err := dns.SeedKubeDNS(kubernetes, "", "", kluster.Spec.DNSDomain, kluster.Spec.DNSAddress); err != nil {
			return errors.Wrap(err, "seed kubedns")
		}
	}
	if ok, _ := util.KlusterVersionConstraint(kluster, ">= 1.10"); ok {
		if err := gpu.SeedGPUSupport(kubernetes); err != nil {
			return errors.Wrap(err, "seed GPU support")
		}
	}
	if ok, _ := util.KlusterVersionConstraint(kluster, ">= 1.20"); ok {
		dynamicKubernetes, err := clients.Satellites.DynamicClientFor(kluster)
		if err != nil {
			return errors.Wrap(err, "dynamic client")
		}
		klusterSecret, err := util.KlusterSecret(clients.Kubernetes, kluster)
		if err != nil {
			return errors.Wrap(err, "get kluster secret")
		}
		if err := csi.SeedCinderCSIPlugin(kubernetes, dynamicKubernetes, klusterSecret, versionedImages); err != nil {
			return errors.Wrap(err, "seed cinder CSI plugin")
		}
	}
	if err := SeedOpenStackClusterRoleBindings(kubernetes); err != nil {
		return errors.Wrap(err, "seed openstack cluster role bindings")
	}
	return nil
}
// SeedCinderStorageClasses creates the default cinder storage class plus one
// class per availability zone reported by OpenStack.
func SeedCinderStorageClasses(client clientset.Interface, openstack openstack_project.ProjectClient, useCSI bool) error {
	if err := createStorageClass(client, "cinder-default", "", true, useCSI); err != nil {
		return err
	}
	metadata, err := openstack.GetMetadata()
	if err != nil {
		return err
	}
	for _, zone := range metadata.AvailabilityZones {
		// Zone classes are named after the last character of the AZ name
		// (e.g. "eu-de-1a" -> "cinder-zone-a"); assumes non-empty names — TODO confirm.
		className := fmt.Sprintf("cinder-zone-%s", zone.Name[len(zone.Name)-1:])
		if err := createStorageClass(client, className, zone.Name, false, useCSI); err != nil {
			return err
		}
	}
	return nil
}
// createStorageClass creates (or, if it already exists, updates) a cinder
// storage class. isDefault marks it as the cluster default via annotation;
// avz pins volumes to an availability zone; useCSI switches to the
// out-of-tree CSI provisioner and enables volume expansion.
func createStorageClass(client clientset.Interface, name, avz string, isDefault bool, useCSI bool) error {
	provisioner := "kubernetes.io/cinder"
	expansion := false
	if useCSI {
		provisioner = "cinder.csi.openstack.org"
		expansion = true
	}
	// Zone-less classes defer binding until a consumer pod is scheduled.
	bindingMode := storage.VolumeBindingImmediate
	if avz == "" {
		bindingMode = storage.VolumeBindingWaitForFirstConsumer
	}
	class := storage.StorageClass{
		ObjectMeta:           metav1.ObjectMeta{Name: name},
		Provisioner:          provisioner,
		VolumeBindingMode:    &bindingMode,
		AllowVolumeExpansion: &expansion,
	}
	if isDefault {
		class.Annotations = map[string]string{
			"storageclass.kubernetes.io/is-default-class": "true",
		}
	}
	if avz != "" {
		class.Parameters = map[string]string{
			"availability": avz,
		}
	}
	_, err := client.StorageV1().StorageClasses().Create(&class)
	if err == nil {
		return nil
	}
	if !apierrors.IsAlreadyExists(err) {
		return fmt.Errorf("unable to create storage class: %v", err)
	}
	// The class exists already: update it in place instead.
	if _, err := client.StorageV1().StorageClasses().Update(&class); err != nil {
		return fmt.Errorf("unable to update storage class: %v", err)
	}
	return nil
}
// DeleteCinderStorageClasses removes the default cinder storage class and the
// per-availability-zone classes; classes that are already gone are ignored.
func DeleteCinderStorageClasses(client clientset.Interface, openstack openstack_project.ProjectClient) error {
	// deleteClass removes one storage class, treating "not found" as success.
	deleteClass := func(name string) error {
		err := client.StorageV1().StorageClasses().Delete(name, &metav1.DeleteOptions{})
		if err != nil && !apierrors.IsNotFound(err) {
			return err
		}
		return nil
	}
	if err := deleteClass("cinder-default"); err != nil {
		return err
	}
	metadata, err := openstack.GetMetadata()
	if err != nil {
		return err
	}
	for _, zone := range metadata.AvailabilityZones {
		// Mirrors the naming in SeedCinderStorageClasses: suffix is the AZ name's last character.
		if err := deleteClass(fmt.Sprintf("cinder-zone-%s", zone.Name[len(zone.Name)-1:])); err != nil {
			return err
		}
	}
	return nil
}
// SeedKubernikusAdmin grants cluster-admin to members of the OpenStack
// "os:kubernetes_admin" group via a ClusterRoleBinding.
func SeedKubernikusAdmin(client clientset.Interface) error {
	binding := &rbac.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "kubernikus:admin"},
		RoleRef: rbac.RoleRef{
			APIGroup: rbac.GroupName,
			Kind:     "ClusterRole",
			Name:     "cluster-admin",
		},
		Subjects: []rbac.Subject{
			{Kind: rbac.GroupKind, Name: "os:kubernetes_admin"},
		},
	}
	return bootstrap.CreateOrUpdateClusterRoleBinding(client, binding)
}
// SeedKubernikusMember grants the built-in "edit" role in the default
// namespace to members of the OpenStack "os:kubernetes_member" group.
//
// Note: "edit" ships with Kubernetes as a user-facing ClusterRole, not a
// namespaced Role. Referencing it with Kind "Role" does not resolve and the
// binding grants nothing, so the RoleRef kind must be "ClusterRole".
func SeedKubernikusMember(client clientset.Interface) error {
	return bootstrap.CreateOrUpdateRoleBinding(client, &rbac.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "kubernikus:member",
			Namespace: "default",
		},
		RoleRef: rbac.RoleRef{
			APIGroup: rbac.GroupName,
			Kind:     "ClusterRole",
			Name:     "edit",
		},
		Subjects: []rbac.Subject{
			{
				Kind: rbac.GroupKind,
				Name: "os:kubernetes_member",
			},
		},
	})
}
// SeedAllowBootstrapTokensToPostCSRs lets bootstrap-token users post node
// CSRs by binding system:node-bootstrapper to the system:bootstrappers group.
func SeedAllowBootstrapTokensToPostCSRs(client clientset.Interface) error {
	binding := &rbac.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "kubernikus:kubelet-bootstrap"},
		RoleRef: rbac.RoleRef{
			APIGroup: rbac.GroupName,
			Kind:     "ClusterRole",
			Name:     "system:node-bootstrapper",
		},
		Subjects: []rbac.Subject{
			{Kind: rbac.GroupKind, Name: "system:bootstrappers"},
		},
	}
	return bootstrap.CreateOrUpdateClusterRoleBinding(client, binding)
}
// SeedAllowApiserverToAccessKubeletAPI authorizes the "apiserver" user to use
// the kubelet API by binding the system:kubelet-api-admin ClusterRole to it.
func SeedAllowApiserverToAccessKubeletAPI(client clientset.Interface) error {
	binding := &rbac.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "kubernikus:apiserver-kubeletapi"},
		RoleRef: rbac.RoleRef{
			APIGroup: rbac.GroupName,
			Kind:     "ClusterRole",
			Name:     "system:kubelet-api-admin",
		},
		Subjects: []rbac.Subject{
			{Kind: rbac.UserKind, Name: "apiserver"},
		},
	}
	return bootstrap.CreateOrUpdateClusterRoleBinding(client, binding)
}
// SeedAutoApproveNodeBootstrapTokens installs a ClusterRole that permits
// approving node client CSRs and binds it to the system:bootstrappers group,
// so kubelet bootstrap CSRs are approved automatically.
func SeedAutoApproveNodeBootstrapTokens(client clientset.Interface) error {
	err := bootstrap.CreateOrUpdateClusterRole(client, &rbac.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name: "kubernikus:approve-node-client-csr",
		},
		Rules: []rbac.PolicyRule{
			{
				Verbs:     []string{"create"},
				APIGroups: []string{"certificates.k8s.io"},
				Resources: []string{"certificatesigningrequests/nodeclient"},
			},
		},
	})
	if err != nil {
		return err
	}
	return bootstrap.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: "kubernikus:node-client-csr-autoapprove",
		},
		RoleRef: rbac.RoleRef{
			APIGroup: rbac.GroupName,
			Kind:     "ClusterRole",
			Name:     "kubernikus:approve-node-client-csr",
		},
		Subjects: []rbac.Subject{
			{
				// Use the rbac constant rather than the raw "Group" literal,
				// matching the other seed functions in this file.
				Kind: rbac.GroupKind,
				Name: "system:bootstrappers",
			},
		},
	})
}
// SeedAutoRenewalNodeCertificates permits nodes to renew their own client
// certificates by binding the selfnodeclient CSR role to system:nodes.
func SeedAutoRenewalNodeCertificates(client clientset.Interface) error {
	role := &rbac.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name: "system:certificates.k8s.io:certificatesigningrequests:selfnodeclient",
		},
		Rules: []rbac.PolicyRule{
			{
				Verbs:     []string{"create"},
				APIGroups: []string{"certificates.k8s.io"},
				Resources: []string{"certificatesigningrequests/selfnodeclient"},
			},
		},
	}
	if err := bootstrap.CreateOrUpdateClusterRole(client, role); err != nil {
		return err
	}
	binding := &rbac.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: "kubernikus:auto-approve-renewals-for-nodes",
		},
		RoleRef: rbac.RoleRef{
			APIGroup: rbac.GroupName,
			Kind:     "ClusterRole",
			Name:     "system:certificates.k8s.io:certificatesigningrequests:selfnodeclient",
		},
		Subjects: []rbac.Subject{
			{
				APIGroup: rbac.GroupName,
				Kind:     "Group",
				Name:     "system:nodes",
			},
		},
	}
	return bootstrap.CreateOrUpdateClusterRoleBinding(client, binding)
}
// SeedOpenStackClusterRoleBindings maps OpenStack keystone roles onto
// Kubernetes ClusterRoles: kubernetes_admin (plus the Dex technical user)
// gets cluster-admin, kubernetes_member gets view.
func SeedOpenStackClusterRoleBindings(client clientset.Interface) error {
	adminBinding := &rbac.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: "kubernikus:openstack-kubernetes-admin",
		},
		RoleRef: rbac.RoleRef{
			APIGroup: rbac.GroupName,
			Kind:     "ClusterRole",
			Name:     "cluster-admin",
		},
		Subjects: []rbac.Subject{
			{
				Kind: "Group",
				Name: "openstack_role:kubernetes_admin",
			},
			{
				Kind: "User",
				// It is the marshall & b64enc of the protobuf message IDTokenSubject: https://github.com/dexidp/dex/blob/master/server/oauth2.go#L300
				// User ID: 00000000-0000-0000-0000-000000000001 ConnID: local
				Name: "CiQwMDAwMDAwMC0wMDAwLTAwMDAtMDAwMC0wMDAwMDAwMDAwMDESBWxvY2Fs",
				// For claims, we are using "sub" instead of "email" since some technical users missing emails
				// If we switch to email, we can directly use email as Name field above
			},
		},
	}
	if err := bootstrap.CreateOrUpdateClusterRoleBinding(client, adminBinding); err != nil {
		return err
	}
	memberBinding := &rbac.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: "kubernikus:openstack-kubernetes-member",
		},
		RoleRef: rbac.RoleRef{
			APIGroup: rbac.GroupName,
			Kind:     "ClusterRole",
			Name:     "view",
		},
		Subjects: []rbac.Subject{
			{
				Kind: "Group",
				Name: "openstack_role:kubernetes_member",
			},
		},
	}
	return bootstrap.CreateOrUpdateClusterRoleBinding(client, memberBinding)
}
turns Role into ClusterRole (#546)
There is no `Role` named `edit`. As a result, users who are assigned the `kubernikus:member` RoleBinding get an error saying that the `edit` Role can't be found.
Either this was always broken, or the `Role` was changed to a `ClusterRole` without us noticing. `edit` is a user-facing role that comes with Kubernetes itself. For more info, see: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles
Either way, it needs to be a `ClusterRole` now.
package ground
import (
"fmt"
"github.com/pkg/errors"
rbac "k8s.io/api/rbac/v1beta1"
storage "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
v1 "github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1"
openstack_project "github.com/sapcc/kubernikus/pkg/client/openstack/project"
"github.com/sapcc/kubernikus/pkg/controller/config"
"github.com/sapcc/kubernikus/pkg/controller/ground/bootstrap"
"github.com/sapcc/kubernikus/pkg/controller/ground/bootstrap/csi"
"github.com/sapcc/kubernikus/pkg/controller/ground/bootstrap/dns"
"github.com/sapcc/kubernikus/pkg/controller/ground/bootstrap/gpu"
"github.com/sapcc/kubernikus/pkg/util"
"github.com/sapcc/kubernikus/pkg/version"
)
// SeedKluster installs the baseline configuration into a freshly created
// customer cluster ("kluster"): RBAC bindings, cinder storage classes, DNS,
// GPU support and the cinder CSI plugin, selected according to the kluster's
// Kubernetes version. Seeding stops at the first error and is not rolled back.
func SeedKluster(clients config.Clients, factories config.Factories, images version.ImageRegistry, kluster *v1.Kluster) error {
	// Client that talks to the customer cluster itself (not the control plane).
	kubernetes, err := clients.Satellites.ClientFor(kluster)
	if err != nil {
		return err
	}
	if err := SeedAllowBootstrapTokensToPostCSRs(kubernetes); err != nil {
		return errors.Wrap(err, "seed allow bootstrap tokens to post CSRs")
	}
	if err := SeedAutoApproveNodeBootstrapTokens(kubernetes); err != nil {
		return errors.Wrap(err, "seed auto approve node bootstrap tokens")
	}
	if err := SeedAutoRenewalNodeCertificates(kubernetes); err != nil {
		return errors.Wrap(err, "seed auto renewal node certificates")
	}
	if err := SeedKubernikusAdmin(kubernetes); err != nil {
		return errors.Wrap(err, "seed kubernikus admin")
	}
	if err := SeedKubernikusMember(kubernetes); err != nil {
		return errors.Wrap(err, "seed kubernikus member")
	}
	// Storage classes require OpenStack metadata and are skipped for
	// no-cloud (e.g. bare-metal/test) klusters.
	if !kluster.Spec.NoCloud {
		openstack, err := factories.Openstack.ProjectAdminClientFor(kluster.Account())
		if err != nil {
			return err
		}
		// From 1.20 on, volumes are provisioned via the cinder CSI driver.
		useCSI, _ := util.KlusterVersionConstraint(kluster, ">= 1.20")
		if err := SeedCinderStorageClasses(kubernetes, openstack, useCSI); err != nil {
			return errors.Wrap(err, "seed cinder storage classes")
		}
	}
	if err := SeedAllowApiserverToAccessKubeletAPI(kubernetes); err != nil {
		return errors.Wrap(err, "seed allow apiserver access to kubelet api")
	}
	// Empty image means "use the seed's built-in default" downstream.
	coreDNSImage := ""
	if images.Versions[kluster.Spec.Version].CoreDNS.Repository != "" &&
		images.Versions[kluster.Spec.Version].CoreDNS.Tag != "" {
		coreDNSImage = images.Versions[kluster.Spec.Version].CoreDNS.Repository + ":" + images.Versions[kluster.Spec.Version].CoreDNS.Tag
	}
	// DNS flavour by version: CoreDNS (1.16+ manifest variant), CoreDNS
	// (1.13+), or legacy kube-dns for anything older.
	if ok, _ := util.KlusterVersionConstraint(kluster, ">= 1.16"); ok {
		if err := dns.SeedCoreDNS116(kubernetes, coreDNSImage, kluster.Spec.DNSDomain, kluster.Spec.DNSAddress); err != nil {
			return errors.Wrap(err, "seed coredns")
		}
	} else if ok, _ := util.KlusterVersionConstraint(kluster, ">= 1.13"); ok {
		if err := dns.SeedCoreDNS(kubernetes, coreDNSImage, kluster.Spec.DNSDomain, kluster.Spec.DNSAddress); err != nil {
			return errors.Wrap(err, "seed coredns")
		}
	} else {
		if err := dns.SeedKubeDNS(kubernetes, "", "", kluster.Spec.DNSDomain, kluster.Spec.DNSAddress); err != nil {
			return errors.Wrap(err, "seed kubedns")
		}
	}
	if ok, _ := util.KlusterVersionConstraint(kluster, ">= 1.10"); ok {
		if err := gpu.SeedGPUSupport(kubernetes); err != nil {
			return errors.Wrap(err, "seed GPU support")
		}
	}
	if ok, _ := util.KlusterVersionConstraint(kluster, ">= 1.20"); ok {
		// The CSI plugin needs a dynamic client (CRDs) and the kluster
		// secret for cloud credentials.
		dynamicKubernetes, err := clients.Satellites.DynamicClientFor(kluster)
		if err != nil {
			return errors.Wrap(err, "dynamic client")
		}
		klusterSecret, err := util.KlusterSecret(clients.Kubernetes, kluster)
		if err != nil {
			return errors.Wrap(err, "get kluster secret")
		}
		if err := csi.SeedCinderCSIPlugin(kubernetes, dynamicKubernetes, klusterSecret, images.Versions[kluster.Spec.Version]); err != nil {
			return errors.Wrap(err, "seed cinder CSI plugin")
		}
	}
	if err := SeedOpenStackClusterRoleBindings(kubernetes); err != nil {
		return errors.Wrap(err, "seed openstack cluster role bindings")
	}
	return nil
}
// SeedCinderStorageClasses ensures the default cinder storage class and one
// class per OpenStack availability zone exist in the kluster. The provisioner
// choice (in-tree vs. CSI) is delegated to createStorageClass via useCSI.
func SeedCinderStorageClasses(client clientset.Interface, openstack openstack_project.ProjectClient, useCSI bool) error {
	// The default class is not pinned to any availability zone.
	if err := createStorageClass(client, "cinder-default", "", true, useCSI); err != nil {
		return err
	}
	meta, err := openstack.GetMetadata()
	if err != nil {
		return err
	}
	// One additional class per AZ, named after the zone's final character
	// (e.g. "eu-de-1a" becomes "cinder-zone-a").
	for _, zone := range meta.AvailabilityZones {
		suffix := zone.Name[len(zone.Name)-1:]
		className := fmt.Sprintf("cinder-zone-%s", suffix)
		if err := createStorageClass(client, className, zone.Name, false, useCSI); err != nil {
			return err
		}
	}
	return nil
}
// createStorageClass creates a cinder StorageClass with the given name, or
// updates it if it already exists. An empty avz produces a zone-agnostic
// class with late (WaitForFirstConsumer) binding; isDefault marks the class
// as the cluster default via the well-known annotation.
func createStorageClass(client clientset.Interface, name, avz string, isDefault bool, useCSI bool) error {
	// Pick the provisioner: in-tree cinder vs. the CSI driver. Only the
	// CSI driver supports volume expansion.
	var (
		provisioner = "kubernetes.io/cinder"
		expansion   = false
	)
	if useCSI {
		provisioner = "cinder.csi.openstack.org"
		expansion = true
	}
	// Zone-less classes defer binding until a consumer pod is scheduled,
	// so the volume is created in the pod's zone.
	mode := storage.VolumeBindingImmediate
	if avz == "" {
		mode = storage.VolumeBindingWaitForFirstConsumer
	}
	sc := storage.StorageClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Provisioner:          provisioner,
		VolumeBindingMode:    &mode,
		AllowVolumeExpansion: &expansion,
	}
	if isDefault {
		sc.Annotations = map[string]string{
			"storageclass.kubernetes.io/is-default-class": "true",
		}
	}
	if avz != "" {
		sc.Parameters = map[string]string{
			"availability": avz,
		}
	}
	_, err := client.StorageV1().StorageClasses().Create(&sc)
	if err == nil {
		return nil
	}
	if !apierrors.IsAlreadyExists(err) {
		return fmt.Errorf("unable to create storage class: %v", err)
	}
	// Already present: reconcile it to the desired spec instead.
	if _, err := client.StorageV1().StorageClasses().Update(&sc); err != nil {
		return fmt.Errorf("unable to update storage class: %v", err)
	}
	return nil
}
// DeleteCinderStorageClasses removes the seeded cinder storage classes (the
// default one and one per availability zone). Deletion is idempotent:
// already-missing classes are not treated as errors.
func DeleteCinderStorageClasses(client clientset.Interface, openstack openstack_project.ProjectClient) error {
	// deleteClass tolerates NotFound so repeated teardowns succeed.
	deleteClass := func(name string) error {
		err := client.StorageV1().StorageClasses().Delete(name, &metav1.DeleteOptions{})
		if err != nil && !apierrors.IsNotFound(err) {
			return err
		}
		return nil
	}
	if err := deleteClass("cinder-default"); err != nil {
		return err
	}
	meta, err := openstack.GetMetadata()
	if err != nil {
		return err
	}
	for _, zone := range meta.AvailabilityZones {
		// Mirror the naming scheme used when seeding: the class name is
		// derived from the zone's final character.
		className := fmt.Sprintf("cinder-zone-%s", zone.Name[len(zone.Name)-1:])
		if err := deleteClass(className); err != nil {
			return err
		}
	}
	return nil
}
// SeedKubernikusAdmin grants the "os:kubernetes_admin" group full
// cluster-admin rights via a ClusterRoleBinding.
func SeedKubernikusAdmin(client clientset.Interface) error {
	binding := rbac.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "kubernikus:admin"},
		Subjects: []rbac.Subject{
			{Kind: rbac.GroupKind, Name: "os:kubernetes_admin"},
		},
		RoleRef: rbac.RoleRef{
			APIGroup: rbac.GroupName,
			Kind:     "ClusterRole",
			Name:     "cluster-admin",
		},
	}
	return bootstrap.CreateOrUpdateClusterRoleBinding(client, &binding)
}
// SeedKubernikusMember grants the "os:kubernetes_member" group the built-in
// "edit" ClusterRole, scoped to the default namespace via a RoleBinding.
func SeedKubernikusMember(client clientset.Interface) error {
	binding := rbac.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "kubernikus:member",
			Namespace: "default",
		},
		Subjects: []rbac.Subject{
			{Kind: rbac.GroupKind, Name: "os:kubernetes_member"},
		},
		RoleRef: rbac.RoleRef{
			APIGroup: rbac.GroupName,
			Kind:     "ClusterRole",
			Name:     "edit",
		},
	}
	return bootstrap.CreateOrUpdateRoleBinding(client, &binding)
}
// SeedAllowBootstrapTokensToPostCSRs lets nodes joining with a bootstrap
// token submit certificate signing requests, by binding the
// "system:bootstrappers" group to the node-bootstrapper ClusterRole.
func SeedAllowBootstrapTokensToPostCSRs(client clientset.Interface) error {
	binding := rbac.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "kubernikus:kubelet-bootstrap"},
		Subjects: []rbac.Subject{
			{Kind: rbac.GroupKind, Name: "system:bootstrappers"},
		},
		RoleRef: rbac.RoleRef{
			APIGroup: rbac.GroupName,
			Kind:     "ClusterRole",
			Name:     "system:node-bootstrapper",
		},
	}
	return bootstrap.CreateOrUpdateClusterRoleBinding(client, &binding)
}
// SeedAllowApiserverToAccessKubeletAPI lets the API server (authenticating
// as the "apiserver" user) reach the kubelet API, e.g. for logs and exec,
// by binding it to the kubelet-api-admin ClusterRole.
func SeedAllowApiserverToAccessKubeletAPI(client clientset.Interface) error {
	binding := rbac.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "kubernikus:apiserver-kubeletapi"},
		Subjects: []rbac.Subject{
			{Kind: rbac.UserKind, Name: "apiserver"},
		},
		RoleRef: rbac.RoleRef{
			APIGroup: rbac.GroupName,
			Kind:     "ClusterRole",
			Name:     "system:kubelet-api-admin",
		},
	}
	return bootstrap.CreateOrUpdateClusterRoleBinding(client, &binding)
}
// SeedAutoApproveNodeBootstrapTokens creates a ClusterRole that permits
// approving node-client CSRs and binds it to the "system:bootstrappers"
// group, so CSRs from bootstrap tokens are auto-approved.
func SeedAutoApproveNodeBootstrapTokens(client clientset.Interface) error {
	err := bootstrap.CreateOrUpdateClusterRole(client, &rbac.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name: "kubernikus:approve-node-client-csr",
		},
		Rules: []rbac.PolicyRule{
			{
				Verbs:     []string{"create"},
				APIGroups: []string{"certificates.k8s.io"},
				Resources: []string{"certificatesigningrequests/nodeclient"},
			},
		},
	})
	if err != nil {
		return err
	}
	return bootstrap.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: "kubernikus:node-client-csr-autoapprove",
		},
		RoleRef: rbac.RoleRef{
			APIGroup: rbac.GroupName,
			Kind:     "ClusterRole",
			Name:     "kubernikus:approve-node-client-csr",
		},
		Subjects: []rbac.Subject{
			{
				// Use the rbac constant instead of the raw "Group" string
				// for consistency with the other seed functions.
				Kind: rbac.GroupKind,
				Name: "system:bootstrappers",
			},
		},
	})
}
// SeedAutoRenewalNodeCertificates creates a ClusterRole permitting nodes to
// create self-node-client CSRs and binds it to the "system:nodes" group, so
// kubelet client certificates are renewed automatically.
func SeedAutoRenewalNodeCertificates(client clientset.Interface) error {
	err := bootstrap.CreateOrUpdateClusterRole(client, &rbac.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name: "system:certificates.k8s.io:certificatesigningrequests:selfnodeclient",
		},
		Rules: []rbac.PolicyRule{
			{
				Verbs:     []string{"create"},
				APIGroups: []string{"certificates.k8s.io"},
				Resources: []string{"certificatesigningrequests/selfnodeclient"},
			},
		},
	})
	if err != nil {
		return err
	}
	return bootstrap.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: "kubernikus:auto-approve-renewals-for-nodes",
		},
		RoleRef: rbac.RoleRef{
			APIGroup: rbac.GroupName,
			Kind:     "ClusterRole",
			Name:     "system:certificates.k8s.io:certificatesigningrequests:selfnodeclient",
		},
		Subjects: []rbac.Subject{
			{
				APIGroup: rbac.GroupName,
				// Use the rbac constant instead of the raw "Group" string
				// for consistency with the other seed functions.
				Kind: rbac.GroupKind,
				Name: "system:nodes",
			},
		},
	})
}
// SeedOpenStackClusterRoleBindings maps OpenStack Keystone roles onto
// Kubernetes RBAC: kubernetes_admin gets cluster-admin, kubernetes_member
// gets the read-only "view" ClusterRole.
func SeedOpenStackClusterRoleBindings(client clientset.Interface) error {
	err := bootstrap.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: "kubernikus:openstack-kubernetes-admin",
		},
		RoleRef: rbac.RoleRef{
			APIGroup: rbac.GroupName,
			Kind:     "ClusterRole",
			Name:     "cluster-admin",
		},
		Subjects: []rbac.Subject{
			{
				// rbac.GroupKind instead of the raw "Group" literal, for
				// consistency with the other seed functions in this file.
				Kind: rbac.GroupKind,
				Name: "openstack_role:kubernetes_admin",
			},
			{
				Kind: rbac.UserKind,
				// It is the marshall & b64enc of the protobuf message IDTokenSubject: https://github.com/dexidp/dex/blob/master/server/oauth2.go#L300
				// User ID: 00000000-0000-0000-0000-000000000001 ConnID: local
				Name: "CiQwMDAwMDAwMC0wMDAwLTAwMDAtMDAwMC0wMDAwMDAwMDAwMDESBWxvY2Fs",
				// For claims, we are using "sub" instead of "email" since some technical users missing emails
				// If we switch to email, we can directly use email as Name field above
			},
		},
	})
	if err != nil {
		return err
	}
	err = bootstrap.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: "kubernikus:openstack-kubernetes-member",
		},
		RoleRef: rbac.RoleRef{
			APIGroup: rbac.GroupName,
			Kind:     "ClusterRole",
			Name:     "view",
		},
		Subjects: []rbac.Subject{
			{
				Kind: rbac.GroupKind,
				Name: "openstack_role:kubernetes_member",
			},
		},
	})
	if err != nil {
		return err
	}
	return nil
}
|
// Copyright 2018 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build privileged_tests
package loader
import (
"context"
"fmt"
"os"
"path/filepath"
"testing"
"time"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
. "gopkg.in/check.v1"
)
// Hook up gocheck into the "go test" runner.
type LoaderTestSuite struct{}

var (
	_ = Suite(&LoaderTestSuite{})
	// contextTimeout bounds each compile/load operation in the benchmarks.
	contextTimeout = 5 * time.Minute
)

// Test wires the gocheck suite into the standard "go test" entry point.
func Test(t *testing.T) {
	TestingT(t)
}
// testEP is a minimal endpoint stand-in used to drive the loader in
// benchmarks without constructing a full Cilium endpoint.
type testEP struct {
}

// InterfaceName returns the name of the dummy link the tests attach to.
func (ep *testEP) InterfaceName() string {
	return "cilium_test"
}

// Logger returns the package-level logger regardless of subsystem.
func (ep *testEP) Logger(subsystem string) *logrus.Entry {
	return log
}

// StateDir returns the directory name used for endpoint state in tests.
func (ep *testEP) StateDir() string {
	return "test_loader"
}
// prepareEnv creates the dummy netlink device for the test endpoint and
// resolves the bpf source directories. It returns the directory info, a
// cleanup function that removes the device, and an error. On error, any
// device already created is torn down before returning.
func prepareEnv(ep *testEP) (*directoryInfo, func() error, error) {
	link := netlink.Dummy{
		LinkAttrs: netlink.LinkAttrs{
			Name: ep.InterfaceName(),
		},
	}
	// A leftover link from a previous (aborted) run is fine; reuse it.
	if err := netlink.LinkAdd(&link); err != nil {
		if !os.IsExist(err) {
			return nil, nil, fmt.Errorf("Failed to add link: %s", err)
		}
	}
	cleanupFn := func() error {
		if err := netlink.LinkDel(&link); err != nil {
			return fmt.Errorf("Failed to delete link: %s", err)
		}
		return nil
	}
	wd, err := os.Getwd()
	if err != nil {
		// Best effort teardown: the Getwd failure is the error worth
		// reporting, but don't silently drop the cleanup failure either.
		if cleanupErr := cleanupFn(); cleanupErr != nil {
			log.WithError(cleanupErr).Warning("Failed to clean up test link")
		}
		return nil, nil, fmt.Errorf("Failed to get working directory: %s", err)
	}
	// The bpf sources live three levels up from this package.
	bpfdir := filepath.Join(wd, "..", "..", "..", "bpf")
	dirs := directoryInfo{
		Library: bpfdir,
		Runtime: bpfdir,
		Output:  bpfdir,
	}
	return &dirs, cleanupFn, nil
}
// BenchmarkCompileAndLoad benchmarks the entire compilation + loading process.
func BenchmarkCompileAndLoad(b *testing.B) {
	ctx, cancel := context.WithTimeout(context.Background(), contextTimeout)
	defer cancel()

	ep := &testEP{}
	dirs, cleanup, err := prepareEnv(ep)
	if err != nil {
		b.Fatal(err)
	}
	defer cleanup()

	// Exclude the device/directory setup above from the measurement.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if err := compileAndLoad(ctx, ep, dirs); err != nil {
			b.Fatal(err)
		}
	}
}
// BenchmarkReplaceDatapath compiles the datapath program, then benchmarks only
// the loading of the program into the kernel.
func BenchmarkReplaceDatapath(b *testing.B) {
	ctx, cancel := context.WithTimeout(context.Background(), contextTimeout)
	defer cancel()

	ep := &testEP{}
	dirs, cleanup, err := prepareEnv(ep)
	if err != nil {
		b.Fatal(err)
	}
	defer cleanup()

	// Compile once up front so the loop below measures loading only.
	if err := compileDatapath(ctx, ep, dirs, false); err != nil {
		b.Fatal(err)
	}
	objPath := fmt.Sprintf("%s/%s", dirs.Output, endpointObj)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if err := replaceDatapath(ctx, ep.InterfaceName(), objPath, symbolFromEndpoint); err != nil {
			b.Fatal(err)
		}
	}
}
loader: Move device preparation to TestMain
Create a new TestMain function that sets up the Cilium test device once,
so that it isn't recreated for each unit test run.
Signed-off-by: Joe Stringer <16a9a54ddf4259952e3c118c763138e83693d7fd@cilium.io>
// Copyright 2018 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build privileged_tests
package loader
import (
"context"
"fmt"
"os"
"path/filepath"
"testing"
"time"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
. "gopkg.in/check.v1"
)
// Hook up gocheck into the "go test" runner.
type LoaderTestSuite struct{}

var (
	_ = Suite(&LoaderTestSuite{})
	// contextTimeout bounds each compile/load operation in the benchmarks.
	contextTimeout = 5 * time.Minute
	// dirInfo and ep are shared fixtures initialized once in TestMain.
	dirInfo *directoryInfo
	ep      = &testEP{}
)

// Test wires the gocheck suite into the standard "go test" entry point.
func Test(t *testing.T) {
	TestingT(t)
}
// runTests configures devices for running the whole testsuite, and runs the
// tests. It is kept separate from TestMain() so that this function can defer
// cleanups and pass the exit code of the test run to the caller which can run
// os.Exit() with the result.
func runTests(m *testing.M) (int, error) {
	var err error
	if dirInfo, err = getDirs(); err != nil {
		return 1, err
	}
	cleanup, err := prepareEnv(ep)
	if err != nil {
		return 1, fmt.Errorf("Failed to prepare environment: %s", err)
	}
	// Deferred (rather than run after m.Run()) so the device is removed
	// even if the test run panics.
	defer func() {
		if err := cleanup(); err != nil {
			log.Errorf(err.Error())
		}
	}()
	return m.Run(), nil
}

// TestMain performs one-time device setup, runs the suite, and exits with
// the suite's result code.
func TestMain(m *testing.M) {
	exitCode, err := runTests(m)
	if err != nil {
		log.Fatal(err)
	}
	os.Exit(exitCode)
}
// testEP is a minimal endpoint stand-in used to drive the loader in
// benchmarks without constructing a full Cilium endpoint.
type testEP struct {
}

// InterfaceName returns the name of the dummy link the tests attach to.
func (ep *testEP) InterfaceName() string {
	return "cilium_test"
}

// Logger returns the package-level logger regardless of subsystem.
func (ep *testEP) Logger(subsystem string) *logrus.Entry {
	return log
}

// StateDir returns the directory name used for endpoint state in tests.
func (ep *testEP) StateDir() string {
	return "test_loader"
}
// prepareEnv ensures the dummy test link for the endpoint exists and returns
// a cleanup function that removes it again.
func prepareEnv(ep *testEP) (func() error, error) {
	link := netlink.Dummy{
		LinkAttrs: netlink.LinkAttrs{
			Name: ep.InterfaceName(),
		},
	}
	// A link left over from a previous run is acceptable; reuse it.
	if err := netlink.LinkAdd(&link); err != nil && !os.IsExist(err) {
		return nil, fmt.Errorf("Failed to add link: %s", err)
	}
	teardown := func() error {
		if err := netlink.LinkDel(&link); err != nil {
			return fmt.Errorf("Failed to delete link: %s", err)
		}
		return nil
	}
	return teardown, nil
}
// getDirs locates the bpf source tree relative to the test's working
// directory and uses it for the library, runtime and output paths alike.
func getDirs() (*directoryInfo, error) {
	wd, err := os.Getwd()
	if err != nil {
		return nil, fmt.Errorf("Failed to get working directory: %s", err)
	}
	// The bpf sources live three levels up from this package.
	bpfdir := filepath.Join(wd, "..", "..", "..", "bpf")
	return &directoryInfo{
		Library: bpfdir,
		Runtime: bpfdir,
		Output:  bpfdir,
	}, nil
}
// BenchmarkCompileAndLoad benchmarks the entire compilation + loading process.
func BenchmarkCompileAndLoad(b *testing.B) {
	ctx, cancel := context.WithTimeout(context.Background(), contextTimeout)
	defer cancel()

	// ep and dirInfo are prepared once in TestMain; only the compile+load
	// work is measured.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if err := compileAndLoad(ctx, ep, dirInfo); err != nil {
			b.Fatal(err)
		}
	}
}
// BenchmarkReplaceDatapath compiles the datapath program, then benchmarks only
// the loading of the program into the kernel.
func BenchmarkReplaceDatapath(b *testing.B) {
	ctx, cancel := context.WithTimeout(context.Background(), contextTimeout)
	defer cancel()

	// Compile once up front so the loop below measures loading only.
	if err := compileDatapath(ctx, ep, dirInfo, false); err != nil {
		b.Fatal(err)
	}
	objPath := fmt.Sprintf("%s/%s", dirInfo.Output, endpointObj)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if err := replaceDatapath(ctx, ep.InterfaceName(), objPath, symbolFromEndpoint); err != nil {
			b.Fatal(err)
		}
	}
}
|
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package versioned
import (
"bytes"
"context"
"crypto/x509"
"fmt"
"io"
"net"
"net/url"
"reflect"
"sort"
"strconv"
"strings"
"text/tabwriter"
"time"
"unicode"
"github.com/fatih/camelcase"
appsv1 "k8s.io/api/apps/v1"
autoscalingv1 "k8s.io/api/autoscaling/v1"
autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
coordinationv1 "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
rbacv1 "k8s.io/api/rbac/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/duration"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/reference"
"k8s.io/klog"
"k8s.io/kubectl/pkg/describe"
"k8s.io/kubectl/pkg/scheme"
"k8s.io/kubectl/pkg/util/certificate"
deploymentutil "k8s.io/kubectl/pkg/util/deployment"
"k8s.io/kubectl/pkg/util/event"
"k8s.io/kubectl/pkg/util/fieldpath"
"k8s.io/kubectl/pkg/util/qos"
"k8s.io/kubectl/pkg/util/rbac"
resourcehelper "k8s.io/kubectl/pkg/util/resource"
"k8s.io/kubectl/pkg/util/slice"
storageutil "k8s.io/kubectl/pkg/util/storage"
)
// Each level has 2 spaces for PrefixWriter
const (
	LEVEL_0 = iota
	LEVEL_1
	LEVEL_2
	LEVEL_3
	LEVEL_4
)

// DescriberFn gives a way to easily override the function for unit testing if needed
var DescriberFn describe.DescriberFunc = Describer
// Describer returns a Describer for displaying the specified RESTMapping type or an error.
func Describer(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (describe.Describer, error) {
	clientConfig, err := restClientGetter.ToRESTConfig()
	if err != nil {
		return nil, err
	}
	// Prefer a purpose-built describer for the kind.
	if d, ok := DescriberFor(mapping.GroupVersionKind.GroupKind(), clientConfig); ok {
		return d, nil
	}
	// No dedicated describer registered: fall back to the generic one
	// driven by unstructured content.
	if d, ok := GenericDescriberFor(mapping, clientConfig); ok {
		return d, nil
	}
	// Neither worked; report the kind as undescribable.
	return nil, fmt.Errorf("no description has been implemented for %s", mapping.GroupVersionKind.String())
}
// PrefixWriter can write text at various indentation levels.
type PrefixWriter interface {
	// Write writes text with the specified indentation level.
	Write(level int, format string, a ...interface{})
	// WriteLine writes an entire line with no indentation level.
	WriteLine(a ...interface{})
	// Flush forces indentation to be reset.
	Flush()
}

// prefixWriter implements PrefixWriter
type prefixWriter struct {
	out io.Writer
}

var _ PrefixWriter = &prefixWriter{}

// NewPrefixWriter creates a new PrefixWriter.
func NewPrefixWriter(out io.Writer) PrefixWriter {
	return &prefixWriter{out: out}
}

// Write writes the formatted text, indented by two spaces per level.
func (pw *prefixWriter) Write(level int, format string, a ...interface{}) {
	// strings.Repeat replaces the original manual concatenation loop;
	// two spaces per indentation level.
	prefix := strings.Repeat("  ", level)
	fmt.Fprintf(pw.out, prefix+format, a...)
}

// WriteLine writes its arguments as a single unindented line.
func (pw *prefixWriter) WriteLine(a ...interface{}) {
	fmt.Fprintln(pw.out, a...)
}

// Flush flushes the underlying writer when it supports flushing.
func (pw *prefixWriter) Flush() {
	if f, ok := pw.out.(flusher); ok {
		f.Flush()
	}
}
// describerMap builds the registry of dedicated describers, keyed by
// GroupKind, all sharing one clientset built from clientConfig.
func describerMap(clientConfig *rest.Config) (map[schema.GroupKind]describe.Describer, error) {
	c, err := clientset.NewForConfig(clientConfig)
	if err != nil {
		return nil, err
	}

	m := map[schema.GroupKind]describe.Describer{
		{Group: corev1.GroupName, Kind: "Pod"}: &PodDescriber{c},
		{Group: corev1.GroupName, Kind: "ReplicationController"}: &ReplicationControllerDescriber{c},
		{Group: corev1.GroupName, Kind: "Secret"}: &SecretDescriber{c},
		{Group: corev1.GroupName, Kind: "Service"}: &ServiceDescriber{c},
		{Group: corev1.GroupName, Kind: "ServiceAccount"}: &ServiceAccountDescriber{c},
		{Group: corev1.GroupName, Kind: "Node"}: &NodeDescriber{c},
		{Group: corev1.GroupName, Kind: "LimitRange"}: &LimitRangeDescriber{c},
		{Group: corev1.GroupName, Kind: "ResourceQuota"}: &ResourceQuotaDescriber{c},
		{Group: corev1.GroupName, Kind: "PersistentVolume"}: &PersistentVolumeDescriber{c},
		{Group: corev1.GroupName, Kind: "PersistentVolumeClaim"}: &PersistentVolumeClaimDescriber{c},
		{Group: corev1.GroupName, Kind: "Namespace"}: &NamespaceDescriber{c},
		{Group: corev1.GroupName, Kind: "Endpoints"}: &EndpointsDescriber{c},
		{Group: corev1.GroupName, Kind: "ConfigMap"}: &ConfigMapDescriber{c},
		{Group: corev1.GroupName, Kind: "PriorityClass"}: &PriorityClassDescriber{c},
		{Group: discoveryv1beta1.GroupName, Kind: "EndpointSlice"}: &EndpointSliceDescriber{c},
		// Several kinds are registered under multiple API groups
		// (extensions, apps, networking) for backwards compatibility.
		{Group: extensionsv1beta1.GroupName, Kind: "ReplicaSet"}: &ReplicaSetDescriber{c},
		{Group: extensionsv1beta1.GroupName, Kind: "NetworkPolicy"}: &NetworkPolicyDescriber{c},
		{Group: extensionsv1beta1.GroupName, Kind: "PodSecurityPolicy"}: &PodSecurityPolicyDescriber{c},
		{Group: autoscalingv2beta2.GroupName, Kind: "HorizontalPodAutoscaler"}: &HorizontalPodAutoscalerDescriber{c},
		{Group: extensionsv1beta1.GroupName, Kind: "DaemonSet"}: &DaemonSetDescriber{c},
		{Group: extensionsv1beta1.GroupName, Kind: "Deployment"}: &DeploymentDescriber{c},
		{Group: extensionsv1beta1.GroupName, Kind: "Ingress"}: &IngressDescriber{c},
		{Group: networkingv1beta1.GroupName, Kind: "Ingress"}: &IngressDescriber{c},
		{Group: batchv1.GroupName, Kind: "Job"}: &JobDescriber{c},
		{Group: batchv1.GroupName, Kind: "CronJob"}: &CronJobDescriber{c},
		{Group: appsv1.GroupName, Kind: "StatefulSet"}: &StatefulSetDescriber{c},
		{Group: appsv1.GroupName, Kind: "Deployment"}: &DeploymentDescriber{c},
		{Group: appsv1.GroupName, Kind: "DaemonSet"}: &DaemonSetDescriber{c},
		{Group: appsv1.GroupName, Kind: "ReplicaSet"}: &ReplicaSetDescriber{c},
		{Group: certificatesv1beta1.GroupName, Kind: "CertificateSigningRequest"}: &CertificateSigningRequestDescriber{c},
		{Group: storagev1.GroupName, Kind: "StorageClass"}: &StorageClassDescriber{c},
		{Group: storagev1.GroupName, Kind: "CSINode"}: &CSINodeDescriber{c},
		{Group: policyv1beta1.GroupName, Kind: "PodDisruptionBudget"}: &PodDisruptionBudgetDescriber{c},
		{Group: rbacv1.GroupName, Kind: "Role"}: &RoleDescriber{c},
		{Group: rbacv1.GroupName, Kind: "ClusterRole"}: &ClusterRoleDescriber{c},
		{Group: rbacv1.GroupName, Kind: "RoleBinding"}: &RoleBindingDescriber{c},
		{Group: rbacv1.GroupName, Kind: "ClusterRoleBinding"}: &ClusterRoleBindingDescriber{c},
		{Group: networkingv1.GroupName, Kind: "NetworkPolicy"}: &NetworkPolicyDescriber{c},
		{Group: schedulingv1.GroupName, Kind: "PriorityClass"}: &PriorityClassDescriber{c},
	}

	return m, nil
}
// DescriberFor returns the default describe functions for each of the standard
// Kubernetes types.
func DescriberFor(kind schema.GroupKind, clientConfig *rest.Config) (describe.Describer, bool) {
	m, err := describerMap(clientConfig)
	if err != nil {
		// Client construction failed: log quietly and report "no describer".
		klog.V(1).Info(err)
		return nil, false
	}
	d, ok := m[kind]
	return d, ok
}
// GenericDescriberFor returns a generic describer for the specified mapping
// that uses only information available from runtime.Unstructured
func GenericDescriberFor(mapping *meta.RESTMapping, clientConfig *rest.Config) (describe.Describer, bool) {
	// The dynamic client fetches the resource itself.
	dyn, err := dynamic.NewForConfig(clientConfig)
	if err != nil {
		return nil, false
	}
	// The typed clientset is needed only to list the resource's events.
	cs, err := clientset.NewForConfig(clientConfig)
	if err != nil {
		return nil, false
	}
	return &genericDescriber{mapping, dyn, cs.CoreV1()}, true
}
// genericDescriber describes an arbitrary resource using its unstructured
// content plus, optionally, its associated events.
type genericDescriber struct {
	mapping *meta.RESTMapping
	dynamic dynamic.Interface
	events corev1client.EventsGetter
}

// Describe fetches the object via the dynamic client and renders its
// metadata, remaining unstructured fields and (if requested) its events.
func (g *genericDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (output string, err error) {
	obj, err := g.dynamic.Resource(g.mapping.Resource).Namespace(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	var events *corev1.EventList
	if describerSettings.ShowEvents {
		// Event lookup is best effort; a failure just omits the section.
		events, _ = g.events.Events(namespace).Search(scheme.Scheme, obj)
	}

	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", obj.GetName())
		w.Write(LEVEL_0, "Namespace:\t%s\n", obj.GetNamespace())
		printLabelsMultiline(w, "Labels", obj.GetLabels())
		printAnnotationsMultiline(w, "Annotations", obj.GetAnnotations())
		// Metadata fields already printed above are skipped here.
		printUnstructuredContent(w, LEVEL_0, obj.UnstructuredContent(), "", ".metadata.name", ".metadata.namespace", ".metadata.labels", ".metadata.annotations")
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// printUnstructuredContent recursively renders unstructured object content,
// sorted by field name. Fields whose dotted path (skipPrefix + "." + field)
// appears in skip are omitted — used to suppress metadata that was already
// printed elsewhere.
func printUnstructuredContent(w PrefixWriter, level int, content map[string]interface{}, skipPrefix string, skip ...string) {
	fields := []string{}
	for field := range content {
		fields = append(fields, field)
	}
	// Deterministic output regardless of map iteration order.
	sort.Strings(fields)

	for _, field := range fields {
		// The skip check was identical in every switch case below, so it
		// is hoisted here and performed exactly once per field.
		skipExpr := fmt.Sprintf("%s.%s", skipPrefix, field)
		if slice.ContainsString(skip, skipExpr, nil) {
			continue
		}
		value := content[field]
		switch typedValue := value.(type) {
		case map[string]interface{}:
			// Nested object: label on its own line, children indented.
			w.Write(level, "%s:\n", smartLabelFor(field))
			printUnstructuredContent(w, level+1, typedValue, skipExpr, skip...)

		case []interface{}:
			// List: objects recurse, scalars print one per line.
			w.Write(level, "%s:\n", smartLabelFor(field))
			for _, child := range typedValue {
				switch typedChild := child.(type) {
				case map[string]interface{}:
					printUnstructuredContent(w, level+1, typedChild, skipExpr, skip...)
				default:
					w.Write(level+1, "%v\n", typedChild)
				}
			}

		default:
			// Scalar: label and value on one tab-separated line.
			w.Write(level, "%s:\t%v\n", smartLabelFor(field), typedValue)
		}
	}
}
// smartLabelFor turns a camelCase field name into a human-friendly label,
// upper-casing a few well-known acronyms along the way.
func smartLabelFor(field string) string {
	// skip creating smart label if field name contains
	// special characters other than '-'
	hasSpecial := strings.IndexFunc(field, func(r rune) bool {
		return !unicode.IsLetter(r) && r != '-'
	}) != -1
	if hasSpecial {
		return field
	}

	commonAcronyms := []string{"API", "URL", "UID", "OSB", "GUID"}
	var words []string
	for _, part := range camelcase.Split(field) {
		if part == "_" {
			continue
		}
		upper := strings.ToUpper(part)
		if slice.ContainsString(commonAcronyms, upper, nil) {
			words = append(words, upper)
		} else {
			words = append(words, strings.Title(part))
		}
	}
	return strings.Join(words, " ")
}
// DefaultObjectDescriber can describe the default Kubernetes objects.
var DefaultObjectDescriber describe.ObjectDescriber

// init registers the built-in describe functions. A registration failure is
// a programmer error (duplicate or malformed describer) and aborts startup.
func init() {
	d := &Describers{}
	err := d.Add(
		describeLimitRange,
		describeQuota,
		describePod,
		describeService,
		describeReplicationController,
		describeDaemonSet,
		describeNode,
		describeNamespace,
	)
	if err != nil {
		klog.Fatalf("Cannot register describers: %v", err)
	}
	DefaultObjectDescriber = d
}
// NamespaceDescriber generates information about a namespace
type NamespaceDescriber struct {
	clientset.Interface
}

// Describe fetches the namespace plus its resource quotas and limit ranges
// and renders them. Quota/limit-range APIs that the server does not support
// (NotFound) are tolerated and simply omitted from the output.
func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	ns, err := d.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	resourceQuotaList, err := d.CoreV1().ResourceQuotas(name).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			// Server does not support resource quotas.
			// Not an error, will not show resource quotas information.
			resourceQuotaList = nil
		} else {
			return "", err
		}
	}
	limitRangeList, err := d.CoreV1().LimitRanges(name).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			// Server does not support limit ranges.
			// Not an error, will not show limit ranges information.
			limitRangeList = nil
		} else {
			return "", err
		}
	}
	return describeNamespace(ns, resourceQuotaList, limitRangeList)
}
// describeNamespace renders a namespace's name, labels, annotations and
// status, followed by optional resource-quota and limit-range sections
// (nil lists mean "server doesn't support it" and are skipped).
func describeNamespace(namespace *corev1.Namespace, resourceQuotaList *corev1.ResourceQuotaList, limitRangeList *corev1.LimitRangeList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", namespace.Name)
		printLabelsMultiline(w, "Labels", namespace.Labels)
		printAnnotationsMultiline(w, "Annotations", namespace.Annotations)
		w.Write(LEVEL_0, "Status:\t%s\n", string(namespace.Status.Phase))

		if resourceQuotaList != nil {
			w.Write(LEVEL_0, "\n")
			DescribeResourceQuotas(resourceQuotaList, w)
		}

		if limitRangeList != nil {
			w.Write(LEVEL_0, "\n")
			DescribeLimitRanges(limitRangeList, w)
		}

		return nil
	})
}
// describeLimitRangeSpec writes one tabular row per (limit type, resource)
// pair found in the spec. Each row shows min, max, default request, default
// limit and max limit/request ratio, with "-" for values that are not set.
func describeLimitRangeSpec(spec corev1.LimitRangeSpec, prefix string, w PrefixWriter) {
	// valueOrDash renders the quantity for k in rl, or "-" when unset;
	// this replaces five copy-pasted lookup/default blocks.
	valueOrDash := func(rl corev1.ResourceList, k corev1.ResourceName) string {
		if q, found := rl[k]; found {
			return q.String()
		}
		return "-"
	}
	for i := range spec.Limits {
		item := spec.Limits[i]
		// Collect every resource name mentioned in any of the five maps,
		// so each resource yields exactly one output row.
		set := map[corev1.ResourceName]bool{}
		for _, rl := range []corev1.ResourceList{item.Max, item.Min, item.Default, item.DefaultRequest, item.MaxLimitRequestRatio} {
			for k := range rl {
				set[k] = true
			}
		}
		for k := range set {
			msg := "%s%s\t%v\t%v\t%v\t%v\t%v\t%v\n"
			w.Write(LEVEL_0, msg, prefix, item.Type, k,
				valueOrDash(item.Min, k),
				valueOrDash(item.Max, k),
				valueOrDash(item.DefaultRequest, k),
				valueOrDash(item.Default, k),
				valueOrDash(item.MaxLimitRequestRatio, k))
		}
	}
}
// DescribeLimitRanges merges a set of limit range items into a single tabular description
func DescribeLimitRanges(limitRanges *corev1.LimitRangeList, w PrefixWriter) {
	if len(limitRanges.Items) == 0 {
		w.Write(LEVEL_0, "No LimitRange resource.\n")
		return
	}
	// One shared table header; every limit range contributes rows below it.
	w.Write(LEVEL_0, "Resource Limits\n Type\tResource\tMin\tMax\tDefault Request\tDefault Limit\tMax Limit/Request Ratio\n")
	w.Write(LEVEL_0, " ----\t--------\t---\t---\t---------------\t-------------\t-----------------------\n")
	for i := range limitRanges.Items {
		describeLimitRangeSpec(limitRanges.Items[i].Spec, " ", w)
	}
}
// DescribeResourceQuotas merges a set of quota items into a single tabular description of all quotas
func DescribeResourceQuotas(quotas *corev1.ResourceQuotaList, w PrefixWriter) {
	if len(quotas.Items) == 0 {
		w.Write(LEVEL_0, "No resource quota.\n")
		return
	}
	// Sort the quotas themselves so the output is deterministic.
	sort.Sort(SortableResourceQuotas(quotas.Items))
	w.Write(LEVEL_0, "Resource Quotas")
	for _, q := range quotas.Items {
		w.Write(LEVEL_0, "\n Name:\t%s\n", q.Name)
		if len(q.Spec.Scopes) > 0 {
			// Print scopes in sorted order, each followed by its help text
			// (scopes without help text are listed but get no bullet line).
			scopes := make([]string, 0, len(q.Spec.Scopes))
			for _, scope := range q.Spec.Scopes {
				scopes = append(scopes, string(scope))
			}
			sort.Strings(scopes)
			w.Write(LEVEL_0, " Scopes:\t%s\n", strings.Join(scopes, ", "))
			for _, scope := range scopes {
				helpText := helpTextForResourceQuotaScope(corev1.ResourceQuotaScope(scope))
				if len(helpText) > 0 {
					w.Write(LEVEL_0, " * %s\n", helpText)
				}
			}
		}
		w.Write(LEVEL_0, " Resource\tUsed\tHard\n")
		w.Write(LEVEL_0, " --------\t---\t---\n")
		// Sort resource names so the Used/Hard rows are stable across runs
		// (map iteration order is random).
		resources := make([]corev1.ResourceName, 0, len(q.Status.Hard))
		for resource := range q.Status.Hard {
			resources = append(resources, resource)
		}
		sort.Sort(SortableResourceNames(resources))
		for _, resource := range resources {
			hardQuantity := q.Status.Hard[resource]
			usedQuantity := q.Status.Used[resource]
			w.Write(LEVEL_0, " %s\t%s\t%s\n", string(resource), usedQuantity.String(), hardQuantity.String())
		}
	}
}
// LimitRangeDescriber generates information about a limit range
type LimitRangeDescriber struct {
	clientset.Interface // client used to fetch the LimitRange
}
// Describe fetches the named LimitRange in the given namespace and renders
// it as a human-readable table.
func (d *LimitRangeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	limitRange, err := d.CoreV1().LimitRanges(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return describeLimitRange(limitRange)
}
// describeLimitRange renders a single LimitRange (name, namespace, and one
// row per limit-type/resource pair) as a tabbed string.
func describeLimitRange(limitRange *corev1.LimitRange) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", limitRange.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", limitRange.Namespace)
		w.Write(LEVEL_0, "Type\tResource\tMin\tMax\tDefault Request\tDefault Limit\tMax Limit/Request Ratio\n")
		w.Write(LEVEL_0, "----\t--------\t---\t---\t---------------\t-------------\t-----------------------\n")
		describeLimitRangeSpec(limitRange.Spec, "", w)
		return nil
	})
}
// ResourceQuotaDescriber generates information about a resource quota
type ResourceQuotaDescriber struct {
	clientset.Interface // client used to fetch the ResourceQuota
}
// Describe fetches the named ResourceQuota in the given namespace and renders
// its scopes plus Used/Hard usage table.
func (d *ResourceQuotaDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	resourceQuota, err := d.CoreV1().ResourceQuotas(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return describeQuota(resourceQuota)
}
// helpTextForResourceQuotaScope returns a one-sentence explanation of the
// given quota scope, or "" for scopes with no registered help text.
func helpTextForResourceQuotaScope(scope corev1.ResourceQuotaScope) string {
	switch scope {
	case corev1.ResourceQuotaScopeTerminating:
		return "Matches all pods that have an active deadline. These pods have a limited lifespan on a node before being actively terminated by the system."
	case corev1.ResourceQuotaScopeNotTerminating:
		return "Matches all pods that do not have an active deadline. These pods usually include long running pods whose container command is not expected to terminate."
	case corev1.ResourceQuotaScopeBestEffort:
		return "Matches all pods that do not have resource requirements set. These pods have a best effort quality of service."
	case corev1.ResourceQuotaScopeNotBestEffort:
		return "Matches all pods that have at least one resource requirement set. These pods have a burstable or guaranteed quality of service."
	default:
		return ""
	}
}
// describeQuota renders a single ResourceQuota: name, namespace, scopes with
// their help text, and a sorted Used/Hard table of quota'd resources.
func describeQuota(resourceQuota *corev1.ResourceQuota) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", resourceQuota.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", resourceQuota.Namespace)
		if len(resourceQuota.Spec.Scopes) > 0 {
			// Scopes are sorted for deterministic output.
			scopes := make([]string, 0, len(resourceQuota.Spec.Scopes))
			for _, scope := range resourceQuota.Spec.Scopes {
				scopes = append(scopes, string(scope))
			}
			sort.Strings(scopes)
			w.Write(LEVEL_0, "Scopes:\t%s\n", strings.Join(scopes, ", "))
			for _, scope := range scopes {
				helpText := helpTextForResourceQuotaScope(corev1.ResourceQuotaScope(scope))
				if len(helpText) > 0 {
					w.Write(LEVEL_0, " * %s\n", helpText)
				}
			}
		}
		w.Write(LEVEL_0, "Resource\tUsed\tHard\n")
		w.Write(LEVEL_0, "--------\t----\t----\n")
		// Sort resource names (map iteration order is random) so rows are stable.
		resources := make([]corev1.ResourceName, 0, len(resourceQuota.Status.Hard))
		for resource := range resourceQuota.Status.Hard {
			resources = append(resources, resource)
		}
		sort.Sort(SortableResourceNames(resources))
		msg := "%v\t%v\t%v\n"
		for i := range resources {
			resource := resources[i]
			hardQuantity := resourceQuota.Status.Hard[resource]
			usedQuantity := resourceQuota.Status.Used[resource]
			w.Write(LEVEL_0, msg, resource, usedQuantity.String(), hardQuantity.String())
		}
		return nil
	})
}
// PodDescriber generates information about a pod and the replication controllers that
// create it.
type PodDescriber struct {
	clientset.Interface // client used to fetch the pod and its events
}
// Describe fetches the named pod and renders it together with (optionally)
// its events. If the pod itself cannot be fetched but ShowEvents is set and
// events referencing the pod exist, those events are rendered instead of
// returning the fetch error outright.
func (d *PodDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	pod, err := d.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		if describerSettings.ShowEvents {
			eventsInterface := d.CoreV1().Events(namespace)
			selector := eventsInterface.GetFieldSelector(&name, &namespace, nil, nil)
			options := metav1.ListOptions{FieldSelector: selector.String()}
			events, err2 := eventsInterface.List(context.TODO(), options)
			// FIX: dropped a redundant re-check of describerSettings.ShowEvents
			// here — we are already inside that guard.
			if err2 == nil && len(events.Items) > 0 {
				return tabbedString(func(out io.Writer) error {
					w := NewPrefixWriter(out)
					w.Write(LEVEL_0, "Pod '%v': error '%v', but found events.\n", name, err)
					DescribeEvents(events, w)
					return nil
				})
			}
		}
		return "", err
	}
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		if ref, err := reference.GetReference(scheme.Scheme, pod); err != nil {
			klog.Errorf("Unable to construct reference to '%#v': %v", pod, err)
		} else {
			// Events are recorded without the reference Kind; mirror pods are
			// looked up by the UID stored in their mirror annotation.
			ref.Kind = ""
			if _, isMirrorPod := pod.Annotations[corev1.MirrorPodAnnotationKey]; isMirrorPod {
				ref.UID = types.UID(pod.Annotations[corev1.MirrorPodAnnotationKey])
			}
			events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, ref)
		}
	}
	return describePod(pod, events)
}
// describePod renders a pod as a tabbed string: metadata, scheduling info,
// status, containers (init/regular/ephemeral), readiness gates, conditions,
// volumes, QoS, and optionally events.
func describePod(pod *corev1.Pod, events *corev1.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", pod.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", pod.Namespace)
		if pod.Spec.Priority != nil {
			w.Write(LEVEL_0, "Priority:\t%d\n", *pod.Spec.Priority)
		}
		if len(pod.Spec.PriorityClassName) > 0 {
			w.Write(LEVEL_0, "Priority Class Name:\t%s\n", stringOrNone(pod.Spec.PriorityClassName))
		}
		// An empty NodeName means the pod has not been scheduled yet.
		if pod.Spec.NodeName == "" {
			w.Write(LEVEL_0, "Node:\t<none>\n")
		} else {
			w.Write(LEVEL_0, "Node:\t%s\n", pod.Spec.NodeName+"/"+pod.Status.HostIP)
		}
		if pod.Status.StartTime != nil {
			w.Write(LEVEL_0, "Start Time:\t%s\n", pod.Status.StartTime.Time.Format(time.RFC1123Z))
		}
		printLabelsMultiline(w, "Labels", pod.Labels)
		printAnnotationsMultiline(w, "Annotations", pod.Annotations)
		// A pod with a deletion timestamp is being torn down; show that instead
		// of the phase.
		if pod.DeletionTimestamp != nil {
			w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", translateTimestampSince(*pod.DeletionTimestamp))
			w.Write(LEVEL_0, "Termination Grace Period:\t%ds\n", *pod.DeletionGracePeriodSeconds)
		} else {
			w.Write(LEVEL_0, "Status:\t%s\n", string(pod.Status.Phase))
		}
		if len(pod.Status.Reason) > 0 {
			w.Write(LEVEL_0, "Reason:\t%s\n", pod.Status.Reason)
		}
		if len(pod.Status.Message) > 0 {
			w.Write(LEVEL_0, "Message:\t%s\n", pod.Status.Message)
		}
		// TODO: remove when the singular .IP field is deprecated in favor of .PodIPs
		w.Write(LEVEL_0, "IP:\t%s\n", pod.Status.PodIP)
		describePodIPs(pod, w, "")
		if controlledBy := printController(pod); len(controlledBy) > 0 {
			w.Write(LEVEL_0, "Controlled By:\t%s\n", controlledBy)
		}
		if len(pod.Status.NominatedNodeName) > 0 {
			w.Write(LEVEL_0, "NominatedNodeName:\t%s\n", pod.Status.NominatedNodeName)
		}
		if len(pod.Spec.InitContainers) > 0 {
			describeContainers("Init Containers", pod.Spec.InitContainers, pod.Status.InitContainerStatuses, EnvValueRetriever(pod), w, "")
		}
		describeContainers("Containers", pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(pod), w, "")
		if len(pod.Spec.EphemeralContainers) > 0 {
			// Ephemeral containers share the Container layout via
			// EphemeralContainerCommon, so convert before describing.
			var ec []corev1.Container
			for i := range pod.Spec.EphemeralContainers {
				ec = append(ec, corev1.Container(pod.Spec.EphemeralContainers[i].EphemeralContainerCommon))
			}
			describeContainers("Ephemeral Containers", ec, pod.Status.EphemeralContainerStatuses, EnvValueRetriever(pod), w, "")
		}
		if len(pod.Spec.ReadinessGates) > 0 {
			w.Write(LEVEL_0, "Readiness Gates:\n Type\tStatus\n")
			for _, g := range pod.Spec.ReadinessGates {
				// Look up the condition matching this gate; "<none>" if absent.
				status := "<none>"
				for _, c := range pod.Status.Conditions {
					if c.Type == g.ConditionType {
						status = fmt.Sprintf("%v", c.Status)
						break
					}
				}
				w.Write(LEVEL_1, "%v \t%v \n",
					g.ConditionType,
					status)
			}
		}
		if len(pod.Status.Conditions) > 0 {
			w.Write(LEVEL_0, "Conditions:\n Type\tStatus\n")
			for _, c := range pod.Status.Conditions {
				w.Write(LEVEL_1, "%v \t%v \n",
					c.Type,
					c.Status)
			}
		}
		describeVolumes(pod.Spec.Volumes, w, "")
		// Prefer the reported QoS class; fall back to computing it locally.
		if pod.Status.QOSClass != "" {
			w.Write(LEVEL_0, "QoS Class:\t%s\n", pod.Status.QOSClass)
		} else {
			w.Write(LEVEL_0, "QoS Class:\t%s\n", qos.GetPodQOS(pod))
		}
		printLabelsMultiline(w, "Node-Selectors", pod.Spec.NodeSelector)
		printPodTolerationsMultiline(w, "Tolerations", pod.Spec.Tolerations)
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// printController returns "Kind/Name" of the object's managing controller,
// or the empty string when it has no controller owner reference.
func printController(controllee metav1.Object) string {
	controllerRef := metav1.GetControllerOf(controllee)
	if controllerRef == nil {
		return ""
	}
	return fmt.Sprintf("%s/%s", controllerRef.Kind, controllerRef.Name)
}
// describePodIPs prints the pod's IP list, or "<none>" when it is empty.
// space is prepended to the section label for nested output.
func describePodIPs(pod *corev1.Pod, w PrefixWriter, space string) {
	ips := pod.Status.PodIPs
	if len(ips) == 0 {
		w.Write(LEVEL_0, "%sIPs:\t<none>\n", space)
		return
	}
	w.Write(LEVEL_0, "%sIPs:\n", space)
	for i := range ips {
		w.Write(LEVEL_1, "IP:\t%s\n", ips[i].IP)
	}
}
// describeVolumes prints each volume's name and dispatches on the (exactly
// one) populated VolumeSource field to the matching print helper; unknown
// sources fall through to "<unknown>". space indents the section label for
// nested output.
func describeVolumes(volumes []corev1.Volume, w PrefixWriter, space string) {
	if len(volumes) == 0 {
		w.Write(LEVEL_0, "%sVolumes:\t<none>\n", space)
		return
	}
	w.Write(LEVEL_0, "%sVolumes:\n", space)
	for _, volume := range volumes {
		// Extra indent for the volume name when the section itself is indented.
		nameIndent := ""
		if len(space) > 0 {
			nameIndent = " "
		}
		w.Write(LEVEL_1, "%s%v:\n", nameIndent, volume.Name)
		switch {
		case volume.VolumeSource.HostPath != nil:
			printHostPathVolumeSource(volume.VolumeSource.HostPath, w)
		case volume.VolumeSource.EmptyDir != nil:
			printEmptyDirVolumeSource(volume.VolumeSource.EmptyDir, w)
		case volume.VolumeSource.GCEPersistentDisk != nil:
			printGCEPersistentDiskVolumeSource(volume.VolumeSource.GCEPersistentDisk, w)
		case volume.VolumeSource.AWSElasticBlockStore != nil:
			printAWSElasticBlockStoreVolumeSource(volume.VolumeSource.AWSElasticBlockStore, w)
		case volume.VolumeSource.GitRepo != nil:
			printGitRepoVolumeSource(volume.VolumeSource.GitRepo, w)
		case volume.VolumeSource.Secret != nil:
			printSecretVolumeSource(volume.VolumeSource.Secret, w)
		case volume.VolumeSource.ConfigMap != nil:
			printConfigMapVolumeSource(volume.VolumeSource.ConfigMap, w)
		case volume.VolumeSource.NFS != nil:
			printNFSVolumeSource(volume.VolumeSource.NFS, w)
		case volume.VolumeSource.ISCSI != nil:
			printISCSIVolumeSource(volume.VolumeSource.ISCSI, w)
		case volume.VolumeSource.Glusterfs != nil:
			printGlusterfsVolumeSource(volume.VolumeSource.Glusterfs, w)
		case volume.VolumeSource.PersistentVolumeClaim != nil:
			printPersistentVolumeClaimVolumeSource(volume.VolumeSource.PersistentVolumeClaim, w)
		case volume.VolumeSource.RBD != nil:
			printRBDVolumeSource(volume.VolumeSource.RBD, w)
		case volume.VolumeSource.Quobyte != nil:
			printQuobyteVolumeSource(volume.VolumeSource.Quobyte, w)
		case volume.VolumeSource.DownwardAPI != nil:
			printDownwardAPIVolumeSource(volume.VolumeSource.DownwardAPI, w)
		case volume.VolumeSource.AzureDisk != nil:
			printAzureDiskVolumeSource(volume.VolumeSource.AzureDisk, w)
		case volume.VolumeSource.VsphereVolume != nil:
			printVsphereVolumeSource(volume.VolumeSource.VsphereVolume, w)
		case volume.VolumeSource.Cinder != nil:
			printCinderVolumeSource(volume.VolumeSource.Cinder, w)
		case volume.VolumeSource.PhotonPersistentDisk != nil:
			printPhotonPersistentDiskVolumeSource(volume.VolumeSource.PhotonPersistentDisk, w)
		case volume.VolumeSource.PortworxVolume != nil:
			printPortworxVolumeSource(volume.VolumeSource.PortworxVolume, w)
		case volume.VolumeSource.ScaleIO != nil:
			printScaleIOVolumeSource(volume.VolumeSource.ScaleIO, w)
		case volume.VolumeSource.CephFS != nil:
			printCephFSVolumeSource(volume.VolumeSource.CephFS, w)
		case volume.VolumeSource.StorageOS != nil:
			printStorageOSVolumeSource(volume.VolumeSource.StorageOS, w)
		case volume.VolumeSource.FC != nil:
			printFCVolumeSource(volume.VolumeSource.FC, w)
		case volume.VolumeSource.AzureFile != nil:
			printAzureFileVolumeSource(volume.VolumeSource.AzureFile, w)
		case volume.VolumeSource.FlexVolume != nil:
			printFlexVolumeSource(volume.VolumeSource.FlexVolume, w)
		case volume.VolumeSource.Flocker != nil:
			printFlockerVolumeSource(volume.VolumeSource.Flocker, w)
		case volume.VolumeSource.Projected != nil:
			printProjectedVolumeSource(volume.VolumeSource.Projected, w)
		case volume.VolumeSource.CSI != nil:
			printCSIVolumeSource(volume.VolumeSource.CSI, w)
		default:
			w.Write(LEVEL_1, "<unknown>\n")
		}
	}
}
// printHostPathVolumeSource describes a HostPath volume source.
func printHostPathVolumeSource(hostPath *corev1.HostPathVolumeSource, w PrefixWriter) {
	pathType := "<none>"
	if t := hostPath.Type; t != nil {
		pathType = string(*t)
	}
	const tmpl = "Type:\tHostPath (bare host directory volume)\n" +
		" Path:\t%v\n" +
		" HostPathType:\t%v\n"
	w.Write(LEVEL_2, tmpl, hostPath.Path, pathType)
}
// printEmptyDirVolumeSource describes an EmptyDir volume source; a zero or
// absent SizeLimit is shown as "<unset>".
func printEmptyDirVolumeSource(emptyDir *corev1.EmptyDirVolumeSource, w PrefixWriter) {
	sizeLimit := "<unset>"
	if emptyDir.SizeLimit != nil && emptyDir.SizeLimit.Cmp(resource.Quantity{}) > 0 {
		sizeLimit = fmt.Sprintf("%v", emptyDir.SizeLimit)
	}
	const tmpl = "Type:\tEmptyDir (a temporary directory that shares a pod's lifetime)\n" +
		" Medium:\t%v\n" +
		" SizeLimit:\t%v\n"
	w.Write(LEVEL_2, tmpl, emptyDir.Medium, sizeLimit)
}
// printGCEPersistentDiskVolumeSource describes a GCE persistent disk volume source.
func printGCEPersistentDiskVolumeSource(gce *corev1.GCEPersistentDiskVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tGCEPersistentDisk (a Persistent Disk resource in Google Compute Engine)\n"+
		" PDName:\t%v\n"+
		" FSType:\t%v\n"+
		" Partition:\t%v\n"+
		" ReadOnly:\t%v\n",
		gce.PDName, gce.FSType, gce.Partition, gce.ReadOnly)
}
// printAWSElasticBlockStoreVolumeSource describes an AWS EBS volume source.
func printAWSElasticBlockStoreVolumeSource(aws *corev1.AWSElasticBlockStoreVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tAWSElasticBlockStore (a Persistent Disk resource in AWS)\n"+
		" VolumeID:\t%v\n"+
		" FSType:\t%v\n"+
		" Partition:\t%v\n"+
		" ReadOnly:\t%v\n",
		aws.VolumeID, aws.FSType, aws.Partition, aws.ReadOnly)
}
// printGitRepoVolumeSource describes a (deprecated) GitRepo volume source.
func printGitRepoVolumeSource(git *corev1.GitRepoVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tGitRepo (a volume that is pulled from git when the pod is created)\n"+
		" Repository:\t%v\n"+
		" Revision:\t%v\n",
		git.Repository, git.Revision)
}
// printSecretVolumeSource describes a Secret volume source; Optional is shown
// as false when the pointer is nil.
func printSecretVolumeSource(secret *corev1.SecretVolumeSource, w PrefixWriter) {
	isOptional := secret.Optional != nil && *secret.Optional
	const tmpl = "Type:\tSecret (a volume populated by a Secret)\n" +
		" SecretName:\t%v\n" +
		" Optional:\t%v\n"
	w.Write(LEVEL_2, tmpl, secret.SecretName, isOptional)
}
// printConfigMapVolumeSource describes a ConfigMap volume source; Optional is
// shown as false when the pointer is nil.
func printConfigMapVolumeSource(configMap *corev1.ConfigMapVolumeSource, w PrefixWriter) {
	isOptional := configMap.Optional != nil && *configMap.Optional
	const tmpl = "Type:\tConfigMap (a volume populated by a ConfigMap)\n" +
		" Name:\t%v\n" +
		" Optional:\t%v\n"
	w.Write(LEVEL_2, tmpl, configMap.Name, isOptional)
}
// printProjectedVolumeSource describes each source of a projected volume
// (secret, downward API, config map, or service account token).
func printProjectedVolumeSource(projected *corev1.ProjectedVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tProjected (a volume that contains injected data from multiple sources)\n")
	for _, source := range projected.Sources {
		if source.Secret != nil {
			w.Write(LEVEL_2, "SecretName:\t%v\n"+
				" SecretOptionalName:\t%v\n",
				source.Secret.Name, source.Secret.Optional)
		} else if source.DownwardAPI != nil {
			w.Write(LEVEL_2, "DownwardAPI:\ttrue\n")
		} else if source.ConfigMap != nil {
			w.Write(LEVEL_2, "ConfigMapName:\t%v\n"+
				" ConfigMapOptional:\t%v\n",
				source.ConfigMap.Name, source.ConfigMap.Optional)
		} else if source.ServiceAccountToken != nil {
			// BUG FIX: ExpirationSeconds is an optional *int64; the previous
			// unconditional dereference panicked for token sources without an
			// explicit expiration.
			if source.ServiceAccountToken.ExpirationSeconds != nil {
				w.Write(LEVEL_2, "TokenExpirationSeconds:\t%d\n",
					*source.ServiceAccountToken.ExpirationSeconds)
			} else {
				w.Write(LEVEL_2, "TokenExpirationSeconds:\t<unset>\n")
			}
		}
	}
}
// printNFSVolumeSource describes an NFS volume source.
func printNFSVolumeSource(nfs *corev1.NFSVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tNFS (an NFS mount that lasts the lifetime of a pod)\n"+
		" Server:\t%v\n"+
		" Path:\t%v\n"+
		" ReadOnly:\t%v\n",
		nfs.Server, nfs.Path, nfs.ReadOnly)
}
// printQuobyteVolumeSource describes a Quobyte volume source.
func printQuobyteVolumeSource(quobyte *corev1.QuobyteVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tQuobyte (a Quobyte mount on the host that shares a pod's lifetime)\n"+
		" Registry:\t%v\n"+
		" Volume:\t%v\n"+
		" ReadOnly:\t%v\n",
		quobyte.Registry, quobyte.Volume, quobyte.ReadOnly)
}
// printPortworxVolumeSource describes a Portworx volume source.
func printPortworxVolumeSource(pwxVolume *corev1.PortworxVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tPortworxVolume (a Portworx Volume resource)\n" +
		" VolumeID:\t%v\n"
	w.Write(LEVEL_2, tmpl, pwxVolume.VolumeID)
}
// printISCSIVolumeSource describes an iSCSI volume source; a nil
// InitiatorName is shown as "<none>".
func printISCSIVolumeSource(iscsi *corev1.ISCSIVolumeSource, w PrefixWriter) {
	initiator := "<none>"
	if iscsi.InitiatorName != nil {
		initiator = *iscsi.InitiatorName
	}
	w.Write(LEVEL_2, "Type:\tISCSI (an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod)\n"+
		" TargetPortal:\t%v\n"+
		" IQN:\t%v\n"+
		" Lun:\t%v\n"+
		" ISCSIInterface\t%v\n"+
		" FSType:\t%v\n"+
		" ReadOnly:\t%v\n"+
		" Portals:\t%v\n"+
		" DiscoveryCHAPAuth:\t%v\n"+
		" SessionCHAPAuth:\t%v\n"+
		" SecretRef:\t%v\n"+
		" InitiatorName:\t%v\n",
		iscsi.TargetPortal, iscsi.IQN, iscsi.Lun, iscsi.ISCSIInterface, iscsi.FSType, iscsi.ReadOnly, iscsi.Portals, iscsi.DiscoveryCHAPAuth, iscsi.SessionCHAPAuth, iscsi.SecretRef, initiator)
}
// printISCSIPersistentVolumeSource describes the PV flavor of an iSCSI
// volume source; a nil InitiatorName is shown as "<none>".
func printISCSIPersistentVolumeSource(iscsi *corev1.ISCSIPersistentVolumeSource, w PrefixWriter) {
	initiatorName := "<none>"
	if iscsi.InitiatorName != nil {
		initiatorName = *iscsi.InitiatorName
	}
	w.Write(LEVEL_2, "Type:\tISCSI (an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod)\n"+
		" TargetPortal:\t%v\n"+
		" IQN:\t%v\n"+
		" Lun:\t%v\n"+
		" ISCSIInterface\t%v\n"+
		" FSType:\t%v\n"+
		" ReadOnly:\t%v\n"+
		" Portals:\t%v\n"+
		" DiscoveryCHAPAuth:\t%v\n"+
		" SessionCHAPAuth:\t%v\n"+
		" SecretRef:\t%v\n"+
		" InitiatorName:\t%v\n",
		iscsi.TargetPortal, iscsi.IQN, iscsi.Lun, iscsi.ISCSIInterface, iscsi.FSType, iscsi.ReadOnly, iscsi.Portals, iscsi.DiscoveryCHAPAuth, iscsi.SessionCHAPAuth, iscsi.SecretRef, initiatorName)
}
// printGlusterfsVolumeSource describes a Glusterfs volume source.
func printGlusterfsVolumeSource(glusterfs *corev1.GlusterfsVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tGlusterfs (a Glusterfs mount on the host that shares a pod's lifetime)\n"+
		" EndpointsName:\t%v\n"+
		" Path:\t%v\n"+
		" ReadOnly:\t%v\n",
		glusterfs.EndpointsName, glusterfs.Path, glusterfs.ReadOnly)
}
// printGlusterfsPersistentVolumeSource describes the PV flavor of a Glusterfs
// volume source; a nil EndpointsNamespace is shown as "<unset>".
func printGlusterfsPersistentVolumeSource(glusterfs *corev1.GlusterfsPersistentVolumeSource, w PrefixWriter) {
	epNamespace := "<unset>"
	if ns := glusterfs.EndpointsNamespace; ns != nil {
		epNamespace = *ns
	}
	const tmpl = "Type:\tGlusterfs (a Glusterfs mount on the host that shares a pod's lifetime)\n" +
		" EndpointsName:\t%v\n" +
		" EndpointsNamespace:\t%v\n" +
		" Path:\t%v\n" +
		" ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, glusterfs.EndpointsName, epNamespace, glusterfs.Path, glusterfs.ReadOnly)
}
// printPersistentVolumeClaimVolumeSource describes a PVC volume source.
func printPersistentVolumeClaimVolumeSource(claim *corev1.PersistentVolumeClaimVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tPersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)\n" +
		" ClaimName:\t%v\n" +
		" ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, claim.ClaimName, claim.ReadOnly)
}
// printRBDVolumeSource describes a Rados Block Device volume source.
func printRBDVolumeSource(rbd *corev1.RBDVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tRBD (a Rados Block Device mount on the host that shares a pod's lifetime)\n"+
		" CephMonitors:\t%v\n"+
		" RBDImage:\t%v\n"+
		" FSType:\t%v\n"+
		" RBDPool:\t%v\n"+
		" RadosUser:\t%v\n"+
		" Keyring:\t%v\n"+
		" SecretRef:\t%v\n"+
		" ReadOnly:\t%v\n",
		rbd.CephMonitors, rbd.RBDImage, rbd.FSType, rbd.RBDPool, rbd.RadosUser, rbd.Keyring, rbd.SecretRef, rbd.ReadOnly)
}
// printRBDPersistentVolumeSource describes the PV flavor of an RBD volume source.
func printRBDPersistentVolumeSource(rbd *corev1.RBDPersistentVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tRBD (a Rados Block Device mount on the host that shares a pod's lifetime)\n"+
		" CephMonitors:\t%v\n"+
		" RBDImage:\t%v\n"+
		" FSType:\t%v\n"+
		" RBDPool:\t%v\n"+
		" RadosUser:\t%v\n"+
		" Keyring:\t%v\n"+
		" SecretRef:\t%v\n"+
		" ReadOnly:\t%v\n",
		rbd.CephMonitors, rbd.RBDImage, rbd.FSType, rbd.RBDPool, rbd.RadosUser, rbd.Keyring, rbd.SecretRef, rbd.ReadOnly)
}
// printDownwardAPIVolumeSource lists each downward-API item as
// "source -> path"; an item may carry a field ref, a resource ref, or both.
func printDownwardAPIVolumeSource(d *corev1.DownwardAPIVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tDownwardAPI (a volume populated by information about the pod)\n Items:\n")
	for i := range d.Items {
		item := d.Items[i]
		if item.FieldRef != nil {
			w.Write(LEVEL_3, "%v -> %v\n", item.FieldRef.FieldPath, item.Path)
		}
		if item.ResourceFieldRef != nil {
			w.Write(LEVEL_3, "%v -> %v\n", item.ResourceFieldRef.Resource, item.Path)
		}
	}
}
// printAzureDiskVolumeSource describes an Azure Data Disk volume source.
//
// BUG FIX: Kind, FSType, CachingMode and ReadOnly are optional pointers.
// They are normally defaulted by the API server, but the previous
// unconditional dereferences panicked on any object where a field was nil.
// Non-nil values render exactly as before.
func printAzureDiskVolumeSource(d *corev1.AzureDiskVolumeSource, w PrefixWriter) {
	kind := "<unset>"
	if d.Kind != nil {
		kind = string(*d.Kind)
	}
	fsType := "<unset>"
	if d.FSType != nil {
		fsType = *d.FSType
	}
	cachingMode := "<unset>"
	if d.CachingMode != nil {
		cachingMode = string(*d.CachingMode)
	}
	readOnly := false
	if d.ReadOnly != nil {
		readOnly = *d.ReadOnly
	}
	w.Write(LEVEL_2, "Type:\tAzureDisk (an Azure Data Disk mount on the host and bind mount to the pod)\n"+
		" DiskName:\t%v\n"+
		" DiskURI:\t%v\n"+
		" Kind: \t%v\n"+
		" FSType:\t%v\n"+
		" CachingMode:\t%v\n"+
		" ReadOnly:\t%v\n",
		d.DiskName, d.DataDiskURI, kind, fsType, cachingMode, readOnly)
}
// printVsphereVolumeSource describes a vSphere virtual disk volume source.
func printVsphereVolumeSource(vsphere *corev1.VsphereVirtualDiskVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tvSphereVolume (a Persistent Disk resource in vSphere)\n"+
		" VolumePath:\t%v\n"+
		" FSType:\t%v\n"+
		" StoragePolicyName:\t%v\n",
		vsphere.VolumePath, vsphere.FSType, vsphere.StoragePolicyName)
}
// printPhotonPersistentDiskVolumeSource describes a Photon persistent disk
// volume source.
func printPhotonPersistentDiskVolumeSource(photon *corev1.PhotonPersistentDiskVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tPhotonPersistentDisk (a Persistent Disk resource in photon platform)\n" +
		" PdID:\t%v\n" +
		" FSType:\t%v\n"
	w.Write(LEVEL_2, tmpl, photon.PdID, photon.FSType)
}
// printCinderVolumeSource describes an OpenStack Cinder volume source.
func printCinderVolumeSource(cinder *corev1.CinderVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tCinder (a Persistent Disk resource in OpenStack)\n"+
		" VolumeID:\t%v\n"+
		" FSType:\t%v\n"+
		" ReadOnly:\t%v\n"+
		" SecretRef:\t%v\n",
		cinder.VolumeID, cinder.FSType, cinder.ReadOnly, cinder.SecretRef)
}
// printCinderPersistentVolumeSource describes the PV flavor of a Cinder
// volume source.
func printCinderPersistentVolumeSource(cinder *corev1.CinderPersistentVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tCinder (a Persistent Disk resource in OpenStack)\n"+
		" VolumeID:\t%v\n"+
		" FSType:\t%v\n"+
		" ReadOnly:\t%v\n"+
		" SecretRef:\t%v\n",
		cinder.VolumeID, cinder.FSType, cinder.ReadOnly, cinder.SecretRef)
}
// printScaleIOVolumeSource describes a ScaleIO volume source.
func printScaleIOVolumeSource(sio *corev1.ScaleIOVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tScaleIO (a persistent volume backed by a block device in ScaleIO)\n"+
		" Gateway:\t%v\n"+
		" System:\t%v\n"+
		" Protection Domain:\t%v\n"+
		" Storage Pool:\t%v\n"+
		" Storage Mode:\t%v\n"+
		" VolumeName:\t%v\n"+
		" FSType:\t%v\n"+
		" ReadOnly:\t%v\n",
		sio.Gateway, sio.System, sio.ProtectionDomain, sio.StoragePool, sio.StorageMode, sio.VolumeName, sio.FSType, sio.ReadOnly)
}
// printScaleIOPersistentVolumeSource describes the PV flavor of a ScaleIO
// volume source; an absent SecretRef leaves the secret columns empty.
func printScaleIOPersistentVolumeSource(sio *corev1.ScaleIOPersistentVolumeSource, w PrefixWriter) {
	var secretName, secretNamespace string
	if ref := sio.SecretRef; ref != nil {
		secretName = ref.Name
		secretNamespace = ref.Namespace
	}
	const tmpl = "Type:\tScaleIO (a persistent volume backed by a block device in ScaleIO)\n" +
		" Gateway:\t%v\n" +
		" System:\t%v\n" +
		" Protection Domain:\t%v\n" +
		" Storage Pool:\t%v\n" +
		" Storage Mode:\t%v\n" +
		" VolumeName:\t%v\n" +
		" SecretName:\t%v\n" +
		" SecretNamespace:\t%v\n" +
		" FSType:\t%v\n" +
		" ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, sio.Gateway, sio.System, sio.ProtectionDomain, sio.StoragePool, sio.StorageMode, sio.VolumeName, secretName, secretNamespace, sio.FSType, sio.ReadOnly)
}
// printLocalVolumeSource describes a local (node-attached) volume source.
func printLocalVolumeSource(ls *corev1.LocalVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tLocalVolume (a persistent volume backed by local storage on a node)\n" +
		" Path:\t%v\n"
	w.Write(LEVEL_2, tmpl, ls.Path)
}
// printCephFSVolumeSource describes a CephFS volume source.
func printCephFSVolumeSource(cephfs *corev1.CephFSVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tCephFS (a CephFS mount on the host that shares a pod's lifetime)\n"+
		" Monitors:\t%v\n"+
		" Path:\t%v\n"+
		" User:\t%v\n"+
		" SecretFile:\t%v\n"+
		" SecretRef:\t%v\n"+
		" ReadOnly:\t%v\n",
		cephfs.Monitors, cephfs.Path, cephfs.User, cephfs.SecretFile, cephfs.SecretRef, cephfs.ReadOnly)
}
// printCephFSPersistentVolumeSource describes the PV flavor of a CephFS
// volume source.
func printCephFSPersistentVolumeSource(cephfs *corev1.CephFSPersistentVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tCephFS (a CephFS mount on the host that shares a pod's lifetime)\n"+
		" Monitors:\t%v\n"+
		" Path:\t%v\n"+
		" User:\t%v\n"+
		" SecretFile:\t%v\n"+
		" SecretRef:\t%v\n"+
		" ReadOnly:\t%v\n",
		cephfs.Monitors, cephfs.Path, cephfs.User, cephfs.SecretFile, cephfs.SecretRef, cephfs.ReadOnly)
}
// printStorageOSVolumeSource describes a StorageOS volume source.
func printStorageOSVolumeSource(storageos *corev1.StorageOSVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tStorageOS (a StorageOS Persistent Disk resource)\n"+
		" VolumeName:\t%v\n"+
		" VolumeNamespace:\t%v\n"+
		" FSType:\t%v\n"+
		" ReadOnly:\t%v\n",
		storageos.VolumeName, storageos.VolumeNamespace, storageos.FSType, storageos.ReadOnly)
}
// printStorageOSPersistentVolumeSource describes the PV flavor of a StorageOS
// volume source.
func printStorageOSPersistentVolumeSource(storageos *corev1.StorageOSPersistentVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tStorageOS (a StorageOS Persistent Disk resource)\n"+
		" VolumeName:\t%v\n"+
		" VolumeNamespace:\t%v\n"+
		" FSType:\t%v\n"+
		" ReadOnly:\t%v\n",
		storageos.VolumeName, storageos.VolumeNamespace, storageos.FSType, storageos.ReadOnly)
}
// printFCVolumeSource describes a Fibre Channel volume source; a nil Lun is
// shown as "<none>".
func printFCVolumeSource(fc *corev1.FCVolumeSource, w PrefixWriter) {
	lunValue := "<none>"
	if fc.Lun != nil {
		lunValue = strconv.Itoa(int(*fc.Lun))
	}
	const tmpl = "Type:\tFC (a Fibre Channel disk)\n" +
		" TargetWWNs:\t%v\n" +
		" LUN:\t%v\n" +
		" FSType:\t%v\n" +
		" ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, strings.Join(fc.TargetWWNs, ", "), lunValue, fc.FSType, fc.ReadOnly)
}
// printAzureFileVolumeSource describes an Azure File volume source.
func printAzureFileVolumeSource(azureFile *corev1.AzureFileVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tAzureFile (an Azure File Service mount on the host and bind mount to the pod)\n"+
		" SecretName:\t%v\n"+
		" ShareName:\t%v\n"+
		" ReadOnly:\t%v\n",
		azureFile.SecretName, azureFile.ShareName, azureFile.ReadOnly)
}
// printAzureFilePersistentVolumeSource describes the PV flavor of an Azure
// File volume source; a nil SecretNamespace renders as the empty string.
func printAzureFilePersistentVolumeSource(azureFile *corev1.AzureFilePersistentVolumeSource, w PrefixWriter) {
	var secretNamespace string
	if azureFile.SecretNamespace != nil {
		secretNamespace = *azureFile.SecretNamespace
	}
	const tmpl = "Type:\tAzureFile (an Azure File Service mount on the host and bind mount to the pod)\n" +
		" SecretName:\t%v\n" +
		" SecretNamespace:\t%v\n" +
		" ShareName:\t%v\n" +
		" ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, azureFile.SecretName, secretNamespace, azureFile.ShareName, azureFile.ReadOnly)
}
// printFlexPersistentVolumeSource describes the PV flavor of a FlexVolume
// source.
func printFlexPersistentVolumeSource(flex *corev1.FlexPersistentVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tFlexVolume (a generic volume resource that is provisioned/attached using an exec based plugin)\n"+
		" Driver:\t%v\n"+
		" FSType:\t%v\n"+
		" SecretRef:\t%v\n"+
		" ReadOnly:\t%v\n"+
		" Options:\t%v\n",
		flex.Driver, flex.FSType, flex.SecretRef, flex.ReadOnly, flex.Options)
}
// printFlexVolumeSource describes a FlexVolume source.
func printFlexVolumeSource(flex *corev1.FlexVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tFlexVolume (a generic volume resource that is provisioned/attached using an exec based plugin)\n"+
		" Driver:\t%v\n"+
		" FSType:\t%v\n"+
		" SecretRef:\t%v\n"+
		" ReadOnly:\t%v\n"+
		" Options:\t%v\n",
		flex.Driver, flex.FSType, flex.SecretRef, flex.ReadOnly, flex.Options)
}
// printFlockerVolumeSource describes a Flocker volume source.
func printFlockerVolumeSource(flocker *corev1.FlockerVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tFlocker (a Flocker volume mounted by the Flocker agent)\n" +
		" DatasetName:\t%v\n" +
		" DatasetUUID:\t%v\n"
	w.Write(LEVEL_2, tmpl, flocker.DatasetName, flocker.DatasetUUID)
}
// printCSIVolumeSource describes an inline CSI volume source; nil ReadOnly
// defaults to false and nil FSType to the empty string.
func printCSIVolumeSource(csi *corev1.CSIVolumeSource, w PrefixWriter) {
	readOnly := csi.ReadOnly != nil && *csi.ReadOnly
	var fsType string
	if csi.FSType != nil {
		fsType = *csi.FSType
	}
	const tmpl = "Type:\tCSI (a Container Storage Interface (CSI) volume source)\n" +
		" Driver:\t%v\n" +
		" FSType:\t%v\n" +
		" ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, csi.Driver, fsType, readOnly)
	printCSIPersistentVolumeAttributesMultiline(w, "VolumeAttributes", csi.VolumeAttributes)
}
// printCSIPersistentVolumeSource describes the PV flavor of a CSI volume
// source, followed by its volume attributes.
func printCSIPersistentVolumeSource(csi *corev1.CSIPersistentVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tCSI (a Container Storage Interface (CSI) volume source)\n"+
		" Driver:\t%v\n"+
		" FSType:\t%v\n"+
		" VolumeHandle:\t%v\n"+
		" ReadOnly:\t%v\n",
		csi.Driver, csi.FSType, csi.VolumeHandle, csi.ReadOnly)
	printCSIPersistentVolumeAttributesMultiline(w, "VolumeAttributes", csi.VolumeAttributes)
}
// printCSIPersistentVolumeAttributesMultiline prints CSI volume attributes
// one per line, with no initial indent and nothing skipped.
func printCSIPersistentVolumeAttributesMultiline(w PrefixWriter, title string, annotations map[string]string) {
	printCSIPersistentVolumeAttributesMultilineIndent(w, "", title, "\t", annotations, sets.NewString())
}
// printCSIPersistentVolumeAttributesMultilineIndent prints the attributes map
// sorted by key, one "key=value" per line, truncating values longer than
// maxAnnotationLen. Keys present in skip are omitted; when nothing remains,
// "<none>" is printed.
func printCSIPersistentVolumeAttributesMultilineIndent(w PrefixWriter, initialIndent, title, innerIndent string, attributes map[string]string, skip sets.String) {
	w.Write(LEVEL_2, "%s%s:%s", initialIndent, title, innerIndent)
	if len(attributes) == 0 {
		w.WriteLine("<none>")
		return
	}
	// Sort keys so output is deterministic.
	keys := make([]string, 0, len(attributes))
	for key := range attributes {
		if skip.Has(key) {
			continue
		}
		keys = append(keys, key)
	}
	// BUG FIX: this previously re-tested len(attributes); when every key was
	// skipped, neither a value nor "<none>" was printed. Test the filtered set.
	if len(keys) == 0 {
		w.WriteLine("<none>")
		return
	}
	sort.Strings(keys)
	for i, key := range keys {
		if i != 0 {
			w.Write(LEVEL_2, initialIndent)
			w.Write(LEVEL_2, innerIndent)
		}
		line := fmt.Sprintf("%s=%s", key, attributes[key])
		if len(line) > maxAnnotationLen {
			w.Write(LEVEL_2, "%s...\n", line[:maxAnnotationLen])
		} else {
			w.Write(LEVEL_2, "%s\n", line)
		}
		// BUG FIX: removed a stray `i++` here — range re-assigns i each
		// iteration, so it was a confusing no-op.
	}
}
// PersistentVolumeDescriber generates information about a persistent volume.
type PersistentVolumeDescriber struct {
	clientset.Interface // client used to fetch the PV and its events
}
// Describe fetches the named cluster-scoped PersistentVolume and renders it,
// optionally including events found in the given namespace.
func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	pv, err := d.CoreV1().PersistentVolumes().Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, pv)
	}
	return describePersistentVolume(pv, events)
}
// printVolumeNodeAffinity prints a PV's node affinity; "<none>" when there is
// no affinity or no required terms.
func printVolumeNodeAffinity(w PrefixWriter, affinity *corev1.VolumeNodeAffinity) {
	w.Write(LEVEL_0, "Node Affinity:\t")
	if affinity == nil || affinity.Required == nil {
		w.WriteLine("<none>")
		return
	}
	w.WriteLine("")
	// FIX: affinity.Required is guaranteed non-nil past the guard above, so
	// the previous redundant `if affinity.Required != nil` wrapper was removed.
	w.Write(LEVEL_1, "Required Terms:\t")
	if len(affinity.Required.NodeSelectorTerms) == 0 {
		w.WriteLine("<none>")
		return
	}
	w.WriteLine("")
	for i, term := range affinity.Required.NodeSelectorTerms {
		printNodeSelectorTermsMultilineWithIndent(w, LEVEL_2, fmt.Sprintf("Term %v", i), "\t", term.MatchExpressions)
	}
}
// printNodeSelectorTermsMultilineWithIndent prints node selector requirements
// under a titled heading, one "key operator [values]" expression per line.
// (The previous comment mentioning printLabelsMultiline was a copy-paste error.)
func printNodeSelectorTermsMultilineWithIndent(w PrefixWriter, indentLevel int, title, innerIndent string, reqs []corev1.NodeSelectorRequirement) {
	w.Write(indentLevel, "%s:%s", title, innerIndent)
	if len(reqs) == 0 {
		w.WriteLine("<none>")
		return
	}
	for i := range reqs {
		if i != 0 {
			w.Write(indentLevel, "%s", innerIndent)
		}
		req := reqs[i]
		expr := fmt.Sprintf("%s %s", req.Key, strings.ToLower(string(req.Operator)))
		if len(req.Values) > 0 {
			expr = fmt.Sprintf("%s [%s]", expr, strings.Join(req.Values, ", "))
		}
		w.Write(LEVEL_0, "%s\n", expr)
	}
}
// describePersistentVolume renders a human-readable, tab-aligned description
// of a PersistentVolume: metadata, status, claim binding, capacity, node
// affinity, volume source, and (when non-nil) recent events.
func describePersistentVolume(pv *corev1.PersistentVolume, events *corev1.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", pv.Name)
		printLabelsMultiline(w, "Labels", pv.ObjectMeta.Labels)
		printAnnotationsMultiline(w, "Annotations", pv.ObjectMeta.Annotations)
		w.Write(LEVEL_0, "Finalizers:\t%v\n", pv.ObjectMeta.Finalizers)
		w.Write(LEVEL_0, "StorageClass:\t%s\n", storageutil.GetPersistentVolumeClass(pv))
		// A deletion timestamp means the PV is being torn down; show how long
		// termination has been in progress instead of the phase.
		if pv.ObjectMeta.DeletionTimestamp != nil {
			w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", translateTimestampSince(*pv.ObjectMeta.DeletionTimestamp))
		} else {
			w.Write(LEVEL_0, "Status:\t%v\n", pv.Status.Phase)
		}
		// ClaimRef is set when the PV is bound; an unbound PV prints an empty
		// claim field.
		if pv.Spec.ClaimRef != nil {
			w.Write(LEVEL_0, "Claim:\t%s\n", pv.Spec.ClaimRef.Namespace+"/"+pv.Spec.ClaimRef.Name)
		} else {
			w.Write(LEVEL_0, "Claim:\t%s\n", "")
		}
		w.Write(LEVEL_0, "Reclaim Policy:\t%v\n", pv.Spec.PersistentVolumeReclaimPolicy)
		w.Write(LEVEL_0, "Access Modes:\t%s\n", storageutil.GetAccessModesAsString(pv.Spec.AccessModes))
		if pv.Spec.VolumeMode != nil {
			w.Write(LEVEL_0, "VolumeMode:\t%v\n", *pv.Spec.VolumeMode)
		}
		storage := pv.Spec.Capacity[corev1.ResourceStorage]
		w.Write(LEVEL_0, "Capacity:\t%s\n", storage.String())
		printVolumeNodeAffinity(w, pv.Spec.NodeAffinity)
		w.Write(LEVEL_0, "Message:\t%s\n", pv.Status.Message)
		w.Write(LEVEL_0, "Source:\n")
		// A PersistentVolumeSource is a union: at most one of these fields is
		// expected to be set, so the first matching case is printed.
		switch {
		case pv.Spec.HostPath != nil:
			printHostPathVolumeSource(pv.Spec.HostPath, w)
		case pv.Spec.GCEPersistentDisk != nil:
			printGCEPersistentDiskVolumeSource(pv.Spec.GCEPersistentDisk, w)
		case pv.Spec.AWSElasticBlockStore != nil:
			printAWSElasticBlockStoreVolumeSource(pv.Spec.AWSElasticBlockStore, w)
		case pv.Spec.NFS != nil:
			printNFSVolumeSource(pv.Spec.NFS, w)
		case pv.Spec.ISCSI != nil:
			printISCSIPersistentVolumeSource(pv.Spec.ISCSI, w)
		case pv.Spec.Glusterfs != nil:
			printGlusterfsPersistentVolumeSource(pv.Spec.Glusterfs, w)
		case pv.Spec.RBD != nil:
			printRBDPersistentVolumeSource(pv.Spec.RBD, w)
		case pv.Spec.Quobyte != nil:
			printQuobyteVolumeSource(pv.Spec.Quobyte, w)
		case pv.Spec.VsphereVolume != nil:
			printVsphereVolumeSource(pv.Spec.VsphereVolume, w)
		case pv.Spec.Cinder != nil:
			printCinderPersistentVolumeSource(pv.Spec.Cinder, w)
		case pv.Spec.AzureDisk != nil:
			printAzureDiskVolumeSource(pv.Spec.AzureDisk, w)
		case pv.Spec.PhotonPersistentDisk != nil:
			printPhotonPersistentDiskVolumeSource(pv.Spec.PhotonPersistentDisk, w)
		case pv.Spec.PortworxVolume != nil:
			printPortworxVolumeSource(pv.Spec.PortworxVolume, w)
		case pv.Spec.ScaleIO != nil:
			printScaleIOPersistentVolumeSource(pv.Spec.ScaleIO, w)
		case pv.Spec.Local != nil:
			printLocalVolumeSource(pv.Spec.Local, w)
		case pv.Spec.CephFS != nil:
			printCephFSPersistentVolumeSource(pv.Spec.CephFS, w)
		case pv.Spec.StorageOS != nil:
			printStorageOSPersistentVolumeSource(pv.Spec.StorageOS, w)
		case pv.Spec.FC != nil:
			printFCVolumeSource(pv.Spec.FC, w)
		case pv.Spec.AzureFile != nil:
			printAzureFilePersistentVolumeSource(pv.Spec.AzureFile, w)
		case pv.Spec.FlexVolume != nil:
			printFlexPersistentVolumeSource(pv.Spec.FlexVolume, w)
		case pv.Spec.Flocker != nil:
			printFlockerVolumeSource(pv.Spec.Flocker, w)
		case pv.Spec.CSI != nil:
			printCSIPersistentVolumeSource(pv.Spec.CSI, w)
		default:
			w.Write(LEVEL_1, "<unknown>\n")
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// PersistentVolumeClaimDescriber generates information about a persistent
// volume claim.
type PersistentVolumeClaimDescriber struct {
	clientset.Interface // client used to fetch the claim, its pods, and events
}
// Describe returns a human-readable description of the named
// PersistentVolumeClaim, the pods mounting it, and (when requested via
// describerSettings.ShowEvents) its recent events.
func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	c := d.CoreV1().PersistentVolumeClaims(namespace)
	pvc, err := c.Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	pc := d.CoreV1().Pods(namespace)
	mountPods, err := getMountPods(pc, pvc.Name)
	if err != nil {
		return "", err
	}
	// Honor describerSettings.ShowEvents like the other describers do;
	// previously events were fetched and shown unconditionally.
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, pvc)
	}
	return describePersistentVolumeClaim(pvc, events, mountPods)
}
// getMountPods returns all pods in the namespace served by c that reference
// the PVC named pvcName through any of their volumes.
func getMountPods(c corev1client.PodInterface, pvcName string) ([]corev1.Pod, error) {
	podList, err := c.List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return []corev1.Pod{}, err
	}
	var mounted []corev1.Pod
	for _, pod := range podList.Items {
		for _, vol := range getPvcs(pod.Spec.Volumes) {
			if vol.PersistentVolumeClaim.ClaimName == pvcName {
				mounted = append(mounted, pod)
			}
		}
	}
	return mounted, nil
}
// getPvcs filters the given volumes down to those backed by a
// PersistentVolumeClaim source.
func getPvcs(volumes []corev1.Volume) []corev1.Volume {
	var claims []corev1.Volume
	for _, vol := range volumes {
		if vol.VolumeSource.PersistentVolumeClaim == nil {
			continue
		}
		claims = append(claims, vol)
	}
	return claims
}
// describePersistentVolumeClaim renders a human-readable description of a
// PersistentVolumeClaim: metadata, binding status, capacity, data source,
// the pods that mount it, conditions, and (when non-nil) recent events.
func describePersistentVolumeClaim(pvc *corev1.PersistentVolumeClaim, events *corev1.EventList, mountPods []corev1.Pod) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", pvc.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", pvc.Namespace)
		w.Write(LEVEL_0, "StorageClass:\t%s\n", storageutil.GetPersistentVolumeClaimClass(pvc))
		// A deletion timestamp means the PVC is being torn down; show how long
		// termination has been in progress instead of the phase.
		if pvc.ObjectMeta.DeletionTimestamp != nil {
			w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", translateTimestampSince(*pvc.ObjectMeta.DeletionTimestamp))
		} else {
			w.Write(LEVEL_0, "Status:\t%v\n", pvc.Status.Phase)
		}
		w.Write(LEVEL_0, "Volume:\t%s\n", pvc.Spec.VolumeName)
		printLabelsMultiline(w, "Labels", pvc.Labels)
		printAnnotationsMultiline(w, "Annotations", pvc.Annotations)
		w.Write(LEVEL_0, "Finalizers:\t%v\n", pvc.ObjectMeta.Finalizers)
		storage := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
		capacity := ""
		accessModes := ""
		// Capacity and access modes from Status are only meaningful once the
		// claim is bound (VolumeName set); otherwise both print empty.
		if pvc.Spec.VolumeName != "" {
			accessModes = storageutil.GetAccessModesAsString(pvc.Status.AccessModes)
			storage = pvc.Status.Capacity[corev1.ResourceStorage]
			capacity = storage.String()
		}
		w.Write(LEVEL_0, "Capacity:\t%s\n", capacity)
		w.Write(LEVEL_0, "Access Modes:\t%s\n", accessModes)
		if pvc.Spec.VolumeMode != nil {
			w.Write(LEVEL_0, "VolumeMode:\t%v\n", *pvc.Spec.VolumeMode)
		}
		// Optional data source (e.g. clone/snapshot origin).
		if pvc.Spec.DataSource != nil {
			w.Write(LEVEL_0, "DataSource:\n")
			if pvc.Spec.DataSource.APIGroup != nil {
				w.Write(LEVEL_1, "APIGroup:\t%v\n", *pvc.Spec.DataSource.APIGroup)
			}
			w.Write(LEVEL_1, "Kind:\t%v\n", pvc.Spec.DataSource.Kind)
			w.Write(LEVEL_1, "Name:\t%v\n", pvc.Spec.DataSource.Name)
		}
		printPodsMultiline(w, "Mounted By", mountPods)
		if len(pvc.Status.Conditions) > 0 {
			w.Write(LEVEL_0, "Conditions:\n")
			w.Write(LEVEL_1, "Type\tStatus\tLastProbeTime\tLastTransitionTime\tReason\tMessage\n")
			w.Write(LEVEL_1, "----\t------\t-----------------\t------------------\t------\t-------\n")
			for _, c := range pvc.Status.Conditions {
				w.Write(LEVEL_1, "%v \t%v \t%s \t%s \t%v \t%v\n",
					c.Type,
					c.Status,
					c.LastProbeTime.Time.Format(time.RFC1123Z),
					c.LastTransitionTime.Time.Format(time.RFC1123Z),
					c.Reason,
					c.Message)
			}
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// describeContainers prints a labeled section describing each container:
// basic info, command/args, runtime state (when a matching status exists),
// resources, probes, env-from sources, env vars, and volume mounts/devices.
// resolverFn (may be nil) resolves display values for downward-API env vars;
// space is the leading indent prefix for the section heading.
func describeContainers(label string, containers []corev1.Container, containerStatuses []corev1.ContainerStatus,
	resolverFn EnvVarResolverFunc, w PrefixWriter, space string) {
	// Index statuses by container name so each container can be matched to
	// its runtime status, if any (e.g. pod templates have no statuses).
	statuses := map[string]corev1.ContainerStatus{}
	for _, status := range containerStatuses {
		statuses[status.Name] = status
	}
	describeContainersLabel(containers, label, space, w)

	for _, container := range containers {
		status, ok := statuses[container.Name]
		describeContainerBasicInfo(container, status, ok, space, w)
		describeContainerCommand(container, w)
		if ok {
			describeContainerState(status, w)
		}
		describeContainerResource(container, w)
		describeContainerProbe(container, w)
		if len(container.EnvFrom) > 0 {
			describeContainerEnvFrom(container, resolverFn, w)
		}
		describeContainerEnvVars(container, resolverFn, w)
		describeContainerVolumes(container, w)
	}
}
// describeContainersLabel writes the section heading for a container list,
// appending " <none>" when the list is empty.
func describeContainersLabel(containers []corev1.Container, label, space string, w PrefixWriter) {
	suffix := ""
	if len(containers) == 0 {
		suffix = " <none>"
	}
	w.Write(LEVEL_0, "%s%s:%s\n", space, label, suffix)
}
// describeContainerBasicInfo prints a container's name, IDs, image, and
// port bindings. Runtime-only fields (container ID, image ID) are printed
// only when ok indicates a matching ContainerStatus was found.
func describeContainerBasicInfo(container corev1.Container, status corev1.ContainerStatus, ok bool, space string, w PrefixWriter) {
	nameIndent := ""
	// A non-empty section prefix (e.g. pod templates) gets extra indentation.
	if len(space) > 0 {
		nameIndent = " "
	}
	w.Write(LEVEL_1, "%s%v:\n", nameIndent, container.Name)
	if ok {
		w.Write(LEVEL_2, "Container ID:\t%s\n", status.ContainerID)
	}
	w.Write(LEVEL_2, "Image:\t%s\n", container.Image)
	if ok {
		w.Write(LEVEL_2, "Image ID:\t%s\n", status.ImageID)
	}
	// Pluralize the label by detecting a comma in the joined port string
	// (more than one port means the joiner inserted ", ").
	portString := describeContainerPorts(container.Ports)
	if strings.Contains(portString, ",") {
		w.Write(LEVEL_2, "Ports:\t%s\n", portString)
	} else {
		w.Write(LEVEL_2, "Port:\t%s\n", stringOrNone(portString))
	}
	hostPortString := describeContainerHostPorts(container.Ports)
	if strings.Contains(hostPortString, ",") {
		w.Write(LEVEL_2, "Host Ports:\t%s\n", hostPortString)
	} else {
		w.Write(LEVEL_2, "Host Port:\t%s\n", stringOrNone(hostPortString))
	}
}
// describeContainerPorts renders container ports as "port/protocol" entries
// joined by ", ".
func describeContainerPorts(cPorts []corev1.ContainerPort) string {
	formatted := make([]string, 0, len(cPorts))
	for _, p := range cPorts {
		formatted = append(formatted, fmt.Sprintf("%d/%s", p.ContainerPort, p.Protocol))
	}
	return strings.Join(formatted, ", ")
}
// describeContainerHostPorts renders host ports as "port/protocol" entries
// joined by ", ".
func describeContainerHostPorts(cPorts []corev1.ContainerPort) string {
	formatted := make([]string, 0, len(cPorts))
	for _, p := range cPorts {
		formatted = append(formatted, fmt.Sprintf("%d/%s", p.HostPort, p.Protocol))
	}
	return strings.Join(formatted, ", ")
}
// describeContainerCommand prints the container's Command and Args sections,
// one element per line, with embedded newlines expanded onto extra lines.
func describeContainerCommand(container corev1.Container, w PrefixWriter) {
	writeMultiline := func(heading string, items []string) {
		if len(items) == 0 {
			return
		}
		w.Write(LEVEL_2, "%s:\n", heading)
		for _, item := range items {
			for _, line := range strings.Split(item, "\n") {
				w.Write(LEVEL_3, "%s\n", line)
			}
		}
	}
	writeMultiline("Command", container.Command)
	writeMultiline("Args", container.Args)
}
// describeContainerResource prints the container's resource Limits and
// Requests, each sorted by resource name; empty sections are omitted.
func describeContainerResource(container corev1.Container, w PrefixWriter) {
	res := container.Resources

	if len(res.Limits) > 0 {
		w.Write(LEVEL_2, "Limits:\n")
	}
	for _, rn := range SortedResourceNames(res.Limits) {
		q := res.Limits[rn]
		w.Write(LEVEL_3, "%s:\t%s\n", rn, q.String())
	}

	if len(res.Requests) > 0 {
		w.Write(LEVEL_2, "Requests:\n")
	}
	for _, rn := range SortedResourceNames(res.Requests) {
		q := res.Requests[rn]
		w.Write(LEVEL_3, "%s:\t%s\n", rn, q.String())
	}
}
// describeContainerState prints the container's current state, its last
// terminated state (when present), readiness, and restart count.
func describeContainerState(status corev1.ContainerStatus, w PrefixWriter) {
	describeStatus("State", status.State, w)
	// Only show the previous state when it actually carries a termination.
	if status.LastTerminationState.Terminated != nil {
		describeStatus("Last State", status.LastTerminationState, w)
	}
	w.Write(LEVEL_2, "Ready:\t%v\n", printBool(status.Ready))
	w.Write(LEVEL_2, "Restart Count:\t%d\n", status.RestartCount)
}
// describeContainerProbe prints the container's liveness, readiness, and
// startup probes, when configured.
func describeContainerProbe(container corev1.Container, w PrefixWriter) {
	if p := container.LivenessProbe; p != nil {
		w.Write(LEVEL_2, "Liveness:\t%s\n", DescribeProbe(p))
	}
	if p := container.ReadinessProbe; p != nil {
		w.Write(LEVEL_2, "Readiness:\t%s\n", DescribeProbe(p))
	}
	if p := container.StartupProbe; p != nil {
		w.Write(LEVEL_2, "Startup:\t%s\n", DescribeProbe(p))
	}
}
// describeContainerVolumes prints the container's volume mounts (sorted, with
// ro/rw and subPath flags) and, when present, its raw block volume devices.
func describeContainerVolumes(container corev1.Container, w PrefixWriter) {
	// Show volumeMounts
	none := ""
	if len(container.VolumeMounts) == 0 {
		none = "\t<none>"
	}
	w.Write(LEVEL_2, "Mounts:%s\n", none)
	sort.Sort(SortableVolumeMounts(container.VolumeMounts))
	for _, mount := range container.VolumeMounts {
		flags := []string{}
		if mount.ReadOnly {
			flags = append(flags, "ro")
		} else {
			flags = append(flags, "rw")
		}
		if len(mount.SubPath) > 0 {
			flags = append(flags, fmt.Sprintf("path=%q", mount.SubPath))
		}
		w.Write(LEVEL_3, "%s from %s (%s)\n", mount.MountPath, mount.Name, strings.Join(flags, ","))
	}
	// Show volumeDevices if exists
	if len(container.VolumeDevices) > 0 {
		// Bug fix: the Mounts "none" suffix was previously reused here, so a
		// container with devices but no mounts printed "Devices:\t<none>"
		// immediately before listing its devices. This branch only runs when
		// devices exist, so the header never needs a <none> marker.
		w.Write(LEVEL_2, "Devices:\n")
		sort.Sort(SortableVolumeDevices(container.VolumeDevices))
		for _, device := range container.VolumeDevices {
			w.Write(LEVEL_3, "%s from %s\n", device.DevicePath, device.Name)
		}
	}
}
// describeContainerEnvVars prints the container's Environment section.
// Literal values are printed directly (multiline values continue on indented
// lines); valueFrom sources are rendered per kind: field refs resolved via
// resolverFn when provided, resource field refs via the resource helper, and
// secret/config-map key refs as redacted placeholders with their Optional flag.
func describeContainerEnvVars(container corev1.Container, resolverFn EnvVarResolverFunc, w PrefixWriter) {
	none := ""
	if len(container.Env) == 0 {
		none = "\t<none>"
	}
	w.Write(LEVEL_2, "Environment:%s\n", none)

	for _, e := range container.Env {
		if e.ValueFrom == nil {
			// Literal value: the first line carries the name, continuation
			// lines are indented underneath it.
			for i, s := range strings.Split(e.Value, "\n") {
				if i == 0 {
					w.Write(LEVEL_3, "%s:\t%s\n", e.Name, s)
				} else {
					w.Write(LEVEL_3, "\t%s\n", s)
				}
			}
			continue
		}

		switch {
		case e.ValueFrom.FieldRef != nil:
			var valueFrom string
			if resolverFn != nil {
				valueFrom = resolverFn(e)
			}
			w.Write(LEVEL_3, "%s:\t%s (%s:%s)\n", e.Name, valueFrom, e.ValueFrom.FieldRef.APIVersion, e.ValueFrom.FieldRef.FieldPath)
		case e.ValueFrom.ResourceFieldRef != nil:
			// Resolution errors fall back to an empty display value.
			valueFrom, err := resourcehelper.ExtractContainerResourceValue(e.ValueFrom.ResourceFieldRef, &container)
			if err != nil {
				valueFrom = ""
			}
			resource := e.ValueFrom.ResourceFieldRef.Resource
			// A zero limit means the container inherits the node's
			// allocatable amount, so label it accordingly.
			if valueFrom == "0" && (resource == "limits.cpu" || resource == "limits.memory") {
				valueFrom = "node allocatable"
			}
			w.Write(LEVEL_3, "%s:\t%s (%s)\n", e.Name, valueFrom, resource)
		case e.ValueFrom.SecretKeyRef != nil:
			optional := e.ValueFrom.SecretKeyRef.Optional != nil && *e.ValueFrom.SecretKeyRef.Optional
			w.Write(LEVEL_3, "%s:\t<set to the key '%s' in secret '%s'>\tOptional: %t\n", e.Name, e.ValueFrom.SecretKeyRef.Key, e.ValueFrom.SecretKeyRef.Name, optional)
		case e.ValueFrom.ConfigMapKeyRef != nil:
			optional := e.ValueFrom.ConfigMapKeyRef.Optional != nil && *e.ValueFrom.ConfigMapKeyRef.Optional
			w.Write(LEVEL_3, "%s:\t<set to the key '%s' of config map '%s'>\tOptional: %t\n", e.Name, e.ValueFrom.ConfigMapKeyRef.Key, e.ValueFrom.ConfigMapKeyRef.Name, optional)
		}
	}
}
// describeContainerEnvFrom prints the container's "Environment Variables
// from" section: each EnvFrom source's name, kind (ConfigMap/Secret),
// optional prefix, and Optional flag.
// NOTE(review): resolverFn is accepted for signature parity with
// describeContainerEnvVars but is not used in this function.
func describeContainerEnvFrom(container corev1.Container, resolverFn EnvVarResolverFunc, w PrefixWriter) {
	none := ""
	if len(container.EnvFrom) == 0 {
		none = "\t<none>"
	}
	w.Write(LEVEL_2, "Environment Variables from:%s\n", none)

	for _, e := range container.EnvFrom {
		from := ""
		name := ""
		optional := false
		// EnvFromSource is a union: exactly one of ConfigMapRef/SecretRef is
		// expected to be set.
		if e.ConfigMapRef != nil {
			from = "ConfigMap"
			name = e.ConfigMapRef.Name
			optional = e.ConfigMapRef.Optional != nil && *e.ConfigMapRef.Optional
		} else if e.SecretRef != nil {
			from = "Secret"
			name = e.SecretRef.Name
			optional = e.SecretRef.Optional != nil && *e.SecretRef.Optional
		}
		if len(e.Prefix) == 0 {
			w.Write(LEVEL_3, "%s\t%s\tOptional: %t\n", name, from, optional)
		} else {
			w.Write(LEVEL_3, "%s\t%s with prefix '%s'\tOptional: %t\n", name, from, e.Prefix, optional)
		}
	}
}
// DescribeProbe is exported for consumers in other API groups that have
// probes. It renders a one-line summary of the probe's handler (exec,
// http-get, or tcp-socket) followed by its timing/threshold attributes.
func DescribeProbe(probe *corev1.Probe) string {
	attrs := fmt.Sprintf("delay=%ds timeout=%ds period=%ds #success=%d #failure=%d", probe.InitialDelaySeconds, probe.TimeoutSeconds, probe.PeriodSeconds, probe.SuccessThreshold, probe.FailureThreshold)
	switch {
	case probe.Exec != nil:
		return fmt.Sprintf("exec %v %s", probe.Exec.Command, attrs)
	case probe.HTTPGet != nil:
		// Use "u" rather than "url" so the net/url package name is not
		// shadowed by the local variable.
		u := &url.URL{
			Scheme: strings.ToLower(string(probe.HTTPGet.Scheme)),
			Path:   probe.HTTPGet.Path,
		}
		if len(probe.HTTPGet.Port.String()) > 0 {
			u.Host = net.JoinHostPort(probe.HTTPGet.Host, probe.HTTPGet.Port.String())
		} else {
			u.Host = probe.HTTPGet.Host
		}
		return fmt.Sprintf("http-get %s %s", u.String(), attrs)
	case probe.TCPSocket != nil:
		return fmt.Sprintf("tcp-socket %s:%s %s", probe.TCPSocket.Host, probe.TCPSocket.Port.String(), attrs)
	}
	return fmt.Sprintf("unknown %s", attrs)
}
type EnvVarResolverFunc func(e corev1.EnvVar) string
// EnvValueRetriever is exported for use by describers in other packages.
// It returns a resolver that evaluates a downward-API FieldRef env var
// against the given pod, yielding the resolved value or "" when the
// reference cannot be parsed or extracted.
// (Previous comment referred to the non-existent name EnvValueFrom.)
func EnvValueRetriever(pod *corev1.Pod) EnvVarResolverFunc {
	return func(e corev1.EnvVar) string {
		gv, err := schema.ParseGroupVersion(e.ValueFrom.FieldRef.APIVersion)
		if err != nil {
			return ""
		}
		gvk := gv.WithKind("Pod")
		internalFieldPath, _, err := scheme.Scheme.ConvertFieldLabel(gvk, e.ValueFrom.FieldRef.FieldPath, "")
		if err != nil {
			return "" // pod validation should catch this on create
		}
		valueFrom, err := fieldpath.ExtractFieldPathAsString(pod, internalFieldPath)
		if err != nil {
			return "" // pod validation should catch this on create
		}
		return valueFrom
	}
}
// describeStatus prints a container state (Running, Waiting, or Terminated)
// under the given heading, with the details each state carries. A state with
// none of the three fields set is reported as Waiting.
func describeStatus(stateName string, state corev1.ContainerState, w PrefixWriter) {
	switch {
	case state.Running != nil:
		w.Write(LEVEL_2, "%s:\tRunning\n", stateName)
		w.Write(LEVEL_3, "Started:\t%v\n", state.Running.StartedAt.Time.Format(time.RFC1123Z))
	case state.Waiting != nil:
		w.Write(LEVEL_2, "%s:\tWaiting\n", stateName)
		if state.Waiting.Reason != "" {
			w.Write(LEVEL_3, "Reason:\t%s\n", state.Waiting.Reason)
		}
	case state.Terminated != nil:
		w.Write(LEVEL_2, "%s:\tTerminated\n", stateName)
		if state.Terminated.Reason != "" {
			w.Write(LEVEL_3, "Reason:\t%s\n", state.Terminated.Reason)
		}
		if state.Terminated.Message != "" {
			w.Write(LEVEL_3, "Message:\t%s\n", state.Terminated.Message)
		}
		w.Write(LEVEL_3, "Exit Code:\t%d\n", state.Terminated.ExitCode)
		// Signal is only shown when the process was actually signaled.
		if state.Terminated.Signal > 0 {
			w.Write(LEVEL_3, "Signal:\t%d\n", state.Terminated.Signal)
		}
		w.Write(LEVEL_3, "Started:\t%s\n", state.Terminated.StartedAt.Time.Format(time.RFC1123Z))
		w.Write(LEVEL_3, "Finished:\t%s\n", state.Terminated.FinishedAt.Time.Format(time.RFC1123Z))
	default:
		// No state set at all: treat as Waiting with no reason.
		w.Write(LEVEL_2, "%s:\tWaiting\n", stateName)
	}
}
// describeVolumeClaimTemplates prints the "Volume Claims" section for a
// workload (e.g. a StatefulSet): each template's name, storage class, labels,
// annotations, requested capacity, and access modes.
func describeVolumeClaimTemplates(templates []corev1.PersistentVolumeClaim, w PrefixWriter) {
	if len(templates) == 0 {
		w.Write(LEVEL_0, "Volume Claims:\t<none>\n")
		return
	}
	w.Write(LEVEL_0, "Volume Claims:\n")
	for _, pvc := range templates {
		w.Write(LEVEL_1, "Name:\t%s\n", pvc.Name)
		// Taking &pvc is safe here: the pointer is used only within this
		// iteration and not retained.
		w.Write(LEVEL_1, "StorageClass:\t%s\n", storageutil.GetPersistentVolumeClaimClass(&pvc))
		printLabelsMultilineWithIndent(w, "  ", "Labels", "\t", pvc.Labels, sets.NewString())
		printLabelsMultilineWithIndent(w, "  ", "Annotations", "\t", pvc.Annotations, sets.NewString())
		if capacity, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; ok {
			w.Write(LEVEL_1, "Capacity:\t%s\n", capacity.String())
		} else {
			w.Write(LEVEL_1, "Capacity:\t%s\n", "<default>")
		}
		w.Write(LEVEL_1, "Access Modes:\t%s\n", pvc.Spec.AccessModes)
	}
}
// printBoolPtr renders a *bool as "True" or "False", or "<unset>" when nil.
func printBoolPtr(value *bool) string {
	if value == nil {
		return "<unset>"
	}
	// Inlined from printBool to make this helper self-contained.
	if *value {
		return "True"
	}
	return "False"
}
// printBool renders a bool as "True" or "False".
func printBool(value bool) string {
	switch value {
	case true:
		return "True"
	default:
		return "False"
	}
}
// ReplicationControllerDescriber generates information about a replication controller
// and the pods it has created.
type ReplicationControllerDescriber struct {
	clientset.Interface // client used to fetch the controller, its pods, and events
}
// Describe returns a human-readable description of the named
// ReplicationController, the phase counts of its pods, and (when requested)
// its recent events.
func (d *ReplicationControllerDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	rc := d.CoreV1().ReplicationControllers(namespace)
	pc := d.CoreV1().Pods(namespace)

	controller, err := rc.Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	// Count the controller's pods by phase, matching on both the label
	// selector and the owning UID.
	running, waiting, succeeded, failed, err := getPodStatusForController(pc, labels.SelectorFromSet(controller.Spec.Selector), controller.UID)
	if err != nil {
		return "", err
	}

	var events *corev1.EventList
	if describerSettings.ShowEvents {
		events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, controller)
	}

	return describeReplicationController(controller, events, running, waiting, succeeded, failed)
}
// describeReplicationController renders a human-readable description of a
// ReplicationController: metadata, replica counts, pod phase summary, pod
// template, conditions, and (when non-nil) recent events.
func describeReplicationController(controller *corev1.ReplicationController, events *corev1.EventList, running, waiting, succeeded, failed int) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", controller.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", controller.Namespace)
		w.Write(LEVEL_0, "Selector:\t%s\n", labels.FormatLabels(controller.Spec.Selector))
		printLabelsMultiline(w, "Labels", controller.Labels)
		printAnnotationsMultiline(w, "Annotations", controller.Annotations)
		// NOTE(review): Spec.Replicas is dereferenced without a nil check;
		// presumably API defaulting guarantees it is set — confirm.
		w.Write(LEVEL_0, "Replicas:\t%d current / %d desired\n", controller.Status.Replicas, *controller.Spec.Replicas)
		w.Write(LEVEL_0, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed)
		DescribePodTemplate(controller.Spec.Template, w)
		if len(controller.Status.Conditions) > 0 {
			w.Write(LEVEL_0, "Conditions:\n  Type\tStatus\tReason\n")
			w.Write(LEVEL_1, "----\t------\t------\n")
			for _, c := range controller.Status.Conditions {
				w.Write(LEVEL_1, "%v \t%v\t%v\n", c.Type, c.Status, c.Reason)
			}
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// DescribePodTemplate prints a "Pod Template" section: labels, annotations,
// service account, init containers, containers, volumes, and priority class.
// A nil template prints "<unset>".
func DescribePodTemplate(template *corev1.PodTemplateSpec, w PrefixWriter) {
	w.Write(LEVEL_0, "Pod Template:\n")
	if template == nil {
		// NOTE(review): no trailing "\n" here, unlike every other Write in
		// this function — confirm whether the missing newline is intended.
		w.Write(LEVEL_1, "<unset>")
		return
	}
	printLabelsMultiline(w, "  Labels", template.Labels)
	if len(template.Annotations) > 0 {
		printAnnotationsMultiline(w, "  Annotations", template.Annotations)
	}
	if len(template.Spec.ServiceAccountName) > 0 {
		w.Write(LEVEL_1, "Service Account:\t%s\n", template.Spec.ServiceAccountName)
	}
	if len(template.Spec.InitContainers) > 0 {
		// nil statuses/resolver: a template has no runtime state to resolve.
		describeContainers("Init Containers", template.Spec.InitContainers, nil, nil, w, "  ")
	}
	describeContainers("Containers", template.Spec.Containers, nil, nil, w, "  ")
	describeVolumes(template.Spec.Volumes, w, "  ")
	if len(template.Spec.PriorityClassName) > 0 {
		w.Write(LEVEL_1, "Priority Class Name:\t%s\n", template.Spec.PriorityClassName)
	}
}
// ReplicaSetDescriber generates information about a ReplicaSet and the pods it has created.
type ReplicaSetDescriber struct {
	clientset.Interface // client used to fetch the replica set, its pods, and events
}
// Describe returns a human-readable description of the named ReplicaSet,
// the phase counts of its pods, and (when requested) its recent events.
func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	rsc := d.AppsV1().ReplicaSets(namespace)
	pc := d.CoreV1().Pods(namespace)

	rs, err := rsc.Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
	if err != nil {
		return "", err
	}

	// A pod-status lookup failure is not fatal here: the error is passed
	// through so the description can report it in place of the counts.
	running, waiting, succeeded, failed, getPodErr := getPodStatusForController(pc, selector, rs.UID)

	var events *corev1.EventList
	if describerSettings.ShowEvents {
		events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, rs)
	}

	return describeReplicaSet(rs, events, running, waiting, succeeded, failed, getPodErr)
}
// describeReplicaSet renders a human-readable description of a ReplicaSet:
// metadata, owner, replica counts, pod phase summary (or the pod-lookup
// error), pod template, conditions, and (when non-nil) recent events.
func describeReplicaSet(rs *appsv1.ReplicaSet, events *corev1.EventList, running, waiting, succeeded, failed int, getPodErr error) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", rs.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", rs.Namespace)
		w.Write(LEVEL_0, "Selector:\t%s\n", metav1.FormatLabelSelector(rs.Spec.Selector))
		printLabelsMultiline(w, "Labels", rs.Labels)
		printAnnotationsMultiline(w, "Annotations", rs.Annotations)
		if controlledBy := printController(rs); len(controlledBy) > 0 {
			w.Write(LEVEL_0, "Controlled By:\t%s\n", controlledBy)
		}
		// NOTE(review): Spec.Replicas is dereferenced without a nil check;
		// presumably API defaulting guarantees it is set — confirm.
		w.Write(LEVEL_0, "Replicas:\t%d current / %d desired\n", rs.Status.Replicas, *rs.Spec.Replicas)
		w.Write(LEVEL_0, "Pods Status:\t")
		// Pod-count lookup errors are surfaced inline rather than failing the
		// whole description.
		if getPodErr != nil {
			w.Write(LEVEL_0, "error in fetching pods: %s\n", getPodErr)
		} else {
			w.Write(LEVEL_0, "%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed)
		}
		DescribePodTemplate(&rs.Spec.Template, w)
		if len(rs.Status.Conditions) > 0 {
			w.Write(LEVEL_0, "Conditions:\n  Type\tStatus\tReason\n")
			w.Write(LEVEL_1, "----\t------\t------\n")
			for _, c := range rs.Status.Conditions {
				w.Write(LEVEL_1, "%v \t%v\t%v\n", c.Type, c.Status, c.Reason)
			}
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// JobDescriber generates information about a job and the pods it has created.
type JobDescriber struct {
	clientset.Interface // client used to fetch the job and its events
}
// Describe returns a human-readable description of the named Job and
// (when requested) its recent events.
func (d *JobDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	job, err := d.BatchV1().Jobs(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	var events *corev1.EventList
	if describerSettings.ShowEvents {
		events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, job)
	}

	return describeJob(job, events)
}
// describeJob renders a human-readable description of a Job: metadata,
// parallelism/completions, timing, pod status counts, the pod template, and
// (when non-nil) recent events.
func describeJob(job *batchv1.Job, events *corev1.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", job.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", job.Namespace)
		if selector, err := metav1.LabelSelectorAsSelector(job.Spec.Selector); err == nil {
			w.Write(LEVEL_0, "Selector:\t%s\n", selector)
		} else {
			w.Write(LEVEL_0, "Selector:\tFailed to get selector: %s\n", err)
		}
		printLabelsMultiline(w, "Labels", job.Labels)
		printAnnotationsMultiline(w, "Annotations", job.Annotations)
		if controlledBy := printController(job); len(controlledBy) > 0 {
			w.Write(LEVEL_0, "Controlled By:\t%s\n", controlledBy)
		}
		// Bug fix: Parallelism was previously dereferenced unconditionally
		// and could panic on a nil pointer; guard it the same way Completions
		// is handled below (and describeJobTemplate handles both).
		if job.Spec.Parallelism != nil {
			w.Write(LEVEL_0, "Parallelism:\t%d\n", *job.Spec.Parallelism)
		} else {
			w.Write(LEVEL_0, "Parallelism:\t<unset>\n")
		}
		if job.Spec.Completions != nil {
			w.Write(LEVEL_0, "Completions:\t%d\n", *job.Spec.Completions)
		} else {
			w.Write(LEVEL_0, "Completions:\t<unset>\n")
		}
		if job.Status.StartTime != nil {
			w.Write(LEVEL_0, "Start Time:\t%s\n", job.Status.StartTime.Time.Format(time.RFC1123Z))
		}
		if job.Status.CompletionTime != nil {
			w.Write(LEVEL_0, "Completed At:\t%s\n", job.Status.CompletionTime.Time.Format(time.RFC1123Z))
		}
		if job.Status.StartTime != nil && job.Status.CompletionTime != nil {
			w.Write(LEVEL_0, "Duration:\t%s\n", duration.HumanDuration(job.Status.CompletionTime.Sub(job.Status.StartTime.Time)))
		}
		if job.Spec.ActiveDeadlineSeconds != nil {
			w.Write(LEVEL_0, "Active Deadline Seconds:\t%ds\n", *job.Spec.ActiveDeadlineSeconds)
		}
		w.Write(LEVEL_0, "Pods Statuses:\t%d Running / %d Succeeded / %d Failed\n", job.Status.Active, job.Status.Succeeded, job.Status.Failed)
		DescribePodTemplate(&job.Spec.Template, w)
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// CronJobDescriber generates information about a cron job and the jobs it has created.
type CronJobDescriber struct {
	client clientset.Interface // client used to fetch the cron job and its events
}
// Describe returns a human-readable description of the named CronJob and
// (when requested) its recent events.
func (d *CronJobDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	cronJob, err := d.client.BatchV1beta1().CronJobs(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	var events *corev1.EventList
	if describerSettings.ShowEvents {
		events, _ = d.client.CoreV1().Events(namespace).Search(scheme.Scheme, cronJob)
	}

	return describeCronJob(cronJob, events)
}
// describeCronJob renders a human-readable description of a CronJob:
// metadata, schedule and policies, history limits, the job template, last
// schedule time, active jobs, and (when non-nil) recent events.
func describeCronJob(cronJob *batchv1beta1.CronJob, events *corev1.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", cronJob.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", cronJob.Namespace)
		printLabelsMultiline(w, "Labels", cronJob.Labels)
		printAnnotationsMultiline(w, "Annotations", cronJob.Annotations)
		w.Write(LEVEL_0, "Schedule:\t%s\n", cronJob.Spec.Schedule)
		w.Write(LEVEL_0, "Concurrency Policy:\t%s\n", cronJob.Spec.ConcurrencyPolicy)
		w.Write(LEVEL_0, "Suspend:\t%s\n", printBoolPtr(cronJob.Spec.Suspend))
		// Optional pointer fields print "<unset>" when absent.
		if cronJob.Spec.SuccessfulJobsHistoryLimit != nil {
			w.Write(LEVEL_0, "Successful Job History Limit:\t%d\n", *cronJob.Spec.SuccessfulJobsHistoryLimit)
		} else {
			w.Write(LEVEL_0, "Successful Job History Limit:\t<unset>\n")
		}
		if cronJob.Spec.FailedJobsHistoryLimit != nil {
			w.Write(LEVEL_0, "Failed Job History Limit:\t%d\n", *cronJob.Spec.FailedJobsHistoryLimit)
		} else {
			w.Write(LEVEL_0, "Failed Job History Limit:\t<unset>\n")
		}
		if cronJob.Spec.StartingDeadlineSeconds != nil {
			w.Write(LEVEL_0, "Starting Deadline Seconds:\t%ds\n", *cronJob.Spec.StartingDeadlineSeconds)
		} else {
			w.Write(LEVEL_0, "Starting Deadline Seconds:\t<unset>\n")
		}
		describeJobTemplate(cronJob.Spec.JobTemplate, w)
		if cronJob.Status.LastScheduleTime != nil {
			w.Write(LEVEL_0, "Last Schedule Time:\t%s\n", cronJob.Status.LastScheduleTime.Time.Format(time.RFC1123Z))
		} else {
			w.Write(LEVEL_0, "Last Schedule Time:\t<unset>\n")
		}
		printActiveJobs(w, "Active Jobs", cronJob.Status.Active)
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// describeJobTemplate prints the job template embedded in a CronJob:
// selector, parallelism, completions, active deadline, and the pod template.
// Optional pointer fields print "<unset>" when absent.
func describeJobTemplate(jobTemplate batchv1beta1.JobTemplateSpec, w PrefixWriter) {
	if jobTemplate.Spec.Selector != nil {
		if selector, err := metav1.LabelSelectorAsSelector(jobTemplate.Spec.Selector); err == nil {
			w.Write(LEVEL_0, "Selector:\t%s\n", selector)
		} else {
			// An invalid selector is reported inline instead of aborting.
			w.Write(LEVEL_0, "Selector:\tFailed to get selector: %s\n", err)
		}
	} else {
		w.Write(LEVEL_0, "Selector:\t<unset>\n")
	}
	if jobTemplate.Spec.Parallelism != nil {
		w.Write(LEVEL_0, "Parallelism:\t%d\n", *jobTemplate.Spec.Parallelism)
	} else {
		w.Write(LEVEL_0, "Parallelism:\t<unset>\n")
	}
	if jobTemplate.Spec.Completions != nil {
		w.Write(LEVEL_0, "Completions:\t%d\n", *jobTemplate.Spec.Completions)
	} else {
		w.Write(LEVEL_0, "Completions:\t<unset>\n")
	}
	if jobTemplate.Spec.ActiveDeadlineSeconds != nil {
		w.Write(LEVEL_0, "Active Deadline Seconds:\t%ds\n", *jobTemplate.Spec.ActiveDeadlineSeconds)
	}
	DescribePodTemplate(&jobTemplate.Spec.Template, w)
}
// printActiveJobs prints a comma-separated list of job names under the given
// title, or "<none>" when the list is empty.
func printActiveJobs(w PrefixWriter, title string, jobs []corev1.ObjectReference) {
	w.Write(LEVEL_0, "%s:\t", title)
	if len(jobs) == 0 {
		w.WriteLine("<none>")
		return
	}

	for idx, job := range jobs {
		if idx > 0 {
			w.Write(LEVEL_0, ", ")
		}
		w.Write(LEVEL_0, "%s", job.Name)
	}
	w.WriteLine("")
}
// DaemonSetDescriber generates information about a daemon set and the pods it has created.
type DaemonSetDescriber struct {
	clientset.Interface // client used to fetch the daemon set, its pods, and events
}
// Describe returns a human-readable description of the named DaemonSet,
// the phase counts of its pods, and (when requested) its recent events.
func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	dc := d.AppsV1().DaemonSets(namespace)
	pc := d.CoreV1().Pods(namespace)

	daemon, err := dc.Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	selector, err := metav1.LabelSelectorAsSelector(daemon.Spec.Selector)
	if err != nil {
		return "", err
	}

	running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector, daemon.UID)
	if err != nil {
		return "", err
	}

	var events *corev1.EventList
	if describerSettings.ShowEvents {
		events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, daemon)
	}

	return describeDaemonSet(daemon, events, running, waiting, succeeded, failed)
}
// describeDaemonSet renders a human-readable description of a DaemonSet:
// metadata, scheduling counts, pod phase summary, the pod template, and
// (when non-nil) recent events.
func describeDaemonSet(daemon *appsv1.DaemonSet, events *corev1.EventList, running, waiting, succeeded, failed int) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", daemon.Name)
		selector, err := metav1.LabelSelectorAsSelector(daemon.Spec.Selector)
		if err != nil {
			// this shouldn't happen if LabelSelector passed validation
			return err
		}
		w.Write(LEVEL_0, "Selector:\t%s\n", selector)
		w.Write(LEVEL_0, "Node-Selector:\t%s\n", labels.FormatLabels(daemon.Spec.Template.Spec.NodeSelector))
		printLabelsMultiline(w, "Labels", daemon.Labels)
		printAnnotationsMultiline(w, "Annotations", daemon.Annotations)
		w.Write(LEVEL_0, "Desired Number of Nodes Scheduled: %d\n", daemon.Status.DesiredNumberScheduled)
		w.Write(LEVEL_0, "Current Number of Nodes Scheduled: %d\n", daemon.Status.CurrentNumberScheduled)
		w.Write(LEVEL_0, "Number of Nodes Scheduled with Up-to-date Pods: %d\n", daemon.Status.UpdatedNumberScheduled)
		w.Write(LEVEL_0, "Number of Nodes Scheduled with Available Pods: %d\n", daemon.Status.NumberAvailable)
		w.Write(LEVEL_0, "Number of Nodes Misscheduled: %d\n", daemon.Status.NumberMisscheduled)
		w.Write(LEVEL_0, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed)
		DescribePodTemplate(&daemon.Spec.Template, w)
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// SecretDescriber generates information about a secret
type SecretDescriber struct {
	clientset.Interface // client used to fetch the secret
}
// Describe returns a human-readable description of the named Secret.
// describerSettings is unused: secrets have no associated events to show.
func (d *SecretDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	c := d.CoreV1().Secrets(namespace)

	secret, err := c.Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	return describeSecret(secret)
}
// describeSecret renders a human-readable description of a Secret. Data
// values are shown only as byte counts, except the token key of a
// service-account-token secret, which is printed in clear text.
func describeSecret(secret *corev1.Secret) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", secret.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", secret.Namespace)
		printLabelsMultiline(w, "Labels", secret.Labels)
		// Hide the last-applied-configuration annotation, which may embed the
		// full secret manifest.
		skipAnnotations := sets.NewString(corev1.LastAppliedConfigAnnotation)
		printAnnotationsMultilineWithFilter(w, "Annotations", secret.Annotations, skipAnnotations)

		w.Write(LEVEL_0, "\nType:\t%s\n", secret.Type)

		w.Write(LEVEL_0, "\nData\n====\n")
		for k, v := range secret.Data {
			switch {
			case k == corev1.ServiceAccountTokenKey && secret.Type == corev1.SecretTypeServiceAccountToken:
				w.Write(LEVEL_0, "%s:\t%s\n", k, string(v))
			default:
				w.Write(LEVEL_0, "%s:\t%d bytes\n", k, len(v))
			}
		}

		return nil
	})
}
// IngressDescriber generates information about an ingress.
type IngressDescriber struct {
	// Embedded clientset used to fetch the ingress and related objects.
	clientset.Interface
}
// Describe fetches the named ingress and renders it as human-readable text.
func (i *IngressDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	ing, err := i.NetworkingV1beta1().Ingresses(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return i.describeIngress(ing, describerSettings)
}
// describeBackend resolves the endpoints behind an ingress backend and formats
// them, restricted to the service port the backend refers to (matched by name
// or by number depending on the ServicePort type).
func (i *IngressDescriber) describeBackend(ns string, backend *networkingv1beta1.IngressBackend) string {
	// Lookups are best-effort: describe output degrades gracefully when the
	// referenced service or its endpoints cannot be fetched.
	endpoints, _ := i.CoreV1().Endpoints(ns).Get(context.TODO(), backend.ServiceName, metav1.GetOptions{})
	service, _ := i.CoreV1().Services(ns).Get(context.TODO(), backend.ServiceName, metav1.GetOptions{})
	spName := ""
	for _, sp := range service.Spec.Ports {
		switch backend.ServicePort.Type {
		case intstr.String:
			if sp.Name == backend.ServicePort.StrVal {
				spName = sp.Name
			}
		case intstr.Int:
			if sp.Port == int32(backend.ServicePort.IntVal) {
				spName = sp.Name
			}
		}
	}
	return formatEndpoints(endpoints, sets.NewString(spName))
}
// describeIngress renders an ingress — default backend, TLS configuration,
// host/path rules, annotations and (optionally) events — as human-readable text.
func (i *IngressDescriber) describeIngress(ing *networkingv1beta1.Ingress, describerSettings describe.DescriberSettings) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%v\n", ing.Name)
		w.Write(LEVEL_0, "Namespace:\t%v\n", ing.Namespace)
		w.Write(LEVEL_0, "Address:\t%v\n", loadBalancerStatusStringer(ing.Status.LoadBalancer, true))
		def := ing.Spec.Backend
		ns := ing.Namespace
		if def == nil {
			// Ingresses that don't specify a default backend inherit the
			// default backend in the kube-system namespace.
			def = &networkingv1beta1.IngressBackend{
				ServiceName: "default-http-backend",
				ServicePort: intstr.IntOrString{Type: intstr.Int, IntVal: 80},
			}
			ns = metav1.NamespaceSystem
		}
		w.Write(LEVEL_0, "Default backend:\t%s (%s)\n", backendStringer(def), i.describeBackend(ns, def))
		if len(ing.Spec.TLS) != 0 {
			describeIngressTLS(w, ing.Spec.TLS)
		}
		w.Write(LEVEL_0, "Rules:\n Host\tPath\tBackends\n")
		w.Write(LEVEL_1, "----\t----\t--------\n")
		// count tracks how many rules actually carry HTTP paths; rules without
		// an HTTP section are skipped entirely.
		count := 0
		for _, rules := range ing.Spec.Rules {
			if rules.HTTP == nil {
				continue
			}
			count++
			host := rules.Host
			if len(host) == 0 {
				// An empty host matches all hosts.
				host = "*"
			}
			w.Write(LEVEL_1, "%s\t\n", host)
			for _, path := range rules.HTTP.Paths {
				w.Write(LEVEL_2, "\t%s \t%s (%s)\n", path.Path, backendStringer(&path.Backend), i.describeBackend(ing.Namespace, &path.Backend))
			}
		}
		if count == 0 {
			// No usable rules: show the default backend as the catch-all route.
			w.Write(LEVEL_1, "%s\t%s \t%s (%s)\n", "*", "*", backendStringer(def), i.describeBackend(ns, def))
		}
		printAnnotationsMultiline(w, "Annotations", ing.Annotations)
		if describerSettings.ShowEvents {
			events, _ := i.CoreV1().Events(ing.Namespace).Search(scheme.Scheme, ing)
			if events != nil {
				DescribeEvents(events, w)
			}
		}
		return nil
	})
}
// describeIngressTLS prints the TLS section of an ingress spec: one line per
// TLS entry, either SNI-routed (no secret) or terminated by a named secret.
func describeIngressTLS(w PrefixWriter, ingTLS []networkingv1beta1.IngressTLS) {
	w.Write(LEVEL_0, "TLS:\n")
	for _, t := range ingTLS {
		hosts := strings.Join(t.Hosts, ",")
		if t.SecretName == "" {
			w.Write(LEVEL_1, "SNI routes %v\n", hosts)
		} else {
			w.Write(LEVEL_1, "%v terminates %v\n", t.SecretName, hosts)
		}
	}
}
// ServiceDescriber generates information about a service.
type ServiceDescriber struct {
	// Embedded clientset used to fetch the service and related objects.
	clientset.Interface
}
// Describe fetches the named service plus its endpoints (and, optionally, its
// events) and renders everything as human-readable text.
func (d *ServiceDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	service, err := d.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	// Endpoints lookup is best-effort; describeService tolerates a nil result.
	endpoints, _ := d.CoreV1().Endpoints(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, service)
	}
	return describeService(service, endpoints, events)
}
// buildIngressString renders load-balancer ingress points as a comma-separated
// list, preferring each entry's IP over its hostname.
func buildIngressString(ingress []corev1.LoadBalancerIngress) string {
	var sb strings.Builder
	for idx, ing := range ingress {
		if idx > 0 {
			sb.WriteString(", ")
		}
		if ing.IP != "" {
			sb.WriteString(ing.IP)
		} else {
			sb.WriteString(ing.Hostname)
		}
	}
	return sb.String()
}
// describeService renders a service, its endpoints and (if supplied) related
// events as human-readable text. A nil endpoints argument is tolerated.
func describeService(service *corev1.Service, endpoints *corev1.Endpoints, events *corev1.EventList) (string, error) {
	if endpoints == nil {
		endpoints = &corev1.Endpoints{}
	}
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", service.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", service.Namespace)
		printLabelsMultiline(w, "Labels", service.Labels)
		printAnnotationsMultiline(w, "Annotations", service.Annotations)
		w.Write(LEVEL_0, "Selector:\t%s\n", labels.FormatLabels(service.Spec.Selector))
		w.Write(LEVEL_0, "Type:\t%s\n", service.Spec.Type)
		w.Write(LEVEL_0, "IP:\t%s\n", service.Spec.ClusterIP)
		if service.Spec.IPFamily != nil {
			w.Write(LEVEL_0, "IPFamily:\t%s\n", *(service.Spec.IPFamily))
		}
		if len(service.Spec.ExternalIPs) > 0 {
			w.Write(LEVEL_0, "External IPs:\t%v\n", strings.Join(service.Spec.ExternalIPs, ","))
		}
		if service.Spec.LoadBalancerIP != "" {
			// NOTE(review): this prints a second "IP:" row (the requested
			// load-balancer IP) with the same label as the cluster IP above —
			// confirm whether a distinct label was intended.
			w.Write(LEVEL_0, "IP:\t%s\n", service.Spec.LoadBalancerIP)
		}
		if service.Spec.ExternalName != "" {
			w.Write(LEVEL_0, "External Name:\t%s\n", service.Spec.ExternalName)
		}
		if len(service.Status.LoadBalancer.Ingress) > 0 {
			list := buildIngressString(service.Status.LoadBalancer.Ingress)
			w.Write(LEVEL_0, "LoadBalancer Ingress:\t%s\n", list)
		}
		for i := range service.Spec.Ports {
			sp := &service.Spec.Ports[i]
			// Display name substitutes "<unset>" for an empty port name.
			name := sp.Name
			if name == "" {
				name = "<unset>"
			}
			w.Write(LEVEL_0, "Port:\t%s\t%d/%s\n", name, sp.Port, sp.Protocol)
			if sp.TargetPort.Type == intstr.Type(intstr.Int) {
				w.Write(LEVEL_0, "TargetPort:\t%d/%s\n", sp.TargetPort.IntVal, sp.Protocol)
			} else {
				w.Write(LEVEL_0, "TargetPort:\t%s/%s\n", sp.TargetPort.StrVal, sp.Protocol)
			}
			if sp.NodePort != 0 {
				w.Write(LEVEL_0, "NodePort:\t%s\t%d/%s\n", name, sp.NodePort, sp.Protocol)
			}
			// Endpoints are filtered by the original (possibly empty) port
			// name, not the "<unset>" display name above.
			w.Write(LEVEL_0, "Endpoints:\t%s\n", formatEndpoints(endpoints, sets.NewString(sp.Name)))
		}
		w.Write(LEVEL_0, "Session Affinity:\t%s\n", service.Spec.SessionAffinity)
		if service.Spec.ExternalTrafficPolicy != "" {
			w.Write(LEVEL_0, "External Traffic Policy:\t%s\n", service.Spec.ExternalTrafficPolicy)
		}
		if service.Spec.HealthCheckNodePort != 0 {
			w.Write(LEVEL_0, "HealthCheck NodePort:\t%d\n", service.Spec.HealthCheckNodePort)
		}
		if len(service.Spec.LoadBalancerSourceRanges) > 0 {
			w.Write(LEVEL_0, "LoadBalancer Source Ranges:\t%v\n", strings.Join(service.Spec.LoadBalancerSourceRanges, ","))
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// EndpointsDescriber generates information about an Endpoint.
type EndpointsDescriber struct {
	// Embedded clientset used to fetch the Endpoints object.
	clientset.Interface
}
// Describe fetches the named Endpoints object (and, optionally, its events)
// and renders it as human-readable text.
func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	ep, err := d.CoreV1().Endpoints(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, ep)
	}
	return describeEndpoints(ep, events)
}
// describeEndpoints renders an Endpoints object — one section per subset with
// its ready/not-ready addresses and ports — as human-readable text.
func describeEndpoints(ep *corev1.Endpoints, events *corev1.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", ep.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", ep.Namespace)
		printLabelsMultiline(w, "Labels", ep.Labels)
		printAnnotationsMultiline(w, "Annotations", ep.Annotations)
		w.Write(LEVEL_0, "Subsets:\n")
		for i := range ep.Subsets {
			subset := &ep.Subsets[i]
			// Ready addresses, comma-joined; "<none>" when empty.
			addresses := make([]string, 0, len(subset.Addresses))
			for _, addr := range subset.Addresses {
				addresses = append(addresses, addr.IP)
			}
			addressesString := strings.Join(addresses, ",")
			if len(addressesString) == 0 {
				addressesString = "<none>"
			}
			w.Write(LEVEL_1, "Addresses:\t%s\n", addressesString)
			// Not-ready addresses, same formatting rules.
			notReadyAddresses := make([]string, 0, len(subset.NotReadyAddresses))
			for _, addr := range subset.NotReadyAddresses {
				notReadyAddresses = append(notReadyAddresses, addr.IP)
			}
			notReadyAddressesString := strings.Join(notReadyAddresses, ",")
			if len(notReadyAddressesString) == 0 {
				notReadyAddressesString = "<none>"
			}
			w.Write(LEVEL_1, "NotReadyAddresses:\t%s\n", notReadyAddressesString)
			if len(subset.Ports) > 0 {
				w.Write(LEVEL_1, "Ports:\n")
				w.Write(LEVEL_2, "Name\tPort\tProtocol\n")
				w.Write(LEVEL_2, "----\t----\t--------\n")
				for _, port := range subset.Ports {
					name := port.Name
					if len(name) == 0 {
						name = "<unset>"
					}
					w.Write(LEVEL_2, "%s\t%d\t%s\n", name, port.Port, port.Protocol)
				}
			}
			w.Write(LEVEL_0, "\n")
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// EndpointSliceDescriber generates information about an EndpointSlice.
type EndpointSliceDescriber struct {
	// Embedded clientset used to fetch the EndpointSlice.
	clientset.Interface
}
// Describe fetches the named EndpointSlice (and, optionally, its events) and
// renders it as human-readable text.
func (d *EndpointSliceDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	slice, err := d.DiscoveryV1beta1().EndpointSlices(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, slice)
	}
	return describeEndpointSlice(slice, events)
}
// describeEndpointSlice renders an EndpointSlice — address type, ports, and
// each endpoint's addresses/conditions/hostname/target — as human-readable text.
func describeEndpointSlice(eps *discoveryv1beta1.EndpointSlice, events *corev1.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", eps.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", eps.Namespace)
		printLabelsMultiline(w, "Labels", eps.Labels)
		printAnnotationsMultiline(w, "Annotations", eps.Annotations)
		w.Write(LEVEL_0, "AddressType:\t%s\n", string(eps.AddressType))
		if len(eps.Ports) == 0 {
			w.Write(LEVEL_0, "Ports: <unset>\n")
		} else {
			w.Write(LEVEL_0, "Ports:\n")
			w.Write(LEVEL_1, "Name\tPort\tProtocol\n")
			w.Write(LEVEL_1, "----\t----\t--------\n")
			for _, port := range eps.Ports {
				// Name and Port are optional pointers; show "<unset>" when nil.
				portName := "<unset>"
				if port.Name != nil && len(*port.Name) > 0 {
					portName = *port.Name
				}
				portNum := "<unset>"
				if port.Port != nil {
					portNum = strconv.Itoa(int(*port.Port))
				}
				w.Write(LEVEL_1, "%s\t%s\t%s\n", portName, portNum, *port.Protocol)
			}
		}
		if len(eps.Endpoints) == 0 {
			w.Write(LEVEL_0, "Endpoints: <none>\n")
		} else {
			w.Write(LEVEL_0, "Endpoints:\n")
			for i := range eps.Endpoints {
				endpoint := &eps.Endpoints[i]
				addressesString := strings.Join(endpoint.Addresses, ",")
				if len(addressesString) == 0 {
					addressesString = "<none>"
				}
				w.Write(LEVEL_1, "- Addresses:\t%s\n", addressesString)
				w.Write(LEVEL_2, "Conditions:\n")
				// Ready is a *bool; unset is distinct from false.
				readyText := "<unset>"
				if endpoint.Conditions.Ready != nil {
					readyText = strconv.FormatBool(*endpoint.Conditions.Ready)
				}
				w.Write(LEVEL_3, "Ready:\t%s\n", readyText)
				hostnameText := "<unset>"
				if endpoint.Hostname != nil {
					hostnameText = *endpoint.Hostname
				}
				w.Write(LEVEL_2, "Hostname:\t%s\n", hostnameText)
				if endpoint.TargetRef != nil {
					w.Write(LEVEL_2, "TargetRef:\t%s/%s\n", endpoint.TargetRef.Kind, endpoint.TargetRef.Name)
				}
				printLabelsMultilineWithIndent(w, "  ", "Topology", "\t", endpoint.Topology, sets.NewString())
			}
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// ServiceAccountDescriber generates information about a service account.
type ServiceAccountDescriber struct {
	// Embedded clientset used to fetch the service account and its secrets.
	clientset.Interface
}
// Describe fetches the named service account, cross-references the namespace's
// secrets to find its tokens and any dangling secret references, and renders
// the result as human-readable text.
func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	c := d.CoreV1().ServiceAccounts(namespace)
	serviceAccount, err := c.Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	// tokens collects service-account-token secrets bound to this account.
	tokens := []corev1.Secret{}
	// missingSecrets is the set of all secrets present in the
	// serviceAccount but not present in the set of existing secrets.
	missingSecrets := sets.NewString()
	secrets, err := d.CoreV1().Secrets(namespace).List(context.TODO(), metav1.ListOptions{})
	// errors are tolerated here in order to describe the serviceAccount with all
	// of the secrets that it references, even if those secrets cannot be fetched.
	if err == nil {
		// existingSecrets is the set of all secrets remaining on a
		// service account that are not present in the "tokens" slice.
		existingSecrets := sets.NewString()
		for _, s := range secrets.Items {
			if s.Type == corev1.SecretTypeServiceAccountToken {
				// A token belongs to this account only when both the name and
				// the UID annotations match (guards against recreated accounts).
				name := s.Annotations[corev1.ServiceAccountNameKey]
				uid := s.Annotations[corev1.ServiceAccountUIDKey]
				if name == serviceAccount.Name && uid == string(serviceAccount.UID) {
					tokens = append(tokens, s)
				}
			}
			existingSecrets.Insert(s.Name)
		}
		for _, s := range serviceAccount.Secrets {
			if !existingSecrets.Has(s.Name) {
				missingSecrets.Insert(s.Name)
			}
		}
		for _, s := range serviceAccount.ImagePullSecrets {
			if !existingSecrets.Has(s.Name) {
				missingSecrets.Insert(s.Name)
			}
		}
	}
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, serviceAccount)
	}
	return describeServiceAccount(serviceAccount, tokens, missingSecrets, events)
}
// describeServiceAccount renders a service account — its image pull secrets,
// mountable secrets and tokens — as human-readable text. Secrets listed in
// missingSecrets are annotated with "(not found)".
func describeServiceAccount(serviceAccount *corev1.ServiceAccount, tokens []corev1.Secret, missingSecrets sets.String, events *corev1.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", serviceAccount.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", serviceAccount.Namespace)
		printLabelsMultiline(w, "Labels", serviceAccount.Labels)
		printAnnotationsMultiline(w, "Annotations", serviceAccount.Annotations)
		var (
			// emptyHeader is blank padding the same width as the real headers,
			// used so continuation lines stay column-aligned.
			emptyHeader = "                   "
			pullHeader  = "Image pull secrets:"
			mountHeader = "Mountable secrets: "
			tokenHeader = "Tokens:            "

			pullSecretNames  = []string{}
			mountSecretNames = []string{}
			tokenSecretNames = []string{}
		)

		for _, s := range serviceAccount.ImagePullSecrets {
			pullSecretNames = append(pullSecretNames, s.Name)
		}
		for _, s := range serviceAccount.Secrets {
			mountSecretNames = append(mountSecretNames, s.Name)
		}
		for _, s := range tokens {
			tokenSecretNames = append(tokenSecretNames, s.Name)
		}

		types := map[string][]string{
			pullHeader:  pullSecretNames,
			mountHeader: mountSecretNames,
			tokenHeader: tokenSecretNames,
		}
		// Iterate headers in sorted order for deterministic output.
		for _, header := range sets.StringKeySet(types).List() {
			names := types[header]
			if len(names) == 0 {
				w.Write(LEVEL_0, "%s\t<none>\n", header)
			} else {
				// Print the header once; subsequent names get blank padding.
				prefix := header
				for _, name := range names {
					if missingSecrets.Has(name) {
						w.Write(LEVEL_0, "%s\t%s (not found)\n", prefix, name)
					} else {
						w.Write(LEVEL_0, "%s\t%s\n", prefix, name)
					}
					prefix = emptyHeader
				}
			}
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// RoleDescriber generates information about an RBAC role.
type RoleDescriber struct {
	// Embedded clientset used to fetch the role.
	clientset.Interface
}
// Describe fetches the named role, normalizes its policy rules (breaking
// compound rules apart, compacting and sorting them) and renders the result
// as human-readable text.
func (d *RoleDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	role, err := d.RbacV1().Roles(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	// Expand each compound rule into single-resource/verb rules, then compact
	// equivalent ones back together for a stable, readable table.
	expanded := []rbacv1.PolicyRule{}
	for _, rule := range role.Rules {
		expanded = append(expanded, rbac.BreakdownRule(rule)...)
	}
	rules, err := rbac.CompactRules(expanded)
	if err != nil {
		return "", err
	}
	sort.Stable(rbac.SortableRuleSlice(rules))

	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", role.Name)
		printLabelsMultiline(w, "Labels", role.Labels)
		printAnnotationsMultiline(w, "Annotations", role.Annotations)

		w.Write(LEVEL_0, "PolicyRule:\n")
		w.Write(LEVEL_1, "Resources\tNon-Resource URLs\tResource Names\tVerbs\n")
		w.Write(LEVEL_1, "---------\t-----------------\t--------------\t-----\n")
		for _, rule := range rules {
			w.Write(LEVEL_1, "%s\t%v\t%v\t%v\n", CombineResourceGroup(rule.Resources, rule.APIGroups), rule.NonResourceURLs, rule.ResourceNames, rule.Verbs)
		}

		return nil
	})
}
// ClusterRoleDescriber generates information about an RBAC cluster role.
type ClusterRoleDescriber struct {
	// Embedded clientset used to fetch the cluster role.
	clientset.Interface
}
// Describe fetches the named cluster role, normalizes its policy rules
// (breaking compound rules apart, compacting and sorting them) and renders
// the result as human-readable text.
func (d *ClusterRoleDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	role, err := d.RbacV1().ClusterRoles().Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	// Expand each compound rule into single-resource/verb rules, then compact
	// equivalent ones back together for a stable, readable table.
	expanded := []rbacv1.PolicyRule{}
	for _, rule := range role.Rules {
		expanded = append(expanded, rbac.BreakdownRule(rule)...)
	}
	rules, err := rbac.CompactRules(expanded)
	if err != nil {
		return "", err
	}
	sort.Stable(rbac.SortableRuleSlice(rules))

	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", role.Name)
		printLabelsMultiline(w, "Labels", role.Labels)
		printAnnotationsMultiline(w, "Annotations", role.Annotations)

		w.Write(LEVEL_0, "PolicyRule:\n")
		w.Write(LEVEL_1, "Resources\tNon-Resource URLs\tResource Names\tVerbs\n")
		w.Write(LEVEL_1, "---------\t-----------------\t--------------\t-----\n")
		for _, rule := range rules {
			w.Write(LEVEL_1, "%s\t%v\t%v\t%v\n", CombineResourceGroup(rule.Resources, rule.APIGroups), rule.NonResourceURLs, rule.ResourceNames, rule.Verbs)
		}

		return nil
	})
}
// CombineResourceGroup joins the first resource and the first API group into a
// display string of the form "resource.group/subresource". The group and the
// subresource parts are omitted when absent; an empty resource list yields "".
func CombineResourceGroup(resource, group []string) string {
	if len(resource) == 0 {
		return ""
	}
	parts := strings.SplitN(resource[0], "/", 2)
	segments := []string{parts[0]}
	if len(group) > 0 && group[0] != "" {
		segments[0] = parts[0] + "." + group[0]
	}
	if len(parts) == 2 {
		segments = append(segments, parts[1])
	}
	return strings.Join(segments, "/")
}
// RoleBindingDescriber generates information about an RBAC role binding.
type RoleBindingDescriber struct {
	// Embedded clientset used to fetch the role binding.
	clientset.Interface
}
// Describe fetches the named role binding and renders its role reference and
// subjects as human-readable text.
func (d *RoleBindingDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	rb, err := d.RbacV1().RoleBindings(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", rb.Name)
		printLabelsMultiline(w, "Labels", rb.Labels)
		printAnnotationsMultiline(w, "Annotations", rb.Annotations)

		w.Write(LEVEL_0, "Role:\n")
		w.Write(LEVEL_1, "Kind:\t%s\n", rb.RoleRef.Kind)
		w.Write(LEVEL_1, "Name:\t%s\n", rb.RoleRef.Name)

		w.Write(LEVEL_0, "Subjects:\n")
		w.Write(LEVEL_1, "Kind\tName\tNamespace\n")
		w.Write(LEVEL_1, "----\t----\t---------\n")
		for _, subject := range rb.Subjects {
			w.Write(LEVEL_1, "%s\t%s\t%s\n", subject.Kind, subject.Name, subject.Namespace)
		}

		return nil
	})
}
// ClusterRoleBindingDescriber generates information about an RBAC cluster role binding.
type ClusterRoleBindingDescriber struct {
	// Embedded clientset used to fetch the cluster role binding.
	clientset.Interface
}
// Describe fetches the named cluster role binding and renders its role
// reference and subjects as human-readable text.
func (d *ClusterRoleBindingDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	crb, err := d.RbacV1().ClusterRoleBindings().Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", crb.Name)
		printLabelsMultiline(w, "Labels", crb.Labels)
		printAnnotationsMultiline(w, "Annotations", crb.Annotations)

		w.Write(LEVEL_0, "Role:\n")
		w.Write(LEVEL_1, "Kind:\t%s\n", crb.RoleRef.Kind)
		w.Write(LEVEL_1, "Name:\t%s\n", crb.RoleRef.Name)

		w.Write(LEVEL_0, "Subjects:\n")
		w.Write(LEVEL_1, "Kind\tName\tNamespace\n")
		w.Write(LEVEL_1, "----\t----\t---------\n")
		for _, subject := range crb.Subjects {
			w.Write(LEVEL_1, "%s\t%s\t%s\n", subject.Kind, subject.Name, subject.Namespace)
		}

		return nil
	})
}
// NodeDescriber generates information about a node.
type NodeDescriber struct {
	// Embedded clientset used to fetch the node, its lease, and its pods.
	clientset.Interface
}
// Describe fetches the named node along with its heartbeat lease, the
// non-terminated pods scheduled on it, and (optionally) its events, then
// renders everything as human-readable text.
func (d *NodeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	mc := d.CoreV1().Nodes()
	node, err := mc.Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	lease, err := d.CoordinationV1().Leases(corev1.NamespaceNodeLease).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		if !errors.IsNotFound(err) {
			return "", err
		}
		// Corresponding Lease object doesn't exist - print it accordingly.
		lease = nil
	}

	// Select pods on this node that are not yet in a terminal phase.
	fieldSelector, err := fields.ParseSelector("spec.nodeName=" + name + ",status.phase!=" + string(corev1.PodSucceeded) + ",status.phase!=" + string(corev1.PodFailed))
	if err != nil {
		return "", err
	}
	// in a policy aware setting, users may have access to a node, but not all pods
	// in that case, we note that the user does not have access to the pods
	canViewPods := true
	nodeNonTerminatedPodsList, err := d.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{FieldSelector: fieldSelector.String()})
	if err != nil {
		if !errors.IsForbidden(err) {
			return "", err
		}
		canViewPods = false
	}

	var events *corev1.EventList
	if describerSettings.ShowEvents {
		if ref, err := reference.GetReference(scheme.Scheme, node); err != nil {
			klog.Errorf("Unable to construct reference to '%#v': %v", node, err)
		} else {
			// TODO: We haven't decided the namespace for Node object yet.
			ref.UID = types.UID(ref.Name)
			events, _ = d.CoreV1().Events("").Search(scheme.Scheme, ref)
		}
	}

	return describeNode(node, lease, nodeNonTerminatedPodsList, events, canViewPods)
}
// describeNode renders a node — roles, lease, conditions, addresses, capacity,
// system info, pod usage and (optionally) events — as human-readable text.
// lease may be nil (no Lease object); canViewPods=false replaces the pod
// section with a "not authorized" note.
func describeNode(node *corev1.Node, lease *coordinationv1.Lease, nodeNonTerminatedPodsList *corev1.PodList, events *corev1.EventList, canViewPods bool) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", node.Name)
		if roles := findNodeRoles(node); len(roles) > 0 {
			w.Write(LEVEL_0, "Roles:\t%s\n", strings.Join(roles, ","))
		} else {
			w.Write(LEVEL_0, "Roles:\t%s\n", "<none>")
		}
		printLabelsMultiline(w, "Labels", node.Labels)
		printAnnotationsMultiline(w, "Annotations", node.Annotations)
		w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", node.CreationTimestamp.Time.Format(time.RFC1123Z))
		printNodeTaintsMultiline(w, "Taints", node.Spec.Taints)
		w.Write(LEVEL_0, "Unschedulable:\t%v\n", node.Spec.Unschedulable)

		// Lease fields are optional pointers; each falls back to "<unset>".
		w.Write(LEVEL_0, "Lease:\n")
		holderIdentity := "<unset>"
		if lease != nil && lease.Spec.HolderIdentity != nil {
			holderIdentity = *lease.Spec.HolderIdentity
		}
		w.Write(LEVEL_1, "HolderIdentity:\t%s\n", holderIdentity)
		acquireTime := "<unset>"
		if lease != nil && lease.Spec.AcquireTime != nil {
			acquireTime = lease.Spec.AcquireTime.Time.Format(time.RFC1123Z)
		}
		w.Write(LEVEL_1, "AcquireTime:\t%s\n", acquireTime)
		renewTime := "<unset>"
		if lease != nil && lease.Spec.RenewTime != nil {
			renewTime = lease.Spec.RenewTime.Time.Format(time.RFC1123Z)
		}
		w.Write(LEVEL_1, "RenewTime:\t%s\n", renewTime)

		if len(node.Status.Conditions) > 0 {
			w.Write(LEVEL_0, "Conditions:\n  Type\tStatus\tLastHeartbeatTime\tLastTransitionTime\tReason\tMessage\n")
			w.Write(LEVEL_1, "----\t------\t-----------------\t------------------\t------\t-------\n")
			for _, c := range node.Status.Conditions {
				w.Write(LEVEL_1, "%v \t%v \t%s \t%s \t%v \t%v\n",
					c.Type,
					c.Status,
					c.LastHeartbeatTime.Time.Format(time.RFC1123Z),
					c.LastTransitionTime.Time.Format(time.RFC1123Z),
					c.Reason,
					c.Message)
			}
		}

		w.Write(LEVEL_0, "Addresses:\n")
		for _, address := range node.Status.Addresses {
			w.Write(LEVEL_1, "%s:\t%s\n", address.Type, address.Address)
		}

		// Helper prints a resource list with names sorted for stable output.
		printResourceList := func(resourceList corev1.ResourceList) {
			resources := make([]corev1.ResourceName, 0, len(resourceList))
			for resource := range resourceList {
				resources = append(resources, resource)
			}
			sort.Sort(SortableResourceNames(resources))
			for _, resource := range resources {
				value := resourceList[resource]
				w.Write(LEVEL_0, " %s:\t%s\n", resource, value.String())
			}
		}

		if len(node.Status.Capacity) > 0 {
			w.Write(LEVEL_0, "Capacity:\n")
			printResourceList(node.Status.Capacity)
		}
		if len(node.Status.Allocatable) > 0 {
			w.Write(LEVEL_0, "Allocatable:\n")
			printResourceList(node.Status.Allocatable)
		}

		w.Write(LEVEL_0, "System Info:\n")
		w.Write(LEVEL_0, " Machine ID:\t%s\n", node.Status.NodeInfo.MachineID)
		w.Write(LEVEL_0, " System UUID:\t%s\n", node.Status.NodeInfo.SystemUUID)
		w.Write(LEVEL_0, " Boot ID:\t%s\n", node.Status.NodeInfo.BootID)
		w.Write(LEVEL_0, " Kernel Version:\t%s\n", node.Status.NodeInfo.KernelVersion)
		w.Write(LEVEL_0, " OS Image:\t%s\n", node.Status.NodeInfo.OSImage)
		w.Write(LEVEL_0, " Operating System:\t%s\n", node.Status.NodeInfo.OperatingSystem)
		w.Write(LEVEL_0, " Architecture:\t%s\n", node.Status.NodeInfo.Architecture)
		w.Write(LEVEL_0, " Container Runtime Version:\t%s\n", node.Status.NodeInfo.ContainerRuntimeVersion)
		w.Write(LEVEL_0, " Kubelet Version:\t%s\n", node.Status.NodeInfo.KubeletVersion)
		w.Write(LEVEL_0, " Kube-Proxy Version:\t%s\n", node.Status.NodeInfo.KubeProxyVersion)

		// remove when .PodCIDR is deprecated
		if len(node.Spec.PodCIDR) > 0 {
			w.Write(LEVEL_0, "PodCIDR:\t%s\n", node.Spec.PodCIDR)
		}
		if len(node.Spec.PodCIDRs) > 0 {
			w.Write(LEVEL_0, "PodCIDRs:\t%s\n", strings.Join(node.Spec.PodCIDRs, ","))
		}
		if len(node.Spec.ProviderID) > 0 {
			w.Write(LEVEL_0, "ProviderID:\t%s\n", node.Spec.ProviderID)
		}
		if canViewPods && nodeNonTerminatedPodsList != nil {
			describeNodeResource(nodeNonTerminatedPodsList, node, w)
		} else {
			w.Write(LEVEL_0, "Pods:\tnot authorized\n")
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// StatefulSetDescriber generates information about a StatefulSet.
type StatefulSetDescriber struct {
	// client is the clientset used to fetch the StatefulSet and its pods.
	client clientset.Interface
}
// Describe fetches the named StatefulSet, tallies the status of the pods it
// controls, and renders everything as human-readable text.
func (p *StatefulSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	sts, err := p.client.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	selector, err := metav1.LabelSelectorAsSelector(sts.Spec.Selector)
	if err != nil {
		return "", err
	}
	running, waiting, succeeded, failed, err := getPodStatusForController(p.client.CoreV1().Pods(namespace), selector, sts.UID)
	if err != nil {
		return "", err
	}
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		events, _ = p.client.CoreV1().Events(namespace).Search(scheme.Scheme, sts)
	}
	return describeStatefulSet(sts, selector, events, running, waiting, succeeded, failed)
}
// describeStatefulSet renders a StatefulSet — replica counts, update strategy,
// pod template, volume claim templates and (optionally) events — as
// human-readable text.
func describeStatefulSet(ps *appsv1.StatefulSet, selector labels.Selector, events *corev1.EventList, running, waiting, succeeded, failed int) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", ps.ObjectMeta.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", ps.ObjectMeta.Namespace)
		w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", ps.CreationTimestamp.Time.Format(time.RFC1123Z))
		w.Write(LEVEL_0, "Selector:\t%s\n", selector)
		printLabelsMultiline(w, "Labels", ps.Labels)
		printAnnotationsMultiline(w, "Annotations", ps.Annotations)
		// NOTE(review): dereferences Spec.Replicas unconditionally — assumes
		// the API server has defaulted it to non-nil; confirm for objects not
		// sourced from the server.
		w.Write(LEVEL_0, "Replicas:\t%d desired | %d total\n", *ps.Spec.Replicas, ps.Status.Replicas)
		w.Write(LEVEL_0, "Update Strategy:\t%s\n", ps.Spec.UpdateStrategy.Type)
		if ps.Spec.UpdateStrategy.RollingUpdate != nil {
			ru := ps.Spec.UpdateStrategy.RollingUpdate
			if ru.Partition != nil {
				w.Write(LEVEL_1, "Partition:\t%d\n", *ru.Partition)
			}
		}
		w.Write(LEVEL_0, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed)
		DescribePodTemplate(&ps.Spec.Template, w)
		describeVolumeClaimTemplates(ps.Spec.VolumeClaimTemplates, w)
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// CertificateSigningRequestDescriber generates information about a CertificateSigningRequest.
type CertificateSigningRequestDescriber struct {
	// client is the clientset used to fetch the CSR and its events.
	client clientset.Interface
}
// Describe fetches the named CSR, parses the embedded x509 certificate
// request, extracts the approval status, and renders it all as
// human-readable text.
func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	csr, err := p.client.CertificatesV1beta1().CertificateSigningRequests().Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	parsedReq, err := certificate.ParseCSR(csr)
	if err != nil {
		return "", fmt.Errorf("Error parsing CSR: %v", err)
	}

	status, err := extractCSRStatus(csr)
	if err != nil {
		return "", err
	}

	var events *corev1.EventList
	if describerSettings.ShowEvents {
		events, _ = p.client.CoreV1().Events(namespace).Search(scheme.Scheme, csr)
	}

	return describeCertificateSigningRequest(csr, parsedReq, status, events)
}
// describeCertificateSigningRequest renders a CSR — requester, status, the
// parsed x509 subject and subject alternative names, and (optionally) events —
// as human-readable text.
func describeCertificateSigningRequest(csr *certificatesv1beta1.CertificateSigningRequest, cr *x509.CertificateRequest, status string, events *corev1.EventList) (string, error) {
	// printListHelper prints a labeled list, one value per line, with
	// continuation lines aligned under the first; empty lists are skipped.
	printListHelper := func(w PrefixWriter, prefix, name string, values []string) {
		if len(values) == 0 {
			return
		}
		w.Write(LEVEL_0, prefix+name+":\t")
		w.Write(LEVEL_0, strings.Join(values, "\n"+prefix+"\t"))
		w.Write(LEVEL_0, "\n")
	}

	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", csr.Name)
		w.Write(LEVEL_0, "Labels:\t%s\n", labels.FormatLabels(csr.Labels))
		w.Write(LEVEL_0, "Annotations:\t%s\n", labels.FormatLabels(csr.Annotations))
		w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", csr.CreationTimestamp.Time.Format(time.RFC1123Z))
		w.Write(LEVEL_0, "Requesting User:\t%s\n", csr.Spec.Username)
		w.Write(LEVEL_0, "Status:\t%s\n", status)

		w.Write(LEVEL_0, "Subject:\n")
		w.Write(LEVEL_0, "\tCommon Name:\t%s\n", cr.Subject.CommonName)
		w.Write(LEVEL_0, "\tSerial Number:\t%s\n", cr.Subject.SerialNumber)
		printListHelper(w, "\t", "Organization", cr.Subject.Organization)
		printListHelper(w, "\t", "Organizational Unit", cr.Subject.OrganizationalUnit)
		printListHelper(w, "\t", "Country", cr.Subject.Country)
		printListHelper(w, "\t", "Locality", cr.Subject.Locality)
		printListHelper(w, "\t", "Province", cr.Subject.Province)
		printListHelper(w, "\t", "StreetAddress", cr.Subject.StreetAddress)
		printListHelper(w, "\t", "PostalCode", cr.Subject.PostalCode)

		// Only emit the SAN section when at least one SAN of any kind exists.
		if len(cr.DNSNames)+len(cr.EmailAddresses)+len(cr.IPAddresses)+len(cr.URIs) > 0 {
			w.Write(LEVEL_0, "Subject Alternative Names:\n")
			printListHelper(w, "\t", "DNS Names", cr.DNSNames)
			printListHelper(w, "\t", "Email Addresses", cr.EmailAddresses)
			var uris []string
			for _, uri := range cr.URIs {
				uris = append(uris, uri.String())
			}
			printListHelper(w, "\t", "URIs", uris)
			var ipaddrs []string
			for _, ipaddr := range cr.IPAddresses {
				ipaddrs = append(ipaddrs, ipaddr.String())
			}
			printListHelper(w, "\t", "IP Addresses", ipaddrs)
		}

		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// HorizontalPodAutoscalerDescriber generates information about a horizontal pod autoscaler.
type HorizontalPodAutoscalerDescriber struct {
	// client is the clientset used to fetch the HPA and its events.
	client clientset.Interface
}
// Describe fetches the named horizontal pod autoscaler, preferring the
// autoscaling/v2beta2 representation and falling back to autoscaling/v1, and
// renders it as human-readable text.
//
// autoscaling/v2beta2 is introduced since v1.12 and autoscaling/v1 does not
// have full backward compatibility with autoscaling/v2beta2, so the describer
// tries the v2beta2 object first and only falls back to v1 when that fails.
func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	var events *corev1.EventList

	if hpa2, err := d.client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err == nil {
		if describerSettings.ShowEvents {
			events, _ = d.client.CoreV1().Events(namespace).Search(scheme.Scheme, hpa2)
		}
		return describeHorizontalPodAutoscalerV2beta2(hpa2, events, d)
	}

	hpa1, err := d.client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		// Neither API version worked; surface the v1 lookup error.
		return "", err
	}
	if describerSettings.ShowEvents {
		events, _ = d.client.CoreV1().Events(namespace).Search(scheme.Scheme, hpa1)
	}
	return describeHorizontalPodAutoscalerV1(hpa1, events, d)
}
// describeHorizontalPodAutoscalerV2beta2 renders an autoscaling/v2beta2 HPA
// as tab-separated text: metadata, each configured metric with its
// "current / target" values, min/max replicas, optional scaling behavior,
// current/desired replica counts, conditions, and (when non-nil) events.
// The d parameter is unused here but kept for signature parity with callers.
func describeHorizontalPodAutoscalerV2beta2(hpa *autoscalingv2beta2.HorizontalPodAutoscaler, events *corev1.EventList, d *HorizontalPodAutoscalerDescriber) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", hpa.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", hpa.Namespace)
		printLabelsMultiline(w, "Labels", hpa.Labels)
		printAnnotationsMultiline(w, "Annotations", hpa.Annotations)
		w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", hpa.CreationTimestamp.Time.Format(time.RFC1123Z))
		w.Write(LEVEL_0, "Reference:\t%s/%s\n",
			hpa.Spec.ScaleTargetRef.Kind,
			hpa.Spec.ScaleTargetRef.Name)
		w.Write(LEVEL_0, "Metrics:\t( current / target )\n")
		// Status.CurrentMetrics is assumed to be index-aligned with
		// Spec.Metrics; each current value is printed as "<unknown>" when the
		// corresponding status entry is missing or nil.
		for i, metric := range hpa.Spec.Metrics {
			switch metric.Type {
			case autoscalingv2beta2.ExternalMetricSourceType:
				// External metrics target either an average value or a total
				// value; the branch decides which pair to print.
				if metric.External.Target.AverageValue != nil {
					current := "<unknown>"
					if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].External != nil &&
						hpa.Status.CurrentMetrics[i].External.Current.AverageValue != nil {
						current = hpa.Status.CurrentMetrics[i].External.Current.AverageValue.String()
					}
					w.Write(LEVEL_1, "%q (target average value):\t%s / %s\n", metric.External.Metric.Name, current, metric.External.Target.AverageValue.String())
				} else {
					current := "<unknown>"
					if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].External != nil {
						current = hpa.Status.CurrentMetrics[i].External.Current.Value.String()
					}
					w.Write(LEVEL_1, "%q (target value):\t%s / %s\n", metric.External.Metric.Name, current, metric.External.Target.Value.String())
				}
			case autoscalingv2beta2.PodsMetricSourceType:
				current := "<unknown>"
				if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Pods != nil {
					current = hpa.Status.CurrentMetrics[i].Pods.Current.AverageValue.String()
				}
				w.Write(LEVEL_1, "%q on pods:\t%s / %s\n", metric.Pods.Metric.Name, current, metric.Pods.Target.AverageValue.String())
			case autoscalingv2beta2.ObjectMetricSourceType:
				w.Write(LEVEL_1, "\"%s\" on %s/%s ", metric.Object.Metric.Name, metric.Object.DescribedObject.Kind, metric.Object.DescribedObject.Name)
				if metric.Object.Target.Type == autoscalingv2beta2.AverageValueMetricType {
					current := "<unknown>"
					if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Object != nil {
						current = hpa.Status.CurrentMetrics[i].Object.Current.AverageValue.String()
					}
					w.Write(LEVEL_0, "(target average value):\t%s / %s\n", current, metric.Object.Target.AverageValue.String())
				} else {
					current := "<unknown>"
					if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Object != nil {
						current = hpa.Status.CurrentMetrics[i].Object.Current.Value.String()
					}
					w.Write(LEVEL_0, "(target value):\t%s / %s\n", current, metric.Object.Target.Value.String())
				}
			case autoscalingv2beta2.ResourceMetricSourceType:
				w.Write(LEVEL_1, "resource %s on pods", string(metric.Resource.Name))
				if metric.Resource.Target.AverageValue != nil {
					current := "<unknown>"
					if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Resource != nil {
						current = hpa.Status.CurrentMetrics[i].Resource.Current.AverageValue.String()
					}
					w.Write(LEVEL_0, ":\t%s / %s\n", current, metric.Resource.Target.AverageValue.String())
				} else {
					// Utilization-based target: print "N% (quantity)" for the
					// current value and "N%" (or <auto>) for the target.
					current := "<unknown>"
					if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Resource != nil && hpa.Status.CurrentMetrics[i].Resource.Current.AverageUtilization != nil {
						current = fmt.Sprintf("%d%% (%s)", *hpa.Status.CurrentMetrics[i].Resource.Current.AverageUtilization, hpa.Status.CurrentMetrics[i].Resource.Current.AverageValue.String())
					}
					target := "<auto>"
					if metric.Resource.Target.AverageUtilization != nil {
						target = fmt.Sprintf("%d%%", *metric.Resource.Target.AverageUtilization)
					}
					w.Write(LEVEL_1, "(as a percentage of request):\t%s / %s\n", current, target)
				}
			default:
				w.Write(LEVEL_1, "<unknown metric type %q>", string(metric.Type))
			}
		}
		minReplicas := "<unset>"
		if hpa.Spec.MinReplicas != nil {
			minReplicas = fmt.Sprintf("%d", *hpa.Spec.MinReplicas)
		}
		w.Write(LEVEL_0, "Min replicas:\t%s\n", minReplicas)
		w.Write(LEVEL_0, "Max replicas:\t%d\n", hpa.Spec.MaxReplicas)
		// only print the hpa behavior if present
		if hpa.Spec.Behavior != nil {
			w.Write(LEVEL_0, "Behavior:\n")
			printDirectionBehavior(w, "Scale Up", hpa.Spec.Behavior.ScaleUp)
			printDirectionBehavior(w, "Scale Down", hpa.Spec.Behavior.ScaleDown)
		}
		w.Write(LEVEL_0, "%s pods:\t", hpa.Spec.ScaleTargetRef.Kind)
		w.Write(LEVEL_0, "%d current / %d desired\n", hpa.Status.CurrentReplicas, hpa.Status.DesiredReplicas)
		if len(hpa.Status.Conditions) > 0 {
			w.Write(LEVEL_0, "Conditions:\n")
			w.Write(LEVEL_1, "Type\tStatus\tReason\tMessage\n")
			w.Write(LEVEL_1, "----\t------\t------\t-------\n")
			for _, c := range hpa.Status.Conditions {
				w.Write(LEVEL_1, "%v\t%v\t%v\t%v\n", c.Type, c.Status, c.Reason, c.Message)
			}
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// printDirectionBehavior writes the HPA scaling rules for one direction
// ("Scale Up" or "Scale Down"). Nothing is printed when rules is nil; the
// stabilization window and policy list are each printed only when set.
func printDirectionBehavior(w PrefixWriter, direction string, rules *autoscalingv2beta2.HPAScalingRules) {
	if rules == nil {
		return
	}
	w.Write(LEVEL_1, "%s:\n", direction)
	if rules.StabilizationWindowSeconds != nil {
		w.Write(LEVEL_2, "Stabilization Window: %d seconds\n", *rules.StabilizationWindowSeconds)
	}
	if len(rules.Policies) == 0 {
		return
	}
	// An unset select policy defaults to Max.
	selectPolicy := autoscalingv2beta2.MaxPolicySelect
	if rules.SelectPolicy != nil {
		selectPolicy = *rules.SelectPolicy
	}
	w.Write(LEVEL_2, "Select Policy: %s\n", selectPolicy)
	w.Write(LEVEL_2, "Policies:\n")
	for _, policy := range rules.Policies {
		w.Write(LEVEL_3, "- Type: %s\tValue: %d\tPeriod: %d seconds\n", policy.Type, policy.Value, policy.PeriodSeconds)
	}
}
// describeHorizontalPodAutoscalerV1 renders an autoscaling/v1 HPA as
// tab-separated text: metadata, CPU utilization target/current (the only
// metric v1 supports), replica bounds and counts, and optional events.
// The d parameter is unused here but kept for signature parity with callers.
func describeHorizontalPodAutoscalerV1(hpa *autoscalingv1.HorizontalPodAutoscaler, events *corev1.EventList, d *HorizontalPodAutoscalerDescriber) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", hpa.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", hpa.Namespace)
		printLabelsMultiline(w, "Labels", hpa.Labels)
		printAnnotationsMultiline(w, "Annotations", hpa.Annotations)
		w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", hpa.CreationTimestamp.Time.Format(time.RFC1123Z))
		w.Write(LEVEL_0, "Reference:\t%s/%s\n",
			hpa.Spec.ScaleTargetRef.Kind,
			hpa.Spec.ScaleTargetRef.Name)
		// CPU utilization lines appear only when a target is configured.
		if hpa.Spec.TargetCPUUtilizationPercentage != nil {
			w.Write(LEVEL_0, "Target CPU utilization:\t%d%%\n", *hpa.Spec.TargetCPUUtilizationPercentage)
			current := "<unknown>"
			if hpa.Status.CurrentCPUUtilizationPercentage != nil {
				current = fmt.Sprintf("%d", *hpa.Status.CurrentCPUUtilizationPercentage)
			}
			w.Write(LEVEL_0, "Current CPU utilization:\t%s%%\n", current)
		}
		minReplicas := "<unset>"
		if hpa.Spec.MinReplicas != nil {
			minReplicas = fmt.Sprintf("%d", *hpa.Spec.MinReplicas)
		}
		w.Write(LEVEL_0, "Min replicas:\t%s\n", minReplicas)
		w.Write(LEVEL_0, "Max replicas:\t%d\n", hpa.Spec.MaxReplicas)
		w.Write(LEVEL_0, "%s pods:\t", hpa.Spec.ScaleTargetRef.Kind)
		w.Write(LEVEL_0, "%d current / %d desired\n", hpa.Status.CurrentReplicas, hpa.Status.DesiredReplicas)
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// describeNodeResource prints a node's non-terminated pods with per-pod
// CPU/memory requests and limits, followed by aggregate "Allocated
// resources" for CPU, memory, ephemeral storage, hugepage sizes, and any
// extended resources — each as an absolute quantity plus a percentage of
// the node's allocatable (or capacity when allocatable is unreported).
func describeNodeResource(nodeNonTerminatedPodsList *corev1.PodList, node *corev1.Node, w PrefixWriter) {
	w.Write(LEVEL_0, "Non-terminated Pods:\t(%d in total)\n", len(nodeNonTerminatedPodsList.Items))
	w.Write(LEVEL_1, "Namespace\tName\t\tCPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\tAGE\n")
	w.Write(LEVEL_1, "---------\t----\t\t------------\t----------\t---------------\t-------------\t---\n")
	// Percentages are computed against allocatable when the node reports it,
	// otherwise against raw capacity.
	allocatable := node.Status.Capacity
	if len(node.Status.Allocatable) > 0 {
		allocatable = node.Status.Allocatable
	}
	for _, pod := range nodeNonTerminatedPodsList.Items {
		req, limit := resourcehelper.PodRequestsAndLimits(&pod)
		cpuReq, cpuLimit, memoryReq, memoryLimit := req[corev1.ResourceCPU], limit[corev1.ResourceCPU], req[corev1.ResourceMemory], limit[corev1.ResourceMemory]
		// NOTE(review): unlike the aggregate section below, these per-pod
		// fractions are not guarded against a zero allocatable denominator.
		fractionCpuReq := float64(cpuReq.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100
		fractionCpuLimit := float64(cpuLimit.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100
		fractionMemoryReq := float64(memoryReq.Value()) / float64(allocatable.Memory().Value()) * 100
		fractionMemoryLimit := float64(memoryLimit.Value()) / float64(allocatable.Memory().Value()) * 100
		w.Write(LEVEL_1, "%s\t%s\t\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s\n", pod.Namespace, pod.Name,
			cpuReq.String(), int64(fractionCpuReq), cpuLimit.String(), int64(fractionCpuLimit),
			memoryReq.String(), int64(fractionMemoryReq), memoryLimit.String(), int64(fractionMemoryLimit), translateTimestampSince(pod.CreationTimestamp))
	}
	w.Write(LEVEL_0, "Allocated resources:\n  (Total limits may be over 100 percent, i.e., overcommitted.)\n")
	w.Write(LEVEL_1, "Resource\tRequests\tLimits\n")
	w.Write(LEVEL_1, "--------\t--------\t------\n")
	reqs, limits := getPodsTotalRequestsAndLimits(nodeNonTerminatedPodsList)
	cpuReqs, cpuLimits, memoryReqs, memoryLimits, ephemeralstorageReqs, ephemeralstorageLimits :=
		reqs[corev1.ResourceCPU], limits[corev1.ResourceCPU], reqs[corev1.ResourceMemory], limits[corev1.ResourceMemory], reqs[corev1.ResourceEphemeralStorage], limits[corev1.ResourceEphemeralStorage]
	// Zero-denominator guards: a resource the node does not report at all
	// yields 0% rather than NaN/Inf.
	fractionCpuReqs := float64(0)
	fractionCpuLimits := float64(0)
	if allocatable.Cpu().MilliValue() != 0 {
		fractionCpuReqs = float64(cpuReqs.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100
		fractionCpuLimits = float64(cpuLimits.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100
	}
	fractionMemoryReqs := float64(0)
	fractionMemoryLimits := float64(0)
	if allocatable.Memory().Value() != 0 {
		fractionMemoryReqs = float64(memoryReqs.Value()) / float64(allocatable.Memory().Value()) * 100
		fractionMemoryLimits = float64(memoryLimits.Value()) / float64(allocatable.Memory().Value()) * 100
	}
	fractionEphemeralStorageReqs := float64(0)
	fractionEphemeralStorageLimits := float64(0)
	if allocatable.StorageEphemeral().Value() != 0 {
		fractionEphemeralStorageReqs = float64(ephemeralstorageReqs.Value()) / float64(allocatable.StorageEphemeral().Value()) * 100
		fractionEphemeralStorageLimits = float64(ephemeralstorageLimits.Value()) / float64(allocatable.StorageEphemeral().Value()) * 100
	}
	w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n",
		corev1.ResourceCPU, cpuReqs.String(), int64(fractionCpuReqs), cpuLimits.String(), int64(fractionCpuLimits))
	w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n",
		corev1.ResourceMemory, memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String(), int64(fractionMemoryLimits))
	w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n",
		corev1.ResourceEphemeralStorage, ephemeralstorageReqs.String(), int64(fractionEphemeralStorageReqs), ephemeralstorageLimits.String(), int64(fractionEphemeralStorageLimits))
	// Split remaining allocatable resources into hugepage sizes and
	// extended (non-standard, non-pod) resources, printed sorted.
	extResources := make([]string, 0, len(allocatable))
	hugePageResources := make([]string, 0, len(allocatable))
	for resource := range allocatable {
		if resourcehelper.IsHugePageResourceName(resource) {
			hugePageResources = append(hugePageResources, string(resource))
		} else if !resourcehelper.IsStandardContainerResourceName(string(resource)) && resource != corev1.ResourcePods {
			extResources = append(extResources, string(resource))
		}
	}
	sort.Strings(extResources)
	sort.Strings(hugePageResources)
	for _, resource := range hugePageResources {
		hugePageSizeRequests, hugePageSizeLimits, hugePageSizeAllocable := reqs[corev1.ResourceName(resource)], limits[corev1.ResourceName(resource)], allocatable[corev1.ResourceName(resource)]
		fractionHugePageSizeRequests := float64(0)
		fractionHugePageSizeLimits := float64(0)
		if hugePageSizeAllocable.Value() != 0 {
			fractionHugePageSizeRequests = float64(hugePageSizeRequests.Value()) / float64(hugePageSizeAllocable.Value()) * 100
			fractionHugePageSizeLimits = float64(hugePageSizeLimits.Value()) / float64(hugePageSizeAllocable.Value()) * 100
		}
		w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n",
			resource, hugePageSizeRequests.String(), int64(fractionHugePageSizeRequests), hugePageSizeLimits.String(), int64(fractionHugePageSizeLimits))
	}
	// Extended resources are printed without percentages.
	for _, ext := range extResources {
		extRequests, extLimits := reqs[corev1.ResourceName(ext)], limits[corev1.ResourceName(ext)]
		w.Write(LEVEL_1, "%s\t%s\t%s\n", ext, extRequests.String(), extLimits.String())
	}
}
// getPodsTotalRequestsAndLimits sums the resource requests and limits of all
// pods in podList, keyed by resource name. The first occurrence of a resource
// is deep-copied so the accumulation never mutates quantities owned by the
// resource helper.
func getPodsTotalRequestsAndLimits(podList *corev1.PodList) (reqs map[corev1.ResourceName]resource.Quantity, limits map[corev1.ResourceName]resource.Quantity) {
	reqs = map[corev1.ResourceName]resource.Quantity{}
	limits = map[corev1.ResourceName]resource.Quantity{}
	for i := range podList.Items {
		podReqs, podLimits := resourcehelper.PodRequestsAndLimits(&podList.Items[i])
		for name, qty := range podReqs {
			if total, seen := reqs[name]; seen {
				total.Add(qty)
				reqs[name] = total
			} else {
				reqs[name] = qty.DeepCopy()
			}
		}
		for name, qty := range podLimits {
			if total, seen := limits[name]; seen {
				total.Add(qty)
				limits[name] = total
			} else {
				limits[name] = qty.DeepCopy()
			}
		}
	}
	return reqs, limits
}
// DescribeEvents prints an event table (Type, Reason, Age, From, Message)
// sorted chronologically, or "Events:\t<none>" when the list is empty.
// Repeated events are collapsed into "age (xN over firstSeen)".
func DescribeEvents(el *corev1.EventList, w PrefixWriter) {
	if len(el.Items) == 0 {
		w.Write(LEVEL_0, "Events:\t<none>\n")
		return
	}
	// Flush the preceding tab-aligned section so the event table aligns
	// independently of it.
	w.Flush()
	sort.Sort(event.SortableEvents(el.Items))
	w.Write(LEVEL_0, "Events:\n  Type\tReason\tAge\tFrom\tMessage\n")
	w.Write(LEVEL_1, "----\t------\t----\t----\t-------\n")
	for _, e := range el.Items {
		var interval string
		if e.Count > 1 {
			// Collapsed repeat: last-seen age plus count and first-seen age.
			interval = fmt.Sprintf("%s (x%d over %s)", translateTimestampSince(e.LastTimestamp), e.Count, translateTimestampSince(e.FirstTimestamp))
		} else {
			interval = translateTimestampSince(e.FirstTimestamp)
		}
		w.Write(LEVEL_1, "%v\t%v\t%s\t%v\t%v\n",
			e.Type,
			e.Reason,
			interval,
			formatEventSource(e.Source),
			strings.TrimSpace(e.Message),
		)
	}
}
// DeploymentDescriber generates information about a deployment.
type DeploymentDescriber struct {
	// client fetches the deployment, its ReplicaSets, and related events.
	client clientset.Interface
}
// Describe fetches the named Deployment, derives its label selector, and
// renders the object (with events when describerSettings.ShowEvents is set).
func (dd *DeploymentDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	deployment, err := dd.client.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
	if err != nil {
		return "", err
	}
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are deliberately ignored; the description is
		// still useful without events.
		events, _ = dd.client.CoreV1().Events(namespace).Search(scheme.Scheme, deployment)
	}
	return describeDeployment(deployment, selector, deployment, events, dd)
}
// describeDeployment renders a Deployment as tab-separated text: metadata,
// replica counts, strategy, pod template, conditions, old/new ReplicaSets,
// and optional events. internalDeployment carries the pod template to print
// (callers currently pass the same object as d).
func describeDeployment(d *appsv1.Deployment, selector labels.Selector, internalDeployment *appsv1.Deployment, events *corev1.EventList, dd *DeploymentDescriber) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", d.ObjectMeta.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", d.ObjectMeta.Namespace)
		w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", d.CreationTimestamp.Time.Format(time.RFC1123Z))
		printLabelsMultiline(w, "Labels", d.Labels)
		printAnnotationsMultiline(w, "Annotations", d.Annotations)
		w.Write(LEVEL_0, "Selector:\t%s\n", selector)
		// NOTE(review): assumes Spec.Replicas is non-nil (normally defaulted
		// by the API server) — dereference panics otherwise; confirm callers.
		w.Write(LEVEL_0, "Replicas:\t%d desired | %d updated | %d total | %d available | %d unavailable\n", *(d.Spec.Replicas), d.Status.UpdatedReplicas, d.Status.Replicas, d.Status.AvailableReplicas, d.Status.UnavailableReplicas)
		w.Write(LEVEL_0, "StrategyType:\t%s\n", d.Spec.Strategy.Type)
		w.Write(LEVEL_0, "MinReadySeconds:\t%d\n", d.Spec.MinReadySeconds)
		if d.Spec.Strategy.RollingUpdate != nil {
			ru := d.Spec.Strategy.RollingUpdate
			w.Write(LEVEL_0, "RollingUpdateStrategy:\t%s max unavailable, %s max surge\n", ru.MaxUnavailable.String(), ru.MaxSurge.String())
		}
		DescribePodTemplate(&internalDeployment.Spec.Template, w)
		if len(d.Status.Conditions) > 0 {
			w.Write(LEVEL_0, "Conditions:\n  Type\tStatus\tReason\n")
			w.Write(LEVEL_1, "----\t------\t------\n")
			for _, c := range d.Status.Conditions {
				w.Write(LEVEL_1, "%v \t%v\t%v\n", c.Type, c.Status, c.Reason)
			}
		}
		// ReplicaSet lookup is best-effort: on error the section is skipped.
		oldRSs, _, newRS, err := deploymentutil.GetAllReplicaSets(d, dd.client.AppsV1())
		if err == nil {
			w.Write(LEVEL_0, "OldReplicaSets:\t%s\n", printReplicaSetsByLabels(oldRSs))
			var newRSs []*appsv1.ReplicaSet
			if newRS != nil {
				newRSs = append(newRSs, newRS)
			}
			w.Write(LEVEL_0, "NewReplicaSet:\t%s\n", printReplicaSetsByLabels(newRSs))
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// printReplicaSetsByLabels formats ReplicaSets as a comma-separated list of
// "name (x/y replicas created)" entries, or "<none>" for an empty list.
func printReplicaSetsByLabels(matchingRSs []*appsv1.ReplicaSet) string {
	if len(matchingRSs) == 0 {
		return "<none>"
	}
	entries := make([]string, 0, len(matchingRSs))
	for _, rs := range matchingRSs {
		entries = append(entries, fmt.Sprintf("%s (%d/%d replicas created)", rs.Name, rs.Status.Replicas, *rs.Spec.Replicas))
	}
	return strings.Join(entries, ", ")
}
// getPodStatusForController counts the pods owned by the controller with the
// given UID, grouped by phase. Pods that match the selector but are orphaned
// or owned by a different controller are skipped.
func getPodStatusForController(c corev1client.PodInterface, selector labels.Selector, uid types.UID) (running, waiting, succeeded, failed int, err error) {
	var podList *corev1.PodList
	podList, err = c.List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
	if err != nil {
		return
	}
	for i := range podList.Items {
		pod := &podList.Items[i]
		// Skip pods that are orphans or owned by other controllers.
		if ref := metav1.GetControllerOf(pod); ref == nil || ref.UID != uid {
			continue
		}
		switch pod.Status.Phase {
		case corev1.PodRunning:
			running++
		case corev1.PodPending:
			waiting++
		case corev1.PodSucceeded:
			succeeded++
		case corev1.PodFailed:
			failed++
		}
	}
	return
}
// ConfigMapDescriber generates information about a ConfigMap
type ConfigMapDescriber struct {
	// Embedded client used to fetch the ConfigMap and its events.
	clientset.Interface
}
// Describe fetches the named ConfigMap and renders its metadata followed by
// each data entry under a "Data" heading; events are appended when
// describerSettings.ShowEvents is set.
func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	c := d.CoreV1().ConfigMaps(namespace)
	configMap, err := c.Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", configMap.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", configMap.Namespace)
		printLabelsMultiline(w, "Labels", configMap.Labels)
		printAnnotationsMultiline(w, "Annotations", configMap.Annotations)
		w.Write(LEVEL_0, "\nData\n====\n")
		// ConfigMap.Data is map[string]string, so v needs no conversion.
		// Iteration order is map-random, matching previous behavior.
		for k, v := range configMap.Data {
			w.Write(LEVEL_0, "%s:\n----\n", k)
			w.Write(LEVEL_0, "%s\n", v)
		}
		if describerSettings.ShowEvents {
			events, err := d.CoreV1().Events(namespace).Search(scheme.Scheme, configMap)
			if err != nil {
				return err
			}
			if events != nil {
				DescribeEvents(events, w)
			}
		}
		return nil
	})
}
// NetworkPolicyDescriber generates information about a networkingv1.NetworkPolicy
type NetworkPolicyDescriber struct {
	// Embedded client used to fetch the NetworkPolicy.
	clientset.Interface
}
// Describe fetches the named NetworkPolicy and renders it as text.
// describerSettings is accepted for interface parity but not consulted here.
func (d *NetworkPolicyDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	policy, err := d.NetworkingV1().NetworkPolicies(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return describeNetworkPolicy(policy)
}
// describeNetworkPolicy renders a NetworkPolicy's metadata followed by its
// spec (selectors, ingress/egress rules, policy types).
func describeNetworkPolicy(networkPolicy *networkingv1.NetworkPolicy) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", networkPolicy.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", networkPolicy.Namespace)
		w.Write(LEVEL_0, "Created on:\t%s\n", networkPolicy.CreationTimestamp)
		printLabelsMultiline(w, "Labels", networkPolicy.Labels)
		printAnnotationsMultiline(w, "Annotations", networkPolicy.Annotations)
		describeNetworkPolicySpec(networkPolicy.Spec, w)
		return nil
	})
}
// describeNetworkPolicySpec prints a NetworkPolicySpec: the pod selector,
// the ingress and egress rule sections (each only when its policy type is
// active), and the policy type list.
func describeNetworkPolicySpec(nps networkingv1.NetworkPolicySpec, w PrefixWriter) {
	w.Write(LEVEL_0, "Spec:\n")
	w.Write(LEVEL_1, "PodSelector: ")
	// An empty selector matches every pod in the namespace.
	if len(nps.PodSelector.MatchLabels) == 0 && len(nps.PodSelector.MatchExpressions) == 0 {
		w.Write(LEVEL_2, "<none> (Allowing the specific traffic to all pods in this namespace)\n")
	} else {
		w.Write(LEVEL_2, "%s\n", metav1.FormatLabelSelector(&nps.PodSelector))
	}
	ingressEnabled, egressEnabled := getPolicyType(nps)
	if ingressEnabled {
		w.Write(LEVEL_1, "Allowing ingress traffic:\n")
		printNetworkPolicySpecIngressFrom(nps.Ingress, "    ", w)
	} else {
		w.Write(LEVEL_1, "Not affecting ingress traffic\n")
	}
	if egressEnabled {
		w.Write(LEVEL_1, "Allowing egress traffic:\n")
		printNetworkPolicySpecEgressTo(nps.Egress, "    ", w)
	} else {
		w.Write(LEVEL_1, "Not affecting egress traffic\n")
	}
	w.Write(LEVEL_1, "Policy Types: %v\n", policyTypesToString(nps.PolicyTypes))
}
// getPolicyType reports whether the spec's PolicyTypes list enables
// ingress and/or egress rules, in that order.
func getPolicyType(nps networkingv1.NetworkPolicySpec) (bool, bool) {
	ingress, egress := false, false
	for _, policyType := range nps.PolicyTypes {
		if policyType == networkingv1.PolicyTypeIngress {
			ingress = true
		}
		if policyType == networkingv1.PolicyTypeEgress {
			egress = true
		}
	}
	return ingress, egress
}
// printNetworkPolicySpecIngressFrom renders a NetworkPolicy's ingress rules:
// for each rule, its allowed ports (protocol defaulting to TCP) and its
// "From" peers (pod selector, namespace selector, IP block, or <any>).
// Rules are separated by a dashed line; an empty rule list means the
// selected pods are fully isolated for ingress.
func printNetworkPolicySpecIngressFrom(npirs []networkingv1.NetworkPolicyIngressRule, initialIndent string, w PrefixWriter) {
	if len(npirs) == 0 {
		w.Write(LEVEL_0, "%s%s\n", initialIndent, "<none> (Selected pods are isolated for ingress connectivity)")
		return
	}
	for i, npir := range npirs {
		// An empty Ports list allows traffic on every port.
		if len(npir.Ports) == 0 {
			w.Write(LEVEL_0, "%s%s\n", initialIndent, "To Port: <any> (traffic allowed to all ports)")
		} else {
			for _, port := range npir.Ports {
				var proto corev1.Protocol
				if port.Protocol != nil {
					proto = *port.Protocol
				} else {
					// The API defaults an unset protocol to TCP.
					proto = corev1.ProtocolTCP
				}
				w.Write(LEVEL_0, "%s%s: %s/%s\n", initialIndent, "To Port", port.Port, proto)
			}
		}
		if len(npir.From) == 0 {
			w.Write(LEVEL_0, "%s%s\n", initialIndent, "From: <any> (traffic not restricted by source)")
		} else {
			for _, from := range npir.From {
				w.Write(LEVEL_0, "%s%s\n", initialIndent, "From:")
				// A peer with both selectors set requires pods matching the
				// pod selector inside namespaces matching the namespace
				// selector, so both lines are printed together.
				if from.PodSelector != nil && from.NamespaceSelector != nil {
					w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "NamespaceSelector", metav1.FormatLabelSelector(from.NamespaceSelector))
					w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "PodSelector", metav1.FormatLabelSelector(from.PodSelector))
				} else if from.PodSelector != nil {
					w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "PodSelector", metav1.FormatLabelSelector(from.PodSelector))
				} else if from.NamespaceSelector != nil {
					w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "NamespaceSelector", metav1.FormatLabelSelector(from.NamespaceSelector))
				} else if from.IPBlock != nil {
					w.Write(LEVEL_1, "%sIPBlock:\n", initialIndent)
					w.Write(LEVEL_2, "%sCIDR: %s\n", initialIndent, from.IPBlock.CIDR)
					w.Write(LEVEL_2, "%sExcept: %v\n", initialIndent, strings.Join(from.IPBlock.Except, ", "))
				}
			}
		}
		if i != len(npirs)-1 {
			w.Write(LEVEL_0, "%s%s\n", initialIndent, "----------")
		}
	}
}
// printNetworkPolicySpecEgressTo renders a NetworkPolicy's egress rules:
// for each rule, its allowed ports (protocol defaulting to TCP) and its
// "To" peers (pod selector, namespace selector, IP block, or <any>).
// Rules are separated by a dashed line; an empty rule list means the
// selected pods are fully isolated for egress.
func printNetworkPolicySpecEgressTo(npers []networkingv1.NetworkPolicyEgressRule, initialIndent string, w PrefixWriter) {
	if len(npers) == 0 {
		w.Write(LEVEL_0, "%s%s\n", initialIndent, "<none> (Selected pods are isolated for egress connectivity)")
		return
	}
	for i, nper := range npers {
		// An empty Ports list allows traffic on every port.
		if len(nper.Ports) == 0 {
			w.Write(LEVEL_0, "%s%s\n", initialIndent, "To Port: <any> (traffic allowed to all ports)")
		} else {
			for _, port := range nper.Ports {
				var proto corev1.Protocol
				if port.Protocol != nil {
					proto = *port.Protocol
				} else {
					// The API defaults an unset protocol to TCP.
					proto = corev1.ProtocolTCP
				}
				w.Write(LEVEL_0, "%s%s: %s/%s\n", initialIndent, "To Port", port.Port, proto)
			}
		}
		if len(nper.To) == 0 {
			// Fix: egress peers restrict the *destination*, not the source;
			// the previous message was copied from the ingress printer.
			w.Write(LEVEL_0, "%s%s\n", initialIndent, "To: <any> (traffic not restricted by destination)")
		} else {
			for _, to := range nper.To {
				w.Write(LEVEL_0, "%s%s\n", initialIndent, "To:")
				// A peer with both selectors requires pods matching the pod
				// selector within namespaces matching the namespace selector.
				if to.PodSelector != nil && to.NamespaceSelector != nil {
					w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "NamespaceSelector", metav1.FormatLabelSelector(to.NamespaceSelector))
					w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "PodSelector", metav1.FormatLabelSelector(to.PodSelector))
				} else if to.PodSelector != nil {
					w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "PodSelector", metav1.FormatLabelSelector(to.PodSelector))
				} else if to.NamespaceSelector != nil {
					w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "NamespaceSelector", metav1.FormatLabelSelector(to.NamespaceSelector))
				} else if to.IPBlock != nil {
					w.Write(LEVEL_1, "%sIPBlock:\n", initialIndent)
					w.Write(LEVEL_2, "%sCIDR: %s\n", initialIndent, to.IPBlock.CIDR)
					w.Write(LEVEL_2, "%sExcept: %v\n", initialIndent, strings.Join(to.IPBlock.Except, ", "))
				}
			}
		}
		if i != len(npers)-1 {
			w.Write(LEVEL_0, "%s%s\n", initialIndent, "----------")
		}
	}
}
// StorageClassDescriber generates information about a StorageClass.
type StorageClassDescriber struct {
	// Embedded client used to fetch the StorageClass and its events.
	clientset.Interface
}
// Describe fetches the named (cluster-scoped) StorageClass and renders it;
// events are looked up in the given namespace when ShowEvents is set.
func (s *StorageClassDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	storageClass, err := s.StorageV1().StorageClasses().Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are deliberately ignored.
		events, _ = s.CoreV1().Events(namespace).Search(scheme.Scheme, storageClass)
	}
	return describeStorageClass(storageClass, events)
}
// describeStorageClass renders a StorageClass as tab-separated text:
// name, default-class marker, provisioner, parameters, mount options,
// reclaim/binding policy, allowed topologies, and optional events.
func describeStorageClass(sc *storagev1.StorageClass, events *corev1.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", sc.Name)
		// IsDefaultClass is derived from the storage class's annotations.
		w.Write(LEVEL_0, "IsDefaultClass:\t%s\n", storageutil.IsDefaultAnnotationText(sc.ObjectMeta))
		w.Write(LEVEL_0, "Annotations:\t%s\n", labels.FormatLabels(sc.Annotations))
		w.Write(LEVEL_0, "Provisioner:\t%s\n", sc.Provisioner)
		w.Write(LEVEL_0, "Parameters:\t%s\n", labels.FormatLabels(sc.Parameters))
		w.Write(LEVEL_0, "AllowVolumeExpansion:\t%s\n", printBoolPtr(sc.AllowVolumeExpansion))
		if len(sc.MountOptions) == 0 {
			w.Write(LEVEL_0, "MountOptions:\t<none>\n")
		} else {
			w.Write(LEVEL_0, "MountOptions:\n")
			for _, option := range sc.MountOptions {
				w.Write(LEVEL_1, "%s\n", option)
			}
		}
		// The following fields are optional pointers; each line is printed
		// only when the field is set.
		if sc.ReclaimPolicy != nil {
			w.Write(LEVEL_0, "ReclaimPolicy:\t%s\n", *sc.ReclaimPolicy)
		}
		if sc.VolumeBindingMode != nil {
			w.Write(LEVEL_0, "VolumeBindingMode:\t%s\n", *sc.VolumeBindingMode)
		}
		if sc.AllowedTopologies != nil {
			printAllowedTopologies(w, sc.AllowedTopologies)
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// CSINodeDescriber generates information about a CSINode.
type CSINodeDescriber struct {
	// Embedded client used to fetch the CSINode and its events.
	clientset.Interface
}
// Describe fetches the named (cluster-scoped) CSINode and renders it;
// events are looked up in the given namespace when ShowEvents is set.
func (c *CSINodeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	csiNode, err := c.StorageV1().CSINodes().Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are deliberately ignored.
		events, _ = c.CoreV1().Events(namespace).Search(scheme.Scheme, csiNode)
	}
	return describeCSINode(csiNode, events)
}
// describeCSINode renders a CSINode as tab-separated text: metadata, each
// installed driver's allocatable volume count (when reported), node ID and
// topology keys, and optional events.
func describeCSINode(csi *storagev1.CSINode, events *corev1.EventList) (output string, err error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", csi.GetName())
		printLabelsMultiline(w, "Labels", csi.GetLabels())
		printAnnotationsMultiline(w, "Annotations", csi.GetAnnotations())
		w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", csi.CreationTimestamp.Time.Format(time.RFC1123Z))
		w.Write(LEVEL_0, "Spec:\n")
		if csi.Spec.Drivers != nil {
			w.Write(LEVEL_1, "Drivers:\n")
			for _, driver := range csi.Spec.Drivers {
				w.Write(LEVEL_2, "%s:\n", driver.Name)
				// Allocatable and its Count field are both optional in the
				// CSINodeDriver API; the unconditional dereference here used
				// to panic for drivers that report no volume limit.
				if driver.Allocatable != nil && driver.Allocatable.Count != nil {
					w.Write(LEVEL_3, "Allocatables:\n")
					w.Write(LEVEL_4, "Count:\t%d\n", *driver.Allocatable.Count)
				}
				w.Write(LEVEL_3, "Node ID:\t%s\n", driver.NodeID)
				w.Write(LEVEL_3, "Topology Keys:\t%s\n", driver.TopologyKeys)
			}
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// printAllowedTopologies writes a storage class's allowed topology terms,
// one "Term N" section per term, or "<none>" when no terms are set.
func printAllowedTopologies(w PrefixWriter, topologies []corev1.TopologySelectorTerm) {
	w.Write(LEVEL_0, "AllowedTopologies:\t")
	if len(topologies) == 0 {
		w.WriteLine("<none>")
		return
	}
	// Terminate the header line before the indented term sections.
	w.WriteLine("")
	for index, term := range topologies {
		label := fmt.Sprintf("Term %d", index)
		printTopologySelectorTermsMultilineWithIndent(w, LEVEL_1, label, "\t", term.MatchLabelExpressions)
	}
}
// printTopologySelectorTermsMultilineWithIndent writes topology label
// requirements one per line as "<key> in [v1, v2]" under the given title,
// or "<none>" when the requirement list is empty.
func printTopologySelectorTermsMultilineWithIndent(w PrefixWriter, indentLevel int, title, innerIndent string, reqs []corev1.TopologySelectorLabelRequirement) {
	w.Write(indentLevel, "%s:%s", title, innerIndent)
	if len(reqs) == 0 {
		w.WriteLine("<none>")
		return
	}
	for idx := range reqs {
		if idx > 0 {
			// Continuation lines repeat the inner indent so values align.
			w.Write(indentLevel, "%s", innerIndent)
		}
		line := reqs[idx].Key + " in"
		if len(reqs[idx].Values) > 0 {
			line += " [" + strings.Join(reqs[idx].Values, ", ") + "]"
		}
		w.Write(LEVEL_0, "%s\n", line)
	}
}
// PodDisruptionBudgetDescriber generates information about a PodDisruptionBudget.
type PodDisruptionBudgetDescriber struct {
	// Embedded client used to fetch the PodDisruptionBudget and its events.
	clientset.Interface
}
// Describe fetches the named PodDisruptionBudget and renders it; events are
// included when describerSettings.ShowEvents is set.
func (p *PodDisruptionBudgetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	budget, err := p.PolicyV1beta1().PodDisruptionBudgets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are deliberately ignored.
		events, _ = p.CoreV1().Events(namespace).Search(scheme.Scheme, budget)
	}
	return describePodDisruptionBudget(budget, events)
}
// describePodDisruptionBudget renders a PodDisruptionBudget as tab-separated
// text: the min-available or max-unavailable constraint, selector, current
// status counters, and optional events.
func describePodDisruptionBudget(pdb *policyv1beta1.PodDisruptionBudget, events *corev1.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", pdb.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", pdb.Namespace)
		// MinAvailable and MaxUnavailable are mutually exclusive; print
		// whichever is set (MinAvailable wins if both were present).
		if pdb.Spec.MinAvailable != nil {
			w.Write(LEVEL_0, "Min available:\t%s\n", pdb.Spec.MinAvailable.String())
		} else if pdb.Spec.MaxUnavailable != nil {
			w.Write(LEVEL_0, "Max unavailable:\t%s\n", pdb.Spec.MaxUnavailable.String())
		}
		if pdb.Spec.Selector != nil {
			w.Write(LEVEL_0, "Selector:\t%s\n", metav1.FormatLabelSelector(pdb.Spec.Selector))
		} else {
			w.Write(LEVEL_0, "Selector:\t<unset>\n")
		}
		w.Write(LEVEL_0, "Status:\n")
		w.Write(LEVEL_2, "Allowed disruptions:\t%d\n", pdb.Status.DisruptionsAllowed)
		w.Write(LEVEL_2, "Current:\t%d\n", pdb.Status.CurrentHealthy)
		w.Write(LEVEL_2, "Desired:\t%d\n", pdb.Status.DesiredHealthy)
		w.Write(LEVEL_2, "Total:\t%d\n", pdb.Status.ExpectedPods)
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// PriorityClassDescriber generates information about a PriorityClass.
type PriorityClassDescriber struct {
	// Embedded client used to fetch the PriorityClass and its events.
	clientset.Interface
}
// Describe fetches the named (cluster-scoped) PriorityClass and renders it;
// events are looked up in the given namespace when ShowEvents is set.
func (s *PriorityClassDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	priorityClass, err := s.SchedulingV1().PriorityClasses().Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are deliberately ignored.
		events, _ = s.CoreV1().Events(namespace).Search(scheme.Scheme, priorityClass)
	}
	return describePriorityClass(priorityClass, events)
}
// describePriorityClass renders a PriorityClass as tab-separated text:
// name, priority value, global-default flag, description, annotations,
// and optional events.
func describePriorityClass(pc *schedulingv1.PriorityClass, events *corev1.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", pc.Name)
		w.Write(LEVEL_0, "Value:\t%v\n", pc.Value)
		w.Write(LEVEL_0, "GlobalDefault:\t%v\n", pc.GlobalDefault)
		w.Write(LEVEL_0, "Description:\t%s\n", pc.Description)
		w.Write(LEVEL_0, "Annotations:\t%s\n", labels.FormatLabels(pc.Annotations))
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// PodSecurityPolicyDescriber generates information about a PodSecurityPolicy.
type PodSecurityPolicyDescriber struct {
	// Embedded client used to fetch the PodSecurityPolicy.
	clientset.Interface
}
// Describe fetches the named (cluster-scoped) PodSecurityPolicy and renders
// it. describerSettings is accepted for interface parity but not used here.
func (d *PodSecurityPolicyDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	policy, err := d.PolicyV1beta1().PodSecurityPolicies().Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return describePodSecurityPolicy(policy)
}
// describePodSecurityPolicy renders a PodSecurityPolicy as tab-separated
// text: privilege settings, capability lists, allowed volume/driver types,
// sysctl rules, host access flags, and the SELinux / RunAsUser / FSGroup /
// SupplementalGroups strategies with their ranges.
func describePodSecurityPolicy(psp *policyv1beta1.PodSecurityPolicy) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", psp.Name)
		w.Write(LEVEL_0, "\nSettings:\n")
		w.Write(LEVEL_1, "Allow Privileged:\t%t\n", psp.Spec.Privileged)
		if psp.Spec.AllowPrivilegeEscalation != nil {
			w.Write(LEVEL_1, "Allow Privilege Escalation:\t%t\n", *psp.Spec.AllowPrivilegeEscalation)
		} else {
			w.Write(LEVEL_1, "Allow Privilege Escalation:\t<unset>\n")
		}
		w.Write(LEVEL_1, "Default Add Capabilities:\t%v\n", capsToString(psp.Spec.DefaultAddCapabilities))
		w.Write(LEVEL_1, "Required Drop Capabilities:\t%s\n", capsToString(psp.Spec.RequiredDropCapabilities))
		w.Write(LEVEL_1, "Allowed Capabilities:\t%s\n", capsToString(psp.Spec.AllowedCapabilities))
		w.Write(LEVEL_1, "Allowed Volume Types:\t%s\n", fsTypeToString(psp.Spec.Volumes))
		// The next four sections are printed only when configured.
		if len(psp.Spec.AllowedFlexVolumes) > 0 {
			w.Write(LEVEL_1, "Allowed FlexVolume Types:\t%s\n", flexVolumesToString(psp.Spec.AllowedFlexVolumes))
		}
		if len(psp.Spec.AllowedCSIDrivers) > 0 {
			w.Write(LEVEL_1, "Allowed CSI Drivers:\t%s\n", csiDriversToString(psp.Spec.AllowedCSIDrivers))
		}
		if len(psp.Spec.AllowedUnsafeSysctls) > 0 {
			w.Write(LEVEL_1, "Allowed Unsafe Sysctls:\t%s\n", sysctlsToString(psp.Spec.AllowedUnsafeSysctls))
		}
		if len(psp.Spec.ForbiddenSysctls) > 0 {
			w.Write(LEVEL_1, "Forbidden Sysctls:\t%s\n", sysctlsToString(psp.Spec.ForbiddenSysctls))
		}
		w.Write(LEVEL_1, "Allow Host Network:\t%t\n", psp.Spec.HostNetwork)
		w.Write(LEVEL_1, "Allow Host Ports:\t%s\n", hostPortRangeToString(psp.Spec.HostPorts))
		w.Write(LEVEL_1, "Allow Host PID:\t%t\n", psp.Spec.HostPID)
		w.Write(LEVEL_1, "Allow Host IPC:\t%t\n", psp.Spec.HostIPC)
		w.Write(LEVEL_1, "Read Only Root Filesystem:\t%v\n", psp.Spec.ReadOnlyRootFilesystem)
		w.Write(LEVEL_1, "SELinux Context Strategy: %s\t\n", string(psp.Spec.SELinux.Rule))
		// SELinux options default to empty strings (shown as <none>) when
		// the policy does not pin them.
		var user, role, seLinuxType, level string
		if psp.Spec.SELinux.SELinuxOptions != nil {
			user = psp.Spec.SELinux.SELinuxOptions.User
			role = psp.Spec.SELinux.SELinuxOptions.Role
			seLinuxType = psp.Spec.SELinux.SELinuxOptions.Type
			level = psp.Spec.SELinux.SELinuxOptions.Level
		}
		w.Write(LEVEL_2, "User:\t%s\n", stringOrNone(user))
		w.Write(LEVEL_2, "Role:\t%s\n", stringOrNone(role))
		w.Write(LEVEL_2, "Type:\t%s\n", stringOrNone(seLinuxType))
		w.Write(LEVEL_2, "Level:\t%s\n", stringOrNone(level))
		w.Write(LEVEL_1, "Run As User Strategy: %s\t\n", string(psp.Spec.RunAsUser.Rule))
		w.Write(LEVEL_2, "Ranges:\t%s\n", idRangeToString(psp.Spec.RunAsUser.Ranges))
		w.Write(LEVEL_1, "FSGroup Strategy: %s\t\n", string(psp.Spec.FSGroup.Rule))
		w.Write(LEVEL_2, "Ranges:\t%s\n", idRangeToString(psp.Spec.FSGroup.Ranges))
		w.Write(LEVEL_1, "Supplemental Groups Strategy: %s\t\n", string(psp.Spec.SupplementalGroups.Rule))
		w.Write(LEVEL_2, "Ranges:\t%s\n", idRangeToString(psp.Spec.SupplementalGroups.Ranges))
		return nil
	})
}
// stringOrNone returns s unchanged, or the "<none>" placeholder when s is empty.
func stringOrNone(s string) string {
	if s == "" {
		return "<none>"
	}
	return s
}
// stringOrDefaultValue returns s unchanged, or defaultValue when s is empty.
func stringOrDefaultValue(s, defaultValue string) string {
	if s == "" {
		return defaultValue
	}
	return s
}
// fsTypeToString joins the allowed volume types into a comma-separated list,
// or returns "<none>" when the list is empty.
func fsTypeToString(volumes []policyv1beta1.FSType) string {
	names := make([]string, 0, len(volumes))
	for _, volume := range volumes {
		names = append(names, string(volume))
	}
	return stringOrNone(strings.Join(names, ","))
}
// flexVolumesToString renders allowed FlexVolumes as "driver=<name>" pairs
// joined by commas, or "<all>" when the list is empty.
func flexVolumesToString(flexVolumes []policyv1beta1.AllowedFlexVolume) string {
	drivers := make([]string, 0, len(flexVolumes))
	for _, fv := range flexVolumes {
		drivers = append(drivers, "driver="+fv.Driver)
	}
	return stringOrDefaultValue(strings.Join(drivers, ","), "<all>")
}
// csiDriversToString renders allowed CSI drivers as "driver=<name>" pairs
// joined by commas, or "<all>" when the list is empty.
func csiDriversToString(csiDrivers []policyv1beta1.AllowedCSIDriver) string {
	names := make([]string, 0, len(csiDrivers))
	for _, d := range csiDrivers {
		names = append(names, "driver="+d.Name)
	}
	return stringOrDefaultValue(strings.Join(names, ","), "<all>")
}
// sysctlsToString joins sysctl names with commas, or returns "<none>" when
// the list is empty.
func sysctlsToString(sysctls []string) string {
	joined := strings.Join(sysctls, ",")
	if joined == "" {
		return "<none>"
	}
	return joined
}
// hostPortRangeToString renders host port ranges as comma-separated
// "min-max" pairs, or "<none>" when there are no ranges. Ranging over a nil
// slice yields no iterations, so a nil argument also produces "<none>".
func hostPortRangeToString(ranges []policyv1beta1.HostPortRange) string {
	parts := make([]string, 0, len(ranges))
	for _, r := range ranges {
		parts = append(parts, fmt.Sprintf("%d-%d", r.Min, r.Max))
	}
	return stringOrNone(strings.Join(parts, ","))
}
// idRangeToString renders ID ranges as comma-separated "min-max" pairs,
// or "<none>" when there are no ranges (nil and empty behave identically).
func idRangeToString(ranges []policyv1beta1.IDRange) string {
	parts := make([]string, 0, len(ranges))
	for _, r := range ranges {
		parts = append(parts, fmt.Sprintf("%d-%d", r.Min, r.Max))
	}
	return stringOrNone(strings.Join(parts, ","))
}
// capsToString joins capability names with commas, or returns "<none>" when
// the list is empty (nil and empty behave identically).
func capsToString(caps []corev1.Capability) string {
	names := make([]string, 0, len(caps))
	for _, c := range caps {
		names = append(names, string(c))
	}
	return stringOrNone(strings.Join(names, ","))
}
// policyTypesToString joins network policy types with ", " (note the space,
// unlike the other joiners here), or returns "<none>" when the list is empty.
func policyTypesToString(pts []networkingv1.PolicyType) string {
	names := make([]string, 0, len(pts))
	for _, p := range pts {
		names = append(names, string(p))
	}
	return stringOrNone(strings.Join(names, ", "))
}
// newErrNoDescriber creates a new ErrNoDescriber carrying the string names of
// the provided types.
func newErrNoDescriber(types ...reflect.Type) error {
	names := make([]string, len(types))
	for i, t := range types {
		names[i] = t.String()
	}
	return describe.ErrNoDescriber{Types: names}
}
// Describers implements ObjectDescriber against functions registered via Add. Those functions can
// be strongly typed. Types are exactly matched (no conversion or assignable checks).
type Describers struct {
	// searchFns maps the type of a function's first ("exact") argument to all
	// registered describer functions accepting that type.
	searchFns map[reflect.Type][]typeFunc
}
// DescribeObject implements ObjectDescriber and will attempt to print the provided object to a string,
// if at least one describer function has been registered with the exact types passed, or if any
// describer can print the exact object in its first argument (the remainder will be provided empty
// values). If no function registered with Add can satisfy the passed objects, an ErrNoDescriber will
// be returned.
// TODO: reorder and partial match extra.
func (d *Describers) DescribeObject(exact interface{}, extra ...interface{}) (string, error) {
	exactType := reflect.TypeOf(exact)
	fns, ok := d.searchFns[exactType]
	if !ok {
		return "", newErrNoDescriber(exactType)
	}
	if len(extra) == 0 {
		// Prefer a describer that takes no extra arguments.
		for _, typeFn := range fns {
			if len(typeFn.Extra) == 0 {
				return typeFn.Describe(exact, extra...)
			}
		}
		// None found: fall back to the first registered describer and pad the
		// missing extra arguments with zero values of the expected types.
		typeFn := fns[0]
		for _, t := range typeFn.Extra {
			v := reflect.New(t).Elem()
			extra = append(extra, v.Interface())
		}
		return fns[0].Describe(exact, extra...)
	}
	// Extra arguments were supplied: find a describer whose extra parameter
	// types match them exactly.
	types := make([]reflect.Type, 0, len(extra))
	for _, obj := range extra {
		types = append(types, reflect.TypeOf(obj))
	}
	for _, typeFn := range fns {
		if typeFn.Matches(types) {
			return typeFn.Describe(exact, extra...)
		}
	}
	return "", newErrNoDescriber(append([]reflect.Type{exactType}, types...)...)
}
// Add adds one or more describer functions to the describe.Describer. The passed function must
// match the signature:
//
//     func(...) (string, error)
//
// Any number of arguments may be provided. Each function is validated with
// reflection and indexed by the type of its first argument.
func (d *Describers) Add(fns ...interface{}) error {
	for _, fn := range fns {
		fv := reflect.ValueOf(fn)
		ft := fv.Type()
		if ft.Kind() != reflect.Func {
			return fmt.Errorf("expected func, got: %v", ft)
		}
		numIn := ft.NumIn()
		if numIn == 0 {
			return fmt.Errorf("expected at least one 'in' params, got: %v", ft)
		}
		if ft.NumOut() != 2 {
			return fmt.Errorf("expected two 'out' params - (string, error), got: %v", ft)
		}
		types := make([]reflect.Type, 0, numIn)
		for i := 0; i < numIn; i++ {
			types = append(types, ft.In(i))
		}
		if ft.Out(0) != reflect.TypeOf(string("")) {
			return fmt.Errorf("expected string return, got: %v", ft)
		}
		var forErrorType error
		// This convolution is necessary, otherwise TypeOf picks up on the fact
		// that forErrorType is nil.
		errorType := reflect.TypeOf(&forErrorType).Elem()
		if ft.Out(1) != errorType {
			return fmt.Errorf("expected error return, got: %v", ft)
		}
		// Index the function by its first parameter type; the remaining
		// parameter types become the Extra list used for matching.
		exact := types[0]
		extra := types[1:]
		if d.searchFns == nil {
			d.searchFns = make(map[reflect.Type][]typeFunc)
		}
		fns := d.searchFns[exact]
		fn := typeFunc{Extra: extra, Fn: fv}
		fns = append(fns, fn)
		d.searchFns[exact] = fns
	}
	return nil
}
// typeFunc holds information about a describer function and the types it accepts.
type typeFunc struct {
	// Extra is the list of parameter types after the first ("exact") one.
	Extra []reflect.Type
	// Fn is the reflected describer function itself.
	Fn reflect.Value
}
// Matches returns true when the passed types contain exactly the same
// multiset of types as fn.Extra, in any order.
func (fn typeFunc) Matches(types []reflect.Type) bool {
	if len(fn.Extra) != len(types) {
		return false
	}
	// Count each expected type so that duplicates must be matched the same
	// number of times. The previous set-based comparison treated Extra as a
	// set, so e.g. types [T, T] could wrongly match Extra [T, U] (both have
	// length 2 and every element of types appeared in the set).
	remaining := make(map[reflect.Type]int, len(fn.Extra))
	for _, t := range fn.Extra {
		remaining[t]++
	}
	for _, t := range types {
		if remaining[t] == 0 {
			return false
		}
		remaining[t]--
	}
	return true
}
// Describe invokes the nested function with the exact number of arguments.
// It assumes the caller has already verified (via Matches or zero-value
// padding in DescribeObject) that the argument types are compatible.
func (fn typeFunc) Describe(exact interface{}, extra ...interface{}) (string, error) {
	values := []reflect.Value{reflect.ValueOf(exact)}
	for _, obj := range extra {
		values = append(values, reflect.ValueOf(obj))
	}
	out := fn.Fn.Call(values)
	// Add guarantees the signature is (string, error), so these type
	// assertions are safe.
	s := out[0].Interface().(string)
	var err error
	if !out[1].IsNil() {
		err = out[1].Interface().(error)
	}
	return s, err
}
// printLabelsMultiline prints multiple labels with a proper alignment,
// skipping nothing.
func printLabelsMultiline(w PrefixWriter, title string, labels map[string]string) {
	printLabelsMultilineWithIndent(w, "", title, "\t", labels, sets.NewString())
}
// printLabelsMultilineWithIndent prints multiple labels with a user-defined
// alignment, one "key=value" per line in sorted key order. Keys present in
// skip are omitted; "<none>" is printed when nothing remains.
func printLabelsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, labels map[string]string, skip sets.String) {
	w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent)
	if len(labels) == 0 {
		w.WriteLine("<none>")
		return
	}
	// Collect the non-skipped keys so the output is sorted and filtered.
	keys := make([]string, 0, len(labels))
	for key := range labels {
		if skip.Has(key) {
			continue
		}
		keys = append(keys, key)
	}
	if len(keys) == 0 {
		w.WriteLine("<none>")
		return
	}
	sort.Strings(keys)
	for i, key := range keys {
		// Every line after the first needs the indent re-emitted.
		if i != 0 {
			w.Write(LEVEL_0, "%s", initialIndent)
			w.Write(LEVEL_0, "%s", innerIndent)
		}
		w.Write(LEVEL_0, "%s=%s\n", key, labels[key])
		// (The original ended the loop body with a manual `i++`, which is a
		// no-op inside a range loop and has been removed.)
	}
}
// printNodeTaintsMultiline prints multiple taints with a proper alignment.
func printNodeTaintsMultiline(w PrefixWriter, title string, taints []corev1.Taint) {
	printTaintsMultilineWithIndent(w, "", title, "\t", taints)
}
// printTaintsMultilineWithIndent prints multiple taints with a user-defined
// alignment, one per line. NOTE: the caller's slice is sorted in place.
func printTaintsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, taints []corev1.Taint) {
	w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent)
	if len(taints) == 0 {
		w.WriteLine("<none>")
		return
	}
	// Sort by effect first, then key, so output is deterministic.
	sort.Slice(taints, func(i, j int) bool {
		cmpKey := func(taint corev1.Taint) string {
			return string(taint.Effect) + "," + taint.Key
		}
		return cmpKey(taints[i]) < cmpKey(taints[j])
	})
	for i, taint := range taints {
		// Every line after the first needs the indent re-emitted.
		if i != 0 {
			w.Write(LEVEL_0, "%s", initialIndent)
			w.Write(LEVEL_0, "%s", innerIndent)
		}
		w.Write(LEVEL_0, "%s\n", taint.ToString())
	}
}
// printPodsMultiline prints multiple pods with a proper alignment.
func printPodsMultiline(w PrefixWriter, title string, pods []corev1.Pod) {
	printPodsMultilineWithIndent(w, "", title, "\t", pods)
}
// printPodsMultilineWithIndent prints multiple pod names with a user-defined
// alignment, one per line. NOTE: the caller's slice is sorted in place by name.
func printPodsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, pods []corev1.Pod) {
	w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent)
	if len(pods) == 0 {
		w.WriteLine("<none>")
		return
	}
	// Sort by pod name so output is deterministic.
	sort.Slice(pods, func(i, j int) bool {
		cmpKey := func(pod corev1.Pod) string {
			return pod.Name
		}
		return cmpKey(pods[i]) < cmpKey(pods[j])
	})
	for i, pod := range pods {
		// Every line after the first needs the indent re-emitted.
		if i != 0 {
			w.Write(LEVEL_0, "%s", initialIndent)
			w.Write(LEVEL_0, "%s", innerIndent)
		}
		w.Write(LEVEL_0, "%s\n", pod.Name)
	}
}
// printPodTolerationsMultiline prints multiple tolerations with a proper alignment.
func printPodTolerationsMultiline(w PrefixWriter, title string, tolerations []corev1.Toleration) {
	printTolerationsMultilineWithIndent(w, "", title, "\t", tolerations)
}
// printTolerationsMultilineWithIndent prints multiple tolerations with a
// user-defined alignment, one per line as "key[=value][:effect][ for Ns]".
// NOTE: the caller's slice is sorted in place by key.
func printTolerationsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, tolerations []corev1.Toleration) {
	w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent)
	if len(tolerations) == 0 {
		w.WriteLine("<none>")
		return
	}
	// Sort by key so output is deterministic.
	sort.Slice(tolerations, func(i, j int) bool {
		return tolerations[i].Key < tolerations[j].Key
	})
	for i, toleration := range tolerations {
		// Every line after the first needs the indent re-emitted.
		if i != 0 {
			w.Write(LEVEL_0, "%s", initialIndent)
			w.Write(LEVEL_0, "%s", innerIndent)
		}
		w.Write(LEVEL_0, "%s", toleration.Key)
		// Optional parts are appended only when present.
		if len(toleration.Value) != 0 {
			w.Write(LEVEL_0, "=%s", toleration.Value)
		}
		if len(toleration.Effect) != 0 {
			w.Write(LEVEL_0, ":%s", toleration.Effect)
		}
		if toleration.TolerationSeconds != nil {
			w.Write(LEVEL_0, " for %ds", *toleration.TolerationSeconds)
		}
		w.Write(LEVEL_0, "\n")
	}
}
// flusher is satisfied by writers that buffer output and can be flushed
// (e.g. tabwriter.Writer); see prefixWriter.Flush.
type flusher interface {
	Flush()
}
func tabbedString(f func(io.Writer) error) (string, error) {
out := new(tabwriter.Writer)
buf := &bytes.Buffer{}
out.Init(buf, 0, 8, 2, ' ', 0)
err := f(out)
if err != nil {
return "", err
}
out.Flush()
str := string(buf.String())
return str, nil
}
// SortableResourceNames implements sort.Interface to order resource names
// lexically.
type SortableResourceNames []corev1.ResourceName

func (list SortableResourceNames) Len() int {
	return len(list)
}
func (list SortableResourceNames) Swap(i, j int) {
	list[i], list[j] = list[j], list[i]
}
func (list SortableResourceNames) Less(i, j int) bool {
	return list[i] < list[j]
}
// SortedResourceNames returns the resource names of a resource list in
// sorted (lexical) order.
func SortedResourceNames(list corev1.ResourceList) []corev1.ResourceName {
	names := make([]corev1.ResourceName, 0, len(list))
	for name := range list {
		names = append(names, name)
	}
	sort.Sort(SortableResourceNames(names))
	return names
}
// SortableResourceQuotas implements sort.Interface to order resource quotas
// by object name.
type SortableResourceQuotas []corev1.ResourceQuota

func (list SortableResourceQuotas) Len() int {
	return len(list)
}
func (list SortableResourceQuotas) Swap(i, j int) {
	list[i], list[j] = list[j], list[i]
}
func (list SortableResourceQuotas) Less(i, j int) bool {
	return list[i].Name < list[j].Name
}
// SortableVolumeMounts implements sort.Interface to order volume mounts by
// mount path.
type SortableVolumeMounts []corev1.VolumeMount

func (list SortableVolumeMounts) Len() int {
	return len(list)
}
func (list SortableVolumeMounts) Swap(i, j int) {
	list[i], list[j] = list[j], list[i]
}
func (list SortableVolumeMounts) Less(i, j int) bool {
	return list[i].MountPath < list[j].MountPath
}
// SortableVolumeDevices implements sort.Interface to order volume devices by
// device path.
type SortableVolumeDevices []corev1.VolumeDevice

func (list SortableVolumeDevices) Len() int {
	return len(list)
}
func (list SortableVolumeDevices) Swap(i, j int) {
	list[i], list[j] = list[j], list[i]
}
func (list SortableVolumeDevices) Less(i, j int) bool {
	return list[i].DevicePath < list[j].DevicePath
}
// maxAnnotationLen is the longest "key: value" annotation line printed before
// the value is moved to its own shortened lines.
var maxAnnotationLen = 140
// printAnnotationsMultilineWithFilter prints multiple annotations with a proper
// alignment, omitting any keys present in skip.
func printAnnotationsMultilineWithFilter(w PrefixWriter, title string, annotations map[string]string, skip sets.String) {
	printAnnotationsMultilineWithIndent(w, "", title, "\t", annotations, skip)
}
// printAnnotationsMultiline prints multiple annotations with a proper
// alignment, skipping nothing.
func printAnnotationsMultiline(w PrefixWriter, title string, annotations map[string]string) {
	printAnnotationsMultilineWithIndent(w, "", title, "\t", annotations, sets.NewString())
}
// printAnnotationsMultilineWithIndent prints multiple annotations with a
// user-defined alignment in sorted key order, omitting keys present in skip.
// If a "key: value" line would exceed maxAnnotationLen, or the value is
// multi-line, the value is printed on separate shortened lines under the key.
func printAnnotationsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, annotations map[string]string, skip sets.String) {
	w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent)
	if len(annotations) == 0 {
		w.WriteLine("<none>")
		return
	}
	// Collect the non-skipped keys so the output is sorted and filtered.
	keys := make([]string, 0, len(annotations))
	for key := range annotations {
		if skip.Has(key) {
			continue
		}
		keys = append(keys, key)
	}
	// BUG FIX: this previously re-tested len(annotations), so a map whose
	// keys were all skipped printed nothing (not even "<none>"); test the
	// filtered key list instead, mirroring printLabelsMultilineWithIndent.
	if len(keys) == 0 {
		w.WriteLine("<none>")
		return
	}
	sort.Strings(keys)
	indent := initialIndent + innerIndent
	for i, key := range keys {
		// Every line after the first needs the indent re-emitted.
		if i != 0 {
			w.Write(LEVEL_0, indent)
		}
		value := strings.TrimSuffix(annotations[key], "\n")
		if (len(value)+len(key)+2) > maxAnnotationLen || strings.Contains(value, "\n") {
			w.Write(LEVEL_0, "%s:\n", key)
			for _, s := range strings.Split(value, "\n") {
				w.Write(LEVEL_0, "%s %s\n", indent, shorten(s, maxAnnotationLen-2))
			}
		} else {
			w.Write(LEVEL_0, "%s: %s\n", key, value)
		}
		// (The original ended the loop body with a manual `i++`, which is a
		// no-op inside a range loop and has been removed.)
	}
}
// shorten truncates s to maxLength bytes, appending "..." when truncation
// occurred; shorter strings are returned unchanged.
func shorten(s string, maxLength int) string {
	if len(s) <= maxLength {
		return s
	}
	return s[:maxLength] + "..."
}
// translateTimestampSince returns the elapsed time since timestamp as a
// human-readable approximation, or "<unknown>" for the zero time.
func translateTimestampSince(timestamp metav1.Time) string {
	if !timestamp.IsZero() {
		return duration.HumanDuration(time.Since(timestamp.Time))
	}
	return "<unknown>"
}
// formatEventSource formats an EventSource as "component" or
// "component, host", excluding the host when it is empty.
func formatEventSource(es corev1.EventSource) string {
	parts := []string{es.Component}
	if es.Host != "" {
		parts = append(parts, es.Host)
	}
	return strings.Join(parts, ", ")
}
// formatEndpoints renders an Endpoints object as a comma-separated list of
// addresses (or host:port pairs), capped at 3 entries with a "+ N more..."
// suffix. Pass ports=nil for all ports.
func formatEndpoints(endpoints *corev1.Endpoints, ports sets.String) string {
	if len(endpoints.Subsets) == 0 {
		return "<none>"
	}
	list := []string{}
	max := 3
	more := false
	count := 0
	for i := range endpoints.Subsets {
		ss := &endpoints.Subsets[i]
		if len(ss.Ports) == 0 {
			// It's possible to have headless services with no ports.
			for i := range ss.Addresses {
				if len(list) == max {
					more = true
				}
				// count keeps running past max so the "+ N more" suffix is accurate.
				if !more {
					list = append(list, ss.Addresses[i].IP)
				}
				count++
			}
		} else {
			// "Normal" services with ports defined.
			for i := range ss.Ports {
				port := &ss.Ports[i]
				if ports == nil || ports.Has(port.Name) {
					for i := range ss.Addresses {
						if len(list) == max {
							more = true
						}
						addr := &ss.Addresses[i]
						if !more {
							hostPort := net.JoinHostPort(addr.IP, strconv.Itoa(int(port.Port)))
							list = append(list, hostPort)
						}
						count++
					}
				}
			}
		}
	}
	ret := strings.Join(list, ",")
	if more {
		return fmt.Sprintf("%s + %d more...", ret, count-max)
	}
	return ret
}
// extractCSRStatus summarizes a CertificateSigningRequest's conditions as
// "Denied"/"Approved"/"Pending", with ",Issued" appended once a certificate
// is present. Unknown condition types produce an error.
func extractCSRStatus(csr *certificatesv1beta1.CertificateSigningRequest) (string, error) {
	var approved, denied bool
	for _, c := range csr.Status.Conditions {
		switch c.Type {
		case certificatesv1beta1.CertificateApproved:
			approved = true
		case certificatesv1beta1.CertificateDenied:
			denied = true
		default:
			return "", fmt.Errorf("unknown csr condition %q", c)
		}
	}
	var status string
	// must be in order of precedence
	if denied {
		status += "Denied"
	} else if approved {
		status += "Approved"
	} else {
		status += "Pending"
	}
	if len(csr.Status.Certificate) > 0 {
		status += ",Issued"
	}
	return status, nil
}
// backendStringer behaves just like a string interface and converts the given
// backend to a "service:port" string. A nil backend yields the empty string.
func backendStringer(backend *networkingv1beta1.IngressBackend) string {
	if backend == nil {
		return ""
	}
	name := backend.ServiceName
	port := backend.ServicePort.String()
	return fmt.Sprintf("%v:%v", name, port)
}
// findNodeRoles returns the roles of a given node.
// The roles are determined by looking for:
// * a node-role.kubernetes.io/<role>="" label
// * a kubernetes.io/role="<role>" label
// The result is deduplicated via a string set.
func findNodeRoles(node *corev1.Node) []string {
	roles := sets.NewString()
	for k, v := range node.Labels {
		switch {
		case strings.HasPrefix(k, describe.LabelNodeRolePrefix):
			// The role is the label-key suffix; an empty suffix is ignored.
			if role := strings.TrimPrefix(k, describe.LabelNodeRolePrefix); len(role) > 0 {
				roles.Insert(role)
			}
		case k == describe.NodeLabelRole && v != "":
			roles.Insert(v)
		}
	}
	return roles.List()
}
// loadBalancerStatusStringer behaves mostly like a string interface and converts the given status to a string.
// `wide` indicates whether the returned value is meant for --o=wide output. If not, it's clipped to
// describe.LoadBalancerWidth bytes (with a "..." suffix).
func loadBalancerStatusStringer(s corev1.LoadBalancerStatus, wide bool) string {
	ingress := s.Ingress
	result := sets.NewString()
	for i := range ingress {
		// Prefer the IP; fall back to the hostname.
		if ingress[i].IP != "" {
			result.Insert(ingress[i].IP)
		} else if ingress[i].Hostname != "" {
			result.Insert(ingress[i].Hostname)
		}
	}
	r := strings.Join(result.List(), ",")
	if !wide && len(r) > describe.LoadBalancerWidth {
		r = r[0:(describe.LoadBalancerWidth-3)] + "..."
	}
	return r
}
Add signerName field to CSR resource spec
Signed-off-by: James Munnelly <james.munnelly@jetstack.io>
Kubernetes-commit: a983356caa9876079eafc5e8dd7806473022e958
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package versioned
import (
"bytes"
"context"
"crypto/x509"
"fmt"
"io"
"net"
"net/url"
"reflect"
"sort"
"strconv"
"strings"
"text/tabwriter"
"time"
"unicode"
"github.com/fatih/camelcase"
appsv1 "k8s.io/api/apps/v1"
autoscalingv1 "k8s.io/api/autoscaling/v1"
autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
coordinationv1 "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
rbacv1 "k8s.io/api/rbac/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/duration"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/reference"
"k8s.io/klog"
"k8s.io/kubectl/pkg/describe"
"k8s.io/kubectl/pkg/scheme"
"k8s.io/kubectl/pkg/util/certificate"
deploymentutil "k8s.io/kubectl/pkg/util/deployment"
"k8s.io/kubectl/pkg/util/event"
"k8s.io/kubectl/pkg/util/fieldpath"
"k8s.io/kubectl/pkg/util/qos"
"k8s.io/kubectl/pkg/util/rbac"
resourcehelper "k8s.io/kubectl/pkg/util/resource"
"k8s.io/kubectl/pkg/util/slice"
storageutil "k8s.io/kubectl/pkg/util/storage"
)
// Each level has 2 spaces for PrefixWriter; these constants select the
// indentation depth of a written line.
const (
	LEVEL_0 = iota
	LEVEL_1
	LEVEL_2
	LEVEL_3
	LEVEL_4
)
// DescriberFn gives a way to easily override the function for unit testing if needed.
var DescriberFn describe.DescriberFunc = Describer
// Describer returns a Describer for displaying the specified RESTMapping type or an error.
// Resolution order: a kind-specific describer, then a generic (unstructured)
// describer, otherwise an error.
func Describer(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (describe.Describer, error) {
	clientConfig, err := restClientGetter.ToRESTConfig()
	if err != nil {
		return nil, err
	}
	// try to get a describer
	if describer, ok := DescriberFor(mapping.GroupVersionKind.GroupKind(), clientConfig); ok {
		return describer, nil
	}
	// if this is a kind we don't have a describer for yet, go generic if possible
	if genericDescriber, ok := GenericDescriberFor(mapping, clientConfig); ok {
		return genericDescriber, nil
	}
	// otherwise return an unregistered error
	return nil, fmt.Errorf("no description has been implemented for %s", mapping.GroupVersionKind.String())
}
// PrefixWriter can write text at various indentation levels.
type PrefixWriter interface {
	// Write writes text with the specified indentation level.
	Write(level int, format string, a ...interface{})
	// WriteLine writes an entire line with no indentation level.
	WriteLine(a ...interface{})
	// Flush forces indentation to be reset.
	Flush()
}
// prefixWriter implements PrefixWriter on top of a plain io.Writer.
type prefixWriter struct {
	out io.Writer
}

// Compile-time check that prefixWriter satisfies PrefixWriter.
var _ PrefixWriter = &prefixWriter{}
// NewPrefixWriter creates a new PrefixWriter writing to out.
func NewPrefixWriter(out io.Writer) PrefixWriter {
	return &prefixWriter{out: out}
}
// Write writes the formatted text indented by two spaces per level.
func (pw *prefixWriter) Write(level int, format string, a ...interface{}) {
	// strings.Repeat replaces the original's quadratic `prefix += "  "` loop.
	prefix := strings.Repeat("  ", level)
	fmt.Fprintf(pw.out, prefix+format, a...)
}
// WriteLine writes its arguments as a single unindented line.
func (pw *prefixWriter) WriteLine(a ...interface{}) {
	fmt.Fprintln(pw.out, a...)
}
// Flush forwards to the underlying writer's Flush when it has one
// (e.g. a tabwriter); otherwise it is a no-op.
func (pw *prefixWriter) Flush() {
	if f, ok := pw.out.(flusher); ok {
		f.Flush()
	}
}
// describerMap builds the registry mapping each supported GroupKind to its
// typed describer, all sharing one clientset built from clientConfig.
func describerMap(clientConfig *rest.Config) (map[schema.GroupKind]describe.Describer, error) {
	c, err := clientset.NewForConfig(clientConfig)
	if err != nil {
		return nil, err
	}
	m := map[schema.GroupKind]describe.Describer{
		{Group: corev1.GroupName, Kind: "Pod"}:                                    &PodDescriber{c},
		{Group: corev1.GroupName, Kind: "ReplicationController"}:                  &ReplicationControllerDescriber{c},
		{Group: corev1.GroupName, Kind: "Secret"}:                                 &SecretDescriber{c},
		{Group: corev1.GroupName, Kind: "Service"}:                                &ServiceDescriber{c},
		{Group: corev1.GroupName, Kind: "ServiceAccount"}:                         &ServiceAccountDescriber{c},
		{Group: corev1.GroupName, Kind: "Node"}:                                   &NodeDescriber{c},
		{Group: corev1.GroupName, Kind: "LimitRange"}:                             &LimitRangeDescriber{c},
		{Group: corev1.GroupName, Kind: "ResourceQuota"}:                          &ResourceQuotaDescriber{c},
		{Group: corev1.GroupName, Kind: "PersistentVolume"}:                       &PersistentVolumeDescriber{c},
		{Group: corev1.GroupName, Kind: "PersistentVolumeClaim"}:                  &PersistentVolumeClaimDescriber{c},
		{Group: corev1.GroupName, Kind: "Namespace"}:                              &NamespaceDescriber{c},
		{Group: corev1.GroupName, Kind: "Endpoints"}:                              &EndpointsDescriber{c},
		{Group: corev1.GroupName, Kind: "ConfigMap"}:                              &ConfigMapDescriber{c},
		{Group: corev1.GroupName, Kind: "PriorityClass"}:                          &PriorityClassDescriber{c},
		{Group: discoveryv1beta1.GroupName, Kind: "EndpointSlice"}:                &EndpointSliceDescriber{c},
		{Group: extensionsv1beta1.GroupName, Kind: "ReplicaSet"}:                  &ReplicaSetDescriber{c},
		{Group: extensionsv1beta1.GroupName, Kind: "NetworkPolicy"}:               &NetworkPolicyDescriber{c},
		{Group: extensionsv1beta1.GroupName, Kind: "PodSecurityPolicy"}:           &PodSecurityPolicyDescriber{c},
		{Group: autoscalingv2beta2.GroupName, Kind: "HorizontalPodAutoscaler"}:    &HorizontalPodAutoscalerDescriber{c},
		{Group: extensionsv1beta1.GroupName, Kind: "DaemonSet"}:                   &DaemonSetDescriber{c},
		{Group: extensionsv1beta1.GroupName, Kind: "Deployment"}:                  &DeploymentDescriber{c},
		{Group: extensionsv1beta1.GroupName, Kind: "Ingress"}:                     &IngressDescriber{c},
		{Group: networkingv1beta1.GroupName, Kind: "Ingress"}:                     &IngressDescriber{c},
		{Group: batchv1.GroupName, Kind: "Job"}:                                   &JobDescriber{c},
		{Group: batchv1.GroupName, Kind: "CronJob"}:                               &CronJobDescriber{c},
		{Group: appsv1.GroupName, Kind: "StatefulSet"}:                            &StatefulSetDescriber{c},
		{Group: appsv1.GroupName, Kind: "Deployment"}:                             &DeploymentDescriber{c},
		{Group: appsv1.GroupName, Kind: "DaemonSet"}:                              &DaemonSetDescriber{c},
		{Group: appsv1.GroupName, Kind: "ReplicaSet"}:                             &ReplicaSetDescriber{c},
		{Group: certificatesv1beta1.GroupName, Kind: "CertificateSigningRequest"}: &CertificateSigningRequestDescriber{c},
		{Group: storagev1.GroupName, Kind: "StorageClass"}:                        &StorageClassDescriber{c},
		{Group: storagev1.GroupName, Kind: "CSINode"}:                             &CSINodeDescriber{c},
		{Group: policyv1beta1.GroupName, Kind: "PodDisruptionBudget"}:             &PodDisruptionBudgetDescriber{c},
		{Group: rbacv1.GroupName, Kind: "Role"}:                                   &RoleDescriber{c},
		{Group: rbacv1.GroupName, Kind: "ClusterRole"}:                            &ClusterRoleDescriber{c},
		{Group: rbacv1.GroupName, Kind: "RoleBinding"}:                            &RoleBindingDescriber{c},
		{Group: rbacv1.GroupName, Kind: "ClusterRoleBinding"}:                     &ClusterRoleBindingDescriber{c},
		{Group: networkingv1.GroupName, Kind: "NetworkPolicy"}:                    &NetworkPolicyDescriber{c},
		{Group: schedulingv1.GroupName, Kind: "PriorityClass"}:                    &PriorityClassDescriber{c},
	}
	return m, nil
}
// DescriberFor returns the default describe functions for each of the standard
// Kubernetes types. The boolean reports whether a describer was found; client
// construction errors are logged at V(1) and reported as "not found".
func DescriberFor(kind schema.GroupKind, clientConfig *rest.Config) (describe.Describer, bool) {
	describers, err := describerMap(clientConfig)
	if err != nil {
		klog.V(1).Info(err)
		return nil, false
	}
	f, ok := describers[kind]
	return f, ok
}
// GenericDescriberFor returns a generic describer for the specified mapping
// that uses only information available from runtime.Unstructured.
// Client construction errors are silently reported as "not found" so the
// caller can fall through to its own error.
func GenericDescriberFor(mapping *meta.RESTMapping, clientConfig *rest.Config) (describe.Describer, bool) {
	// used to fetch the resource
	dynamicClient, err := dynamic.NewForConfig(clientConfig)
	if err != nil {
		return nil, false
	}
	// used to get events for the resource
	clientSet, err := clientset.NewForConfig(clientConfig)
	if err != nil {
		return nil, false
	}
	eventsClient := clientSet.CoreV1()
	return &genericDescriber{mapping, dynamicClient, eventsClient}, true
}
// genericDescriber describes arbitrary resources via the dynamic client,
// using the events client to attach related events.
type genericDescriber struct {
	mapping *meta.RESTMapping
	dynamic dynamic.Interface
	events  corev1client.EventsGetter
}
// Describe fetches the named unstructured object and renders its metadata and
// remaining content, plus related events when requested.
func (g *genericDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (output string, err error) {
	// NOTE(review): unlike the typed describers in this file, this Get is not
	// passed a context — confirm against the dynamic client interface version
	// in use.
	obj, err := g.dynamic.Resource(g.mapping.Resource).Namespace(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are intentionally ignored; events are optional.
		events, _ = g.events.Events(namespace).Search(scheme.Scheme, obj)
	}
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", obj.GetName())
		w.Write(LEVEL_0, "Namespace:\t%s\n", obj.GetNamespace())
		printLabelsMultiline(w, "Labels", obj.GetLabels())
		printAnnotationsMultiline(w, "Annotations", obj.GetAnnotations())
		// Skip the metadata fields already rendered above.
		printUnstructuredContent(w, LEVEL_0, obj.UnstructuredContent(), "", ".metadata.name", ".metadata.namespace", ".metadata.labels", ".metadata.annotations")
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// printUnstructuredContent recursively renders an unstructured object's
// fields in sorted order at the given indentation level. skipPrefix tracks the
// dotted path so far; any path listed in skip is omitted.
func printUnstructuredContent(w PrefixWriter, level int, content map[string]interface{}, skipPrefix string, skip ...string) {
	fields := []string{}
	for field := range content {
		fields = append(fields, field)
	}
	sort.Strings(fields)
	for _, field := range fields {
		value := content[field]
		switch typedValue := value.(type) {
		case map[string]interface{}:
			skipExpr := fmt.Sprintf("%s.%s", skipPrefix, field)
			if slice.ContainsString(skip, skipExpr, nil) {
				continue
			}
			w.Write(level, "%s:\n", smartLabelFor(field))
			// Nested objects recurse one level deeper.
			printUnstructuredContent(w, level+1, typedValue, skipExpr, skip...)
		case []interface{}:
			skipExpr := fmt.Sprintf("%s.%s", skipPrefix, field)
			if slice.ContainsString(skip, skipExpr, nil) {
				continue
			}
			w.Write(level, "%s:\n", smartLabelFor(field))
			// List items are either nested objects (recurse) or scalars.
			for _, child := range typedValue {
				switch typedChild := child.(type) {
				case map[string]interface{}:
					printUnstructuredContent(w, level+1, typedChild, skipExpr, skip...)
				default:
					w.Write(level+1, "%v\n", typedChild)
				}
			}
		default:
			// Scalar leaf value.
			skipExpr := fmt.Sprintf("%s.%s", skipPrefix, field)
			if slice.ContainsString(skip, skipExpr, nil) {
				continue
			}
			w.Write(level, "%s:\t%v\n", smartLabelFor(field), typedValue)
		}
	}
}
// smartLabelFor turns a camelCase field name into a Title-Cased,
// space-separated label (e.g. "apiVersion" -> "API Version"), upper-casing
// well-known acronyms. Field names with characters other than letters and '-'
// are returned unchanged.
func smartLabelFor(field string) string {
	// skip creating smart label if field name contains
	// special characters other than '-'
	if strings.IndexFunc(field, func(r rune) bool {
		return !unicode.IsLetter(r) && r != '-'
	}) != -1 {
		return field
	}
	commonAcronyms := []string{"API", "URL", "UID", "OSB", "GUID"}
	parts := camelcase.Split(field)
	result := make([]string, 0, len(parts))
	for _, part := range parts {
		if part == "_" {
			continue
		}
		if slice.ContainsString(commonAcronyms, strings.ToUpper(part), nil) {
			part = strings.ToUpper(part)
		} else {
			// NOTE(review): strings.Title is deprecated in newer Go; kept here
			// to preserve existing output.
			part = strings.Title(part)
		}
		result = append(result, part)
	}
	return strings.Join(result, " ")
}
// DefaultObjectDescriber can describe the default Kubernetes objects.
// It is populated by init below.
var DefaultObjectDescriber describe.ObjectDescriber
// init registers the built-in describer functions into
// DefaultObjectDescriber; a registration failure is a programming error and
// aborts the process.
func init() {
	d := &Describers{}
	err := d.Add(
		describeLimitRange,
		describeQuota,
		describePod,
		describeService,
		describeReplicationController,
		describeDaemonSet,
		describeNode,
		describeNamespace,
	)
	if err != nil {
		klog.Fatalf("Cannot register describers: %v", err)
	}
	DefaultObjectDescriber = d
}
// NamespaceDescriber generates information about a namespace.
type NamespaceDescriber struct {
	clientset.Interface
}
// Describe fetches the namespace plus its resource quotas and limit ranges
// and renders them. Quota/limit-range lookups that return NotFound are
// treated as "feature unsupported" rather than errors.
func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	ns, err := d.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	resourceQuotaList, err := d.CoreV1().ResourceQuotas(name).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			// Server does not support resource quotas.
			// Not an error, will not show resource quotas information.
			resourceQuotaList = nil
		} else {
			return "", err
		}
	}
	limitRangeList, err := d.CoreV1().LimitRanges(name).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			// Server does not support limit ranges.
			// Not an error, will not show limit ranges information.
			limitRangeList = nil
		} else {
			return "", err
		}
	}
	return describeNamespace(ns, resourceQuotaList, limitRangeList)
}
// describeNamespace renders a namespace as tab-separated text: name,
// labels, annotations and phase, followed by optional resource quota
// and limit range sections.
func describeNamespace(namespace *corev1.Namespace, resourceQuotaList *corev1.ResourceQuotaList, limitRangeList *corev1.LimitRangeList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", namespace.Name)
		printLabelsMultiline(w, "Labels", namespace.Labels)
		printAnnotationsMultiline(w, "Annotations", namespace.Annotations)
		w.Write(LEVEL_0, "Status:\t%s\n", string(namespace.Status.Phase))
		// A nil list means the caller chose to omit the section (e.g. the
		// server does not support the resource type); it is skipped rather
		// than rendered empty.
		if resourceQuotaList != nil {
			w.Write(LEVEL_0, "\n")
			DescribeResourceQuotas(resourceQuotaList, w)
		}
		if limitRangeList != nil {
			w.Write(LEVEL_0, "\n")
			DescribeLimitRanges(limitRangeList, w)
		}
		return nil
	})
}
// describeLimitRangeSpec writes one tabular row per (limit type,
// resource name) pair found in the spec. A resource appears if any of
// the five value maps (min, max, default request, default limit,
// max limit/request ratio) mentions it; unset values print as "-".
// Row order within a limit item is map-iteration order (unsorted),
// matching the original behavior.
func describeLimitRangeSpec(spec corev1.LimitRangeSpec, prefix string, w PrefixWriter) {
	for i := range spec.Limits {
		item := spec.Limits[i]
		// Collect every resource name mentioned by any of the value maps.
		set := map[corev1.ResourceName]bool{}
		for _, resources := range []corev1.ResourceList{
			item.Max,
			item.Min,
			item.Default,
			item.DefaultRequest,
			item.MaxLimitRequestRatio,
		} {
			for k := range resources {
				set[k] = true
			}
		}
		for k := range set {
			// Column order: min, max, default request, default limit, ratio.
			msg := "%s%s\t%v\t%v\t%v\t%v\t%v\t%v\n"
			w.Write(LEVEL_0, msg, prefix, item.Type, k,
				limitRangeValueOrDash(item.Min, k),
				limitRangeValueOrDash(item.Max, k),
				limitRangeValueOrDash(item.DefaultRequest, k),
				limitRangeValueOrDash(item.Default, k),
				limitRangeValueOrDash(item.MaxLimitRequestRatio, k))
		}
	}
}

// limitRangeValueOrDash returns the string form of resources[k], or "-"
// when the resource has no entry in the map.
func limitRangeValueOrDash(resources corev1.ResourceList, k corev1.ResourceName) string {
	if q, ok := resources[k]; ok {
		return q.String()
	}
	return "-"
}
// DescribeLimitRanges merges a set of limit range items into a single tabular description
func DescribeLimitRanges(limitRanges *corev1.LimitRangeList, w PrefixWriter) {
	if len(limitRanges.Items) == 0 {
		w.Write(LEVEL_0, "No LimitRange resource.\n")
		return
	}
	// Single shared header, then one row group per LimitRange item.
	w.Write(LEVEL_0, "Resource Limits\n Type\tResource\tMin\tMax\tDefault Request\tDefault Limit\tMax Limit/Request Ratio\n")
	w.Write(LEVEL_0, " ----\t--------\t---\t---\t---------------\t-------------\t-----------------------\n")
	for _, limitRange := range limitRanges.Items {
		describeLimitRangeSpec(limitRange.Spec, " ", w)
	}
}
// DescribeResourceQuotas merges a set of quota items into a single tabular description of all quotas
func DescribeResourceQuotas(quotas *corev1.ResourceQuotaList, w PrefixWriter) {
	if len(quotas.Items) == 0 {
		w.Write(LEVEL_0, "No resource quota.\n")
		return
	}
	// Sort the quotas themselves so output is deterministic.
	sort.Sort(SortableResourceQuotas(quotas.Items))
	w.Write(LEVEL_0, "Resource Quotas")
	for _, q := range quotas.Items {
		w.Write(LEVEL_0, "\n Name:\t%s\n", q.Name)
		if len(q.Spec.Scopes) > 0 {
			// Print scopes sorted, each followed by its help text (if any).
			scopes := make([]string, 0, len(q.Spec.Scopes))
			for _, scope := range q.Spec.Scopes {
				scopes = append(scopes, string(scope))
			}
			sort.Strings(scopes)
			w.Write(LEVEL_0, " Scopes:\t%s\n", strings.Join(scopes, ", "))
			for _, scope := range scopes {
				helpText := helpTextForResourceQuotaScope(corev1.ResourceQuotaScope(scope))
				if len(helpText) > 0 {
					w.Write(LEVEL_0, " * %s\n", helpText)
				}
			}
		}
		w.Write(LEVEL_0, " Resource\tUsed\tHard\n")
		w.Write(LEVEL_0, " --------\t---\t---\n")
		// Sort resource names so rows appear in a stable order.
		resources := make([]corev1.ResourceName, 0, len(q.Status.Hard))
		for resource := range q.Status.Hard {
			resources = append(resources, resource)
		}
		sort.Sort(SortableResourceNames(resources))
		for _, resource := range resources {
			hardQuantity := q.Status.Hard[resource]
			usedQuantity := q.Status.Used[resource]
			w.Write(LEVEL_0, " %s\t%s\t%s\n", string(resource), usedQuantity.String(), hardQuantity.String())
		}
	}
}
// LimitRangeDescriber generates information about a limit range
type LimitRangeDescriber struct {
	// Embedded client used to fetch the limit range.
	clientset.Interface
}
// Describe returns a textual description of the named LimitRange in the
// given namespace.
func (d *LimitRangeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	limitRange, err := d.CoreV1().LimitRanges(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return describeLimitRange(limitRange)
}
// describeLimitRange renders a single LimitRange as a tab-separated
// table of per-type, per-resource limits.
func describeLimitRange(limitRange *corev1.LimitRange) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", limitRange.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", limitRange.Namespace)
		w.Write(LEVEL_0, "Type\tResource\tMin\tMax\tDefault Request\tDefault Limit\tMax Limit/Request Ratio\n")
		w.Write(LEVEL_0, "----\t--------\t---\t---\t---------------\t-------------\t-----------------------\n")
		describeLimitRangeSpec(limitRange.Spec, "", w)
		return nil
	})
}
// ResourceQuotaDescriber generates information about a resource quota
type ResourceQuotaDescriber struct {
	// Embedded client used to fetch the resource quota.
	clientset.Interface
}
// Describe returns a textual description of the named ResourceQuota in
// the given namespace.
func (d *ResourceQuotaDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	resourceQuota, err := d.CoreV1().ResourceQuotas(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return describeQuota(resourceQuota)
}
// helpTextForResourceQuotaScope returns a human-readable explanation of
// the given quota scope, or the empty string for unknown scopes.
func helpTextForResourceQuotaScope(scope corev1.ResourceQuotaScope) string {
	// A missing key yields "", matching the behavior of an unknown scope.
	scopeHelp := map[corev1.ResourceQuotaScope]string{
		corev1.ResourceQuotaScopeTerminating:    "Matches all pods that have an active deadline. These pods have a limited lifespan on a node before being actively terminated by the system.",
		corev1.ResourceQuotaScopeNotTerminating: "Matches all pods that do not have an active deadline. These pods usually include long running pods whose container command is not expected to terminate.",
		corev1.ResourceQuotaScopeBestEffort:     "Matches all pods that do not have resource requirements set. These pods have a best effort quality of service.",
		corev1.ResourceQuotaScopeNotBestEffort:  "Matches all pods that have at least one resource requirement set. These pods have a burstable or guaranteed quality of service.",
	}
	return scopeHelp[scope]
}
// describeQuota renders a single ResourceQuota: name, namespace, sorted
// scopes with help text, then one used/hard row per hard-limited
// resource, sorted by resource name.
func describeQuota(resourceQuota *corev1.ResourceQuota) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", resourceQuota.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", resourceQuota.Namespace)
		if len(resourceQuota.Spec.Scopes) > 0 {
			// Scopes are sorted for stable output; each known scope is
			// followed by an explanatory bullet line.
			scopes := make([]string, 0, len(resourceQuota.Spec.Scopes))
			for _, scope := range resourceQuota.Spec.Scopes {
				scopes = append(scopes, string(scope))
			}
			sort.Strings(scopes)
			w.Write(LEVEL_0, "Scopes:\t%s\n", strings.Join(scopes, ", "))
			for _, scope := range scopes {
				helpText := helpTextForResourceQuotaScope(corev1.ResourceQuotaScope(scope))
				if len(helpText) > 0 {
					w.Write(LEVEL_0, " * %s\n", helpText)
				}
			}
		}
		w.Write(LEVEL_0, "Resource\tUsed\tHard\n")
		w.Write(LEVEL_0, "--------\t----\t----\n")
		// Rows are keyed by the hard limits; sorted for stable output.
		resources := make([]corev1.ResourceName, 0, len(resourceQuota.Status.Hard))
		for resource := range resourceQuota.Status.Hard {
			resources = append(resources, resource)
		}
		sort.Sort(SortableResourceNames(resources))
		msg := "%v\t%v\t%v\n"
		for i := range resources {
			resource := resources[i]
			hardQuantity := resourceQuota.Status.Hard[resource]
			usedQuantity := resourceQuota.Status.Used[resource]
			w.Write(LEVEL_0, msg, resource, usedQuantity.String(), hardQuantity.String())
		}
		return nil
	})
}
// PodDescriber generates information about a pod and the replication controllers that
// create it.
type PodDescriber struct {
	// Embedded client used to fetch the pod and its events.
	clientset.Interface
}
// Describe returns a textual description of the named pod. When the pod
// cannot be fetched but ShowEvents is set, events recorded against the
// name are returned instead so the caller still gets diagnostic output.
func (d *PodDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	pod, err := d.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		if describerSettings.ShowEvents {
			eventsInterface := d.CoreV1().Events(namespace)
			selector := eventsInterface.GetFieldSelector(&name, &namespace, nil, nil)
			options := metav1.ListOptions{FieldSelector: selector.String()}
			events, err2 := eventsInterface.List(context.TODO(), options)
			// Fix: the original re-tested describerSettings.ShowEvents here,
			// although this branch is only reachable when it is already true.
			if err2 == nil && len(events.Items) > 0 {
				return tabbedString(func(out io.Writer) error {
					w := NewPrefixWriter(out)
					w.Write(LEVEL_0, "Pod '%v': error '%v', but found events.\n", name, err)
					DescribeEvents(events, w)
					return nil
				})
			}
		}
		return "", err
	}
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		if ref, err := reference.GetReference(scheme.Scheme, pod); err != nil {
			klog.Errorf("Unable to construct reference to '%#v': %v", pod, err)
		} else {
			// Search with the Kind cleared; for mirror pods, use the UID
			// stored in the mirror-pod annotation instead of the pod's own.
			ref.Kind = ""
			if _, isMirrorPod := pod.Annotations[corev1.MirrorPodAnnotationKey]; isMirrorPod {
				ref.UID = types.UID(pod.Annotations[corev1.MirrorPodAnnotationKey])
			}
			// Best-effort: a failed event search just omits the section.
			events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, ref)
		}
	}
	return describePod(pod, events)
}
// describePod renders the pod as tab-separated text: identity and node
// placement, status, containers (init/regular/ephemeral), readiness
// gates, conditions, volumes, QoS, tolerations, and events (when the
// events list is non-nil). Section order is fixed.
func describePod(pod *corev1.Pod, events *corev1.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", pod.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", pod.Namespace)
		if pod.Spec.Priority != nil {
			w.Write(LEVEL_0, "Priority:\t%d\n", *pod.Spec.Priority)
		}
		if len(pod.Spec.PriorityClassName) > 0 {
			w.Write(LEVEL_0, "Priority Class Name:\t%s\n", stringOrNone(pod.Spec.PriorityClassName))
		}
		// An unscheduled pod has no node name yet.
		if pod.Spec.NodeName == "" {
			w.Write(LEVEL_0, "Node:\t<none>\n")
		} else {
			w.Write(LEVEL_0, "Node:\t%s\n", pod.Spec.NodeName+"/"+pod.Status.HostIP)
		}
		if pod.Status.StartTime != nil {
			w.Write(LEVEL_0, "Start Time:\t%s\n", pod.Status.StartTime.Time.Format(time.RFC1123Z))
		}
		printLabelsMultiline(w, "Labels", pod.Labels)
		printAnnotationsMultiline(w, "Annotations", pod.Annotations)
		// A pod with a deletion timestamp is being torn down: show how long
		// it has been terminating and its grace period instead of the phase.
		if pod.DeletionTimestamp != nil {
			w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", translateTimestampSince(*pod.DeletionTimestamp))
			w.Write(LEVEL_0, "Termination Grace Period:\t%ds\n", *pod.DeletionGracePeriodSeconds)
		} else {
			w.Write(LEVEL_0, "Status:\t%s\n", string(pod.Status.Phase))
		}
		if len(pod.Status.Reason) > 0 {
			w.Write(LEVEL_0, "Reason:\t%s\n", pod.Status.Reason)
		}
		if len(pod.Status.Message) > 0 {
			w.Write(LEVEL_0, "Message:\t%s\n", pod.Status.Message)
		}
		// TODO: remove when the singular .IP field is deprecated
		// (the full list is printed by describePodIPs just below).
		w.Write(LEVEL_0, "IP:\t%s\n", pod.Status.PodIP)
		describePodIPs(pod, w, "")
		if controlledBy := printController(pod); len(controlledBy) > 0 {
			w.Write(LEVEL_0, "Controlled By:\t%s\n", controlledBy)
		}
		if len(pod.Status.NominatedNodeName) > 0 {
			w.Write(LEVEL_0, "NominatedNodeName:\t%s\n", pod.Status.NominatedNodeName)
		}
		if len(pod.Spec.InitContainers) > 0 {
			describeContainers("Init Containers", pod.Spec.InitContainers, pod.Status.InitContainerStatuses, EnvValueRetriever(pod), w, "")
		}
		describeContainers("Containers", pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(pod), w, "")
		if len(pod.Spec.EphemeralContainers) > 0 {
			// Ephemeral containers are converted to plain Containers so the
			// shared describeContainers helper can render them.
			var ec []corev1.Container
			for i := range pod.Spec.EphemeralContainers {
				ec = append(ec, corev1.Container(pod.Spec.EphemeralContainers[i].EphemeralContainerCommon))
			}
			describeContainers("Ephemeral Containers", ec, pod.Status.EphemeralContainerStatuses, EnvValueRetriever(pod), w, "")
		}
		if len(pod.Spec.ReadinessGates) > 0 {
			w.Write(LEVEL_0, "Readiness Gates:\n  Type\tStatus\n")
			for _, g := range pod.Spec.ReadinessGates {
				// Show the matching condition's status, or <none> when the
				// gate's condition type is not present on the pod.
				status := "<none>"
				for _, c := range pod.Status.Conditions {
					if c.Type == g.ConditionType {
						status = fmt.Sprintf("%v", c.Status)
						break
					}
				}
				w.Write(LEVEL_1, "%v \t%v \n",
					g.ConditionType,
					status)
			}
		}
		if len(pod.Status.Conditions) > 0 {
			w.Write(LEVEL_0, "Conditions:\n  Type\tStatus\n")
			for _, c := range pod.Status.Conditions {
				w.Write(LEVEL_1, "%v \t%v \n",
					c.Type,
					c.Status)
			}
		}
		describeVolumes(pod.Spec.Volumes, w, "")
		// Prefer the server-reported QoS class; fall back to computing it.
		if pod.Status.QOSClass != "" {
			w.Write(LEVEL_0, "QoS Class:\t%s\n", pod.Status.QOSClass)
		} else {
			w.Write(LEVEL_0, "QoS Class:\t%s\n", qos.GetPodQOS(pod))
		}
		printLabelsMultiline(w, "Node-Selectors", pod.Spec.NodeSelector)
		printPodTolerationsMultiline(w, "Tolerations", pod.Spec.Tolerations)
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// printController returns "Kind/Name" for the object's controlling
// owner reference, or the empty string when it has none.
func printController(controllee metav1.Object) string {
	controllerRef := metav1.GetControllerOf(controllee)
	if controllerRef == nil {
		return ""
	}
	return fmt.Sprintf("%s/%s", controllerRef.Kind, controllerRef.Name)
}
// describePodIPs lists every IP assigned to the pod, one per line, or
// "<none>" when the pod has no IPs yet.
func describePodIPs(pod *corev1.Pod, w PrefixWriter, space string) {
	ips := pod.Status.PodIPs
	if len(ips) == 0 {
		w.Write(LEVEL_0, "%sIPs:\t<none>\n", space)
		return
	}
	w.Write(LEVEL_0, "%sIPs:\n", space)
	for i := range ips {
		w.Write(LEVEL_1, "IP:\t%s\n", ips[i].IP)
	}
}
// describeVolumes renders each volume's name followed by a
// source-specific detail block, dispatching on whichever single
// VolumeSource field is set. Unknown sources print "<unknown>".
func describeVolumes(volumes []corev1.Volume, w PrefixWriter, space string) {
	if len(volumes) == 0 {
		w.Write(LEVEL_0, "%sVolumes:\t<none>\n", space)
		return
	}
	w.Write(LEVEL_0, "%sVolumes:\n", space)
	for _, volume := range volumes {
		// Extra indent for the name when a section prefix is in use.
		nameIndent := ""
		if len(space) > 0 {
			nameIndent = " "
		}
		w.Write(LEVEL_1, "%s%v:\n", nameIndent, volume.Name)
		switch {
		case volume.VolumeSource.HostPath != nil:
			printHostPathVolumeSource(volume.VolumeSource.HostPath, w)
		case volume.VolumeSource.EmptyDir != nil:
			printEmptyDirVolumeSource(volume.VolumeSource.EmptyDir, w)
		case volume.VolumeSource.GCEPersistentDisk != nil:
			printGCEPersistentDiskVolumeSource(volume.VolumeSource.GCEPersistentDisk, w)
		case volume.VolumeSource.AWSElasticBlockStore != nil:
			printAWSElasticBlockStoreVolumeSource(volume.VolumeSource.AWSElasticBlockStore, w)
		case volume.VolumeSource.GitRepo != nil:
			printGitRepoVolumeSource(volume.VolumeSource.GitRepo, w)
		case volume.VolumeSource.Secret != nil:
			printSecretVolumeSource(volume.VolumeSource.Secret, w)
		case volume.VolumeSource.ConfigMap != nil:
			printConfigMapVolumeSource(volume.VolumeSource.ConfigMap, w)
		case volume.VolumeSource.NFS != nil:
			printNFSVolumeSource(volume.VolumeSource.NFS, w)
		case volume.VolumeSource.ISCSI != nil:
			printISCSIVolumeSource(volume.VolumeSource.ISCSI, w)
		case volume.VolumeSource.Glusterfs != nil:
			printGlusterfsVolumeSource(volume.VolumeSource.Glusterfs, w)
		case volume.VolumeSource.PersistentVolumeClaim != nil:
			printPersistentVolumeClaimVolumeSource(volume.VolumeSource.PersistentVolumeClaim, w)
		case volume.VolumeSource.RBD != nil:
			printRBDVolumeSource(volume.VolumeSource.RBD, w)
		case volume.VolumeSource.Quobyte != nil:
			printQuobyteVolumeSource(volume.VolumeSource.Quobyte, w)
		case volume.VolumeSource.DownwardAPI != nil:
			printDownwardAPIVolumeSource(volume.VolumeSource.DownwardAPI, w)
		case volume.VolumeSource.AzureDisk != nil:
			printAzureDiskVolumeSource(volume.VolumeSource.AzureDisk, w)
		case volume.VolumeSource.VsphereVolume != nil:
			printVsphereVolumeSource(volume.VolumeSource.VsphereVolume, w)
		case volume.VolumeSource.Cinder != nil:
			printCinderVolumeSource(volume.VolumeSource.Cinder, w)
		case volume.VolumeSource.PhotonPersistentDisk != nil:
			printPhotonPersistentDiskVolumeSource(volume.VolumeSource.PhotonPersistentDisk, w)
		case volume.VolumeSource.PortworxVolume != nil:
			printPortworxVolumeSource(volume.VolumeSource.PortworxVolume, w)
		case volume.VolumeSource.ScaleIO != nil:
			printScaleIOVolumeSource(volume.VolumeSource.ScaleIO, w)
		case volume.VolumeSource.CephFS != nil:
			printCephFSVolumeSource(volume.VolumeSource.CephFS, w)
		case volume.VolumeSource.StorageOS != nil:
			printStorageOSVolumeSource(volume.VolumeSource.StorageOS, w)
		case volume.VolumeSource.FC != nil:
			printFCVolumeSource(volume.VolumeSource.FC, w)
		case volume.VolumeSource.AzureFile != nil:
			printAzureFileVolumeSource(volume.VolumeSource.AzureFile, w)
		case volume.VolumeSource.FlexVolume != nil:
			printFlexVolumeSource(volume.VolumeSource.FlexVolume, w)
		case volume.VolumeSource.Flocker != nil:
			printFlockerVolumeSource(volume.VolumeSource.Flocker, w)
		case volume.VolumeSource.Projected != nil:
			printProjectedVolumeSource(volume.VolumeSource.Projected, w)
		case volume.VolumeSource.CSI != nil:
			printCSIVolumeSource(volume.VolumeSource.CSI, w)
		default:
			w.Write(LEVEL_1, "<unknown>\n")
		}
	}
}
// printHostPathVolumeSource prints path and type details of a HostPath
// volume; an unset type prints as "<none>".
func printHostPathVolumeSource(hostPath *corev1.HostPathVolumeSource, w PrefixWriter) {
	hostPathType := "<none>"
	if hostPath.Type != nil {
		hostPathType = string(*hostPath.Type)
	}
	w.Write(LEVEL_2, "Type:\tHostPath (bare host directory volume)\n"+
		" Path:\t%v\n"+
		" HostPathType:\t%v\n",
		hostPath.Path, hostPathType)
}
// printEmptyDirVolumeSource prints medium and size limit of an EmptyDir
// volume; a nil or zero size limit prints as "<unset>".
func printEmptyDirVolumeSource(emptyDir *corev1.EmptyDirVolumeSource, w PrefixWriter) {
	var sizeLimit string
	if emptyDir.SizeLimit != nil && emptyDir.SizeLimit.Cmp(resource.Quantity{}) > 0 {
		sizeLimit = fmt.Sprintf("%v", emptyDir.SizeLimit)
	} else {
		sizeLimit = "<unset>"
	}
	w.Write(LEVEL_2, "Type:\tEmptyDir (a temporary directory that shares a pod's lifetime)\n"+
		" Medium:\t%v\n"+
		" SizeLimit:\t%v\n",
		emptyDir.Medium, sizeLimit)
}
// printGCEPersistentDiskVolumeSource prints the details of a GCE PD volume.
func printGCEPersistentDiskVolumeSource(gce *corev1.GCEPersistentDiskVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tGCEPersistentDisk (a Persistent Disk resource in Google Compute Engine)\n"+
		" PDName:\t%v\n"+
		" FSType:\t%v\n"+
		" Partition:\t%v\n"+
		" ReadOnly:\t%v\n",
		gce.PDName, gce.FSType, gce.Partition, gce.ReadOnly)
}
// printAWSElasticBlockStoreVolumeSource prints the details of an AWS EBS volume.
func printAWSElasticBlockStoreVolumeSource(aws *corev1.AWSElasticBlockStoreVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tAWSElasticBlockStore (a Persistent Disk resource in AWS)\n"+
		" VolumeID:\t%v\n"+
		" FSType:\t%v\n"+
		" Partition:\t%v\n"+
		" ReadOnly:\t%v\n",
		aws.VolumeID, aws.FSType, aws.Partition, aws.ReadOnly)
}
// printGitRepoVolumeSource prints the details of a GitRepo volume.
func printGitRepoVolumeSource(git *corev1.GitRepoVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tGitRepo (a volume that is pulled from git when the pod is created)\n"+
		" Repository:\t%v\n"+
		" Revision:\t%v\n",
		git.Repository, git.Revision)
}
// printSecretVolumeSource prints the details of a Secret-backed volume;
// a nil Optional field is treated as false.
func printSecretVolumeSource(secret *corev1.SecretVolumeSource, w PrefixWriter) {
	optional := secret.Optional != nil && *secret.Optional
	w.Write(LEVEL_2, "Type:\tSecret (a volume populated by a Secret)\n"+
		" SecretName:\t%v\n"+
		" Optional:\t%v\n",
		secret.SecretName, optional)
}
// printConfigMapVolumeSource prints the details of a ConfigMap-backed
// volume; a nil Optional field is treated as false.
func printConfigMapVolumeSource(configMap *corev1.ConfigMapVolumeSource, w PrefixWriter) {
	optional := configMap.Optional != nil && *configMap.Optional
	w.Write(LEVEL_2, "Type:\tConfigMap (a volume populated by a ConfigMap)\n"+
		" Name:\t%v\n"+
		" Optional:\t%v\n",
		configMap.Name, optional)
}
// printProjectedVolumeSource prints one detail entry per projection
// source contained in the volume. Sources with no recognized field set
// print nothing.
func printProjectedVolumeSource(projected *corev1.ProjectedVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tProjected (a volume that contains injected data from multiple sources)\n")
	for _, source := range projected.Sources {
		switch {
		case source.Secret != nil:
			w.Write(LEVEL_2, "SecretName:\t%v\n"+
				" SecretOptionalName:\t%v\n",
				source.Secret.Name, source.Secret.Optional)
		case source.DownwardAPI != nil:
			w.Write(LEVEL_2, "DownwardAPI:\ttrue\n")
		case source.ConfigMap != nil:
			w.Write(LEVEL_2, "ConfigMapName:\t%v\n"+
				" ConfigMapOptional:\t%v\n",
				source.ConfigMap.Name, source.ConfigMap.Optional)
		case source.ServiceAccountToken != nil:
			w.Write(LEVEL_2, "TokenExpirationSeconds:\t%d\n",
				*source.ServiceAccountToken.ExpirationSeconds)
		}
	}
}
// printNFSVolumeSource prints the details of an NFS volume.
func printNFSVolumeSource(nfs *corev1.NFSVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tNFS (an NFS mount that lasts the lifetime of a pod)\n"+
		" Server:\t%v\n"+
		" Path:\t%v\n"+
		" ReadOnly:\t%v\n",
		nfs.Server, nfs.Path, nfs.ReadOnly)
}
// printQuobyteVolumeSource prints the details of a Quobyte volume.
func printQuobyteVolumeSource(quobyte *corev1.QuobyteVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tQuobyte (a Quobyte mount on the host that shares a pod's lifetime)\n"+
		" Registry:\t%v\n"+
		" Volume:\t%v\n"+
		" ReadOnly:\t%v\n",
		quobyte.Registry, quobyte.Volume, quobyte.ReadOnly)
}
// printPortworxVolumeSource prints the details of a Portworx volume.
func printPortworxVolumeSource(pwxVolume *corev1.PortworxVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tPortworxVolume (a Portworx Volume resource)\n"+
		" VolumeID:\t%v\n",
		pwxVolume.VolumeID)
}
// printISCSIVolumeSource prints the details of an iSCSI volume; an
// unset initiator name prints as "<none>".
func printISCSIVolumeSource(iscsi *corev1.ISCSIVolumeSource, w PrefixWriter) {
	initiator := "<none>"
	if iscsi.InitiatorName != nil {
		initiator = *iscsi.InitiatorName
	}
	w.Write(LEVEL_2, "Type:\tISCSI (an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod)\n"+
		" TargetPortal:\t%v\n"+
		" IQN:\t%v\n"+
		" Lun:\t%v\n"+
		" ISCSIInterface\t%v\n"+
		" FSType:\t%v\n"+
		" ReadOnly:\t%v\n"+
		" Portals:\t%v\n"+
		" DiscoveryCHAPAuth:\t%v\n"+
		" SessionCHAPAuth:\t%v\n"+
		" SecretRef:\t%v\n"+
		" InitiatorName:\t%v\n",
		iscsi.TargetPortal, iscsi.IQN, iscsi.Lun, iscsi.ISCSIInterface, iscsi.FSType, iscsi.ReadOnly, iscsi.Portals, iscsi.DiscoveryCHAPAuth, iscsi.SessionCHAPAuth, iscsi.SecretRef, initiator)
}
// printISCSIPersistentVolumeSource prints the details of an iSCSI PV
// source; an unset initiator name prints as "<none>".
func printISCSIPersistentVolumeSource(iscsi *corev1.ISCSIPersistentVolumeSource, w PrefixWriter) {
	initiatorName := "<none>"
	if iscsi.InitiatorName != nil {
		initiatorName = *iscsi.InitiatorName
	}
	w.Write(LEVEL_2, "Type:\tISCSI (an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod)\n"+
		" TargetPortal:\t%v\n"+
		" IQN:\t%v\n"+
		" Lun:\t%v\n"+
		" ISCSIInterface\t%v\n"+
		" FSType:\t%v\n"+
		" ReadOnly:\t%v\n"+
		" Portals:\t%v\n"+
		" DiscoveryCHAPAuth:\t%v\n"+
		" SessionCHAPAuth:\t%v\n"+
		" SecretRef:\t%v\n"+
		" InitiatorName:\t%v\n",
		iscsi.TargetPortal, iscsi.IQN, iscsi.Lun, iscsi.ISCSIInterface, iscsi.FSType, iscsi.ReadOnly, iscsi.Portals, iscsi.DiscoveryCHAPAuth, iscsi.SessionCHAPAuth, iscsi.SecretRef, initiatorName)
}
// printGlusterfsVolumeSource prints the details of a Glusterfs volume.
func printGlusterfsVolumeSource(glusterfs *corev1.GlusterfsVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tGlusterfs (a Glusterfs mount on the host that shares a pod's lifetime)\n"+
		" EndpointsName:\t%v\n"+
		" Path:\t%v\n"+
		" ReadOnly:\t%v\n",
		glusterfs.EndpointsName, glusterfs.Path, glusterfs.ReadOnly)
}
// printGlusterfsPersistentVolumeSource prints the details of a
// Glusterfs PV source; an unset endpoints namespace prints as "<unset>".
func printGlusterfsPersistentVolumeSource(glusterfs *corev1.GlusterfsPersistentVolumeSource, w PrefixWriter) {
	endpointsNamespace := "<unset>"
	if glusterfs.EndpointsNamespace != nil {
		endpointsNamespace = *glusterfs.EndpointsNamespace
	}
	w.Write(LEVEL_2, "Type:\tGlusterfs (a Glusterfs mount on the host that shares a pod's lifetime)\n"+
		" EndpointsName:\t%v\n"+
		" EndpointsNamespace:\t%v\n"+
		" Path:\t%v\n"+
		" ReadOnly:\t%v\n",
		glusterfs.EndpointsName, endpointsNamespace, glusterfs.Path, glusterfs.ReadOnly)
}
// printPersistentVolumeClaimVolumeSource prints the details of a PVC-backed volume.
func printPersistentVolumeClaimVolumeSource(claim *corev1.PersistentVolumeClaimVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tPersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)\n"+
		" ClaimName:\t%v\n"+
		" ReadOnly:\t%v\n",
		claim.ClaimName, claim.ReadOnly)
}
// printRBDVolumeSource prints the details of an RBD volume.
func printRBDVolumeSource(rbd *corev1.RBDVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tRBD (a Rados Block Device mount on the host that shares a pod's lifetime)\n"+
		" CephMonitors:\t%v\n"+
		" RBDImage:\t%v\n"+
		" FSType:\t%v\n"+
		" RBDPool:\t%v\n"+
		" RadosUser:\t%v\n"+
		" Keyring:\t%v\n"+
		" SecretRef:\t%v\n"+
		" ReadOnly:\t%v\n",
		rbd.CephMonitors, rbd.RBDImage, rbd.FSType, rbd.RBDPool, rbd.RadosUser, rbd.Keyring, rbd.SecretRef, rbd.ReadOnly)
}
// printRBDPersistentVolumeSource prints the details of an RBD PV source.
func printRBDPersistentVolumeSource(rbd *corev1.RBDPersistentVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tRBD (a Rados Block Device mount on the host that shares a pod's lifetime)\n"+
		" CephMonitors:\t%v\n"+
		" RBDImage:\t%v\n"+
		" FSType:\t%v\n"+
		" RBDPool:\t%v\n"+
		" RadosUser:\t%v\n"+
		" Keyring:\t%v\n"+
		" SecretRef:\t%v\n"+
		" ReadOnly:\t%v\n",
		rbd.CephMonitors, rbd.RBDImage, rbd.FSType, rbd.RBDPool, rbd.RadosUser, rbd.Keyring, rbd.SecretRef, rbd.ReadOnly)
}
// printDownwardAPIVolumeSource prints one "source -> path" line per
// item; items may carry a field ref, a resource field ref, or both.
func printDownwardAPIVolumeSource(d *corev1.DownwardAPIVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tDownwardAPI (a volume populated by information about the pod)\n Items:\n")
	for _, mapping := range d.Items {
		if mapping.FieldRef != nil {
			w.Write(LEVEL_3, "%v -> %v\n", mapping.FieldRef.FieldPath, mapping.Path)
		}
		if mapping.ResourceFieldRef != nil {
			w.Write(LEVEL_3, "%v -> %v\n", mapping.ResourceFieldRef.Resource, mapping.Path)
		}
	}
}
// printAzureDiskVolumeSource prints the details of an Azure Data Disk volume.
// NOTE(review): Kind, FSType, CachingMode and ReadOnly are dereferenced
// without nil checks — this assumes the API server has defaulted these
// pointer fields; verify callers never pass an undefaulted object.
func printAzureDiskVolumeSource(d *corev1.AzureDiskVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tAzureDisk (an Azure Data Disk mount on the host and bind mount to the pod)\n"+
		" DiskName:\t%v\n"+
		" DiskURI:\t%v\n"+
		" Kind: \t%v\n"+
		" FSType:\t%v\n"+
		" CachingMode:\t%v\n"+
		" ReadOnly:\t%v\n",
		d.DiskName, d.DataDiskURI, *d.Kind, *d.FSType, *d.CachingMode, *d.ReadOnly)
}
// printVsphereVolumeSource prints the details of a vSphere disk volume.
func printVsphereVolumeSource(vsphere *corev1.VsphereVirtualDiskVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tvSphereVolume (a Persistent Disk resource in vSphere)\n"+
		" VolumePath:\t%v\n"+
		" FSType:\t%v\n"+
		" StoragePolicyName:\t%v\n",
		vsphere.VolumePath, vsphere.FSType, vsphere.StoragePolicyName)
}
// printPhotonPersistentDiskVolumeSource prints the details of a Photon disk volume.
func printPhotonPersistentDiskVolumeSource(photon *corev1.PhotonPersistentDiskVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tPhotonPersistentDisk (a Persistent Disk resource in photon platform)\n"+
		" PdID:\t%v\n"+
		" FSType:\t%v\n",
		photon.PdID, photon.FSType)
}
// printCinderVolumeSource prints the details of a Cinder volume.
func printCinderVolumeSource(cinder *corev1.CinderVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tCinder (a Persistent Disk resource in OpenStack)\n"+
		" VolumeID:\t%v\n"+
		" FSType:\t%v\n"+
		" ReadOnly:\t%v\n"+
		" SecretRef:\t%v\n",
		cinder.VolumeID, cinder.FSType, cinder.ReadOnly, cinder.SecretRef)
}
// printCinderPersistentVolumeSource prints the details of a Cinder PV source.
func printCinderPersistentVolumeSource(cinder *corev1.CinderPersistentVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tCinder (a Persistent Disk resource in OpenStack)\n"+
		" VolumeID:\t%v\n"+
		" FSType:\t%v\n"+
		" ReadOnly:\t%v\n"+
		" SecretRef:\t%v\n",
		cinder.VolumeID, cinder.FSType, cinder.ReadOnly, cinder.SecretRef)
}
// printScaleIOVolumeSource prints the details of a ScaleIO volume.
func printScaleIOVolumeSource(sio *corev1.ScaleIOVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tScaleIO (a persistent volume backed by a block device in ScaleIO)\n"+
		" Gateway:\t%v\n"+
		" System:\t%v\n"+
		" Protection Domain:\t%v\n"+
		" Storage Pool:\t%v\n"+
		" Storage Mode:\t%v\n"+
		" VolumeName:\t%v\n"+
		" FSType:\t%v\n"+
		" ReadOnly:\t%v\n",
		sio.Gateway, sio.System, sio.ProtectionDomain, sio.StoragePool, sio.StorageMode, sio.VolumeName, sio.FSType, sio.ReadOnly)
}
// printScaleIOPersistentVolumeSource prints the details of a ScaleIO PV
// source; secret name/namespace are blank when SecretRef is nil.
func printScaleIOPersistentVolumeSource(sio *corev1.ScaleIOPersistentVolumeSource, w PrefixWriter) {
	var secretNS, secretName string
	if sio.SecretRef != nil {
		secretName = sio.SecretRef.Name
		secretNS = sio.SecretRef.Namespace
	}
	w.Write(LEVEL_2, "Type:\tScaleIO (a persistent volume backed by a block device in ScaleIO)\n"+
		" Gateway:\t%v\n"+
		" System:\t%v\n"+
		" Protection Domain:\t%v\n"+
		" Storage Pool:\t%v\n"+
		" Storage Mode:\t%v\n"+
		" VolumeName:\t%v\n"+
		" SecretName:\t%v\n"+
		" SecretNamespace:\t%v\n"+
		" FSType:\t%v\n"+
		" ReadOnly:\t%v\n",
		sio.Gateway, sio.System, sio.ProtectionDomain, sio.StoragePool, sio.StorageMode, sio.VolumeName, secretName, secretNS, sio.FSType, sio.ReadOnly)
}
// printLocalVolumeSource prints the details of a local volume.
func printLocalVolumeSource(ls *corev1.LocalVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tLocalVolume (a persistent volume backed by local storage on a node)\n"+
		" Path:\t%v\n",
		ls.Path)
}
// printCephFSVolumeSource prints the details of a CephFS volume.
func printCephFSVolumeSource(cephfs *corev1.CephFSVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tCephFS (a CephFS mount on the host that shares a pod's lifetime)\n"+
		" Monitors:\t%v\n"+
		" Path:\t%v\n"+
		" User:\t%v\n"+
		" SecretFile:\t%v\n"+
		" SecretRef:\t%v\n"+
		" ReadOnly:\t%v\n",
		cephfs.Monitors, cephfs.Path, cephfs.User, cephfs.SecretFile, cephfs.SecretRef, cephfs.ReadOnly)
}
// printCephFSPersistentVolumeSource prints the details of a CephFS PV source.
func printCephFSPersistentVolumeSource(cephfs *corev1.CephFSPersistentVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tCephFS (a CephFS mount on the host that shares a pod's lifetime)\n"+
		" Monitors:\t%v\n"+
		" Path:\t%v\n"+
		" User:\t%v\n"+
		" SecretFile:\t%v\n"+
		" SecretRef:\t%v\n"+
		" ReadOnly:\t%v\n",
		cephfs.Monitors, cephfs.Path, cephfs.User, cephfs.SecretFile, cephfs.SecretRef, cephfs.ReadOnly)
}
// printStorageOSVolumeSource prints the details of a StorageOS volume.
func printStorageOSVolumeSource(storageos *corev1.StorageOSVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tStorageOS (a StorageOS Persistent Disk resource)\n"+
		" VolumeName:\t%v\n"+
		" VolumeNamespace:\t%v\n"+
		" FSType:\t%v\n"+
		" ReadOnly:\t%v\n",
		storageos.VolumeName, storageos.VolumeNamespace, storageos.FSType, storageos.ReadOnly)
}
// printStorageOSPersistentVolumeSource prints the details of a StorageOS PV source.
func printStorageOSPersistentVolumeSource(storageos *corev1.StorageOSPersistentVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tStorageOS (a StorageOS Persistent Disk resource)\n"+
		" VolumeName:\t%v\n"+
		" VolumeNamespace:\t%v\n"+
		" FSType:\t%v\n"+
		" ReadOnly:\t%v\n",
		storageos.VolumeName, storageos.VolumeNamespace, storageos.FSType, storageos.ReadOnly)
}
// printFCVolumeSource prints the details of a Fibre Channel volume; an
// unset LUN prints as "<none>".
func printFCVolumeSource(fc *corev1.FCVolumeSource, w PrefixWriter) {
	lun := "<none>"
	if fc.Lun != nil {
		lun = strconv.Itoa(int(*fc.Lun))
	}
	w.Write(LEVEL_2, "Type:\tFC (a Fibre Channel disk)\n"+
		" TargetWWNs:\t%v\n"+
		" LUN:\t%v\n"+
		" FSType:\t%v\n"+
		" ReadOnly:\t%v\n",
		strings.Join(fc.TargetWWNs, ", "), lun, fc.FSType, fc.ReadOnly)
}
// printAzureFileVolumeSource prints the details of an Azure File volume.
func printAzureFileVolumeSource(azureFile *corev1.AzureFileVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tAzureFile (an Azure File Service mount on the host and bind mount to the pod)\n"+
		" SecretName:\t%v\n"+
		" ShareName:\t%v\n"+
		" ReadOnly:\t%v\n",
		azureFile.SecretName, azureFile.ShareName, azureFile.ReadOnly)
}
// printAzureFilePersistentVolumeSource prints the details of an Azure
// File PV source; an unset secret namespace prints as empty.
func printAzureFilePersistentVolumeSource(azureFile *corev1.AzureFilePersistentVolumeSource, w PrefixWriter) {
	ns := ""
	if azureFile.SecretNamespace != nil {
		ns = *azureFile.SecretNamespace
	}
	w.Write(LEVEL_2, "Type:\tAzureFile (an Azure File Service mount on the host and bind mount to the pod)\n"+
		" SecretName:\t%v\n"+
		" SecretNamespace:\t%v\n"+
		" ShareName:\t%v\n"+
		" ReadOnly:\t%v\n",
		azureFile.SecretName, ns, azureFile.ShareName, azureFile.ReadOnly)
}
// printFlexPersistentVolumeSource prints the details of a FlexVolume PV source.
func printFlexPersistentVolumeSource(flex *corev1.FlexPersistentVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tFlexVolume (a generic volume resource that is provisioned/attached using an exec based plugin)\n"+
		" Driver:\t%v\n"+
		" FSType:\t%v\n"+
		" SecretRef:\t%v\n"+
		" ReadOnly:\t%v\n"+
		" Options:\t%v\n",
		flex.Driver, flex.FSType, flex.SecretRef, flex.ReadOnly, flex.Options)
}
// printFlexVolumeSource prints the details of a FlexVolume volume.
func printFlexVolumeSource(flex *corev1.FlexVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tFlexVolume (a generic volume resource that is provisioned/attached using an exec based plugin)\n"+
		" Driver:\t%v\n"+
		" FSType:\t%v\n"+
		" SecretRef:\t%v\n"+
		" ReadOnly:\t%v\n"+
		" Options:\t%v\n",
		flex.Driver, flex.FSType, flex.SecretRef, flex.ReadOnly, flex.Options)
}
// printFlockerVolumeSource prints the details of a Flocker volume.
func printFlockerVolumeSource(flocker *corev1.FlockerVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tFlocker (a Flocker volume mounted by the Flocker agent)\n"+
		" DatasetName:\t%v\n"+
		" DatasetUUID:\t%v\n",
		flocker.DatasetName, flocker.DatasetUUID)
}
// printCSIVolumeSource prints the details of a CSI volume, followed by
// its volume attributes. Nil ReadOnly/FSType pointers render as their
// zero values (false / empty string).
func printCSIVolumeSource(csi *corev1.CSIVolumeSource, w PrefixWriter) {
	readOnly := csi.ReadOnly != nil && *csi.ReadOnly
	fsType := ""
	if csi.FSType != nil {
		fsType = *csi.FSType
	}
	w.Write(LEVEL_2, "Type:\tCSI (a Container Storage Interface (CSI) volume source)\n"+
		" Driver:\t%v\n"+
		" FSType:\t%v\n"+
		" ReadOnly:\t%v\n",
		csi.Driver, fsType, readOnly)
	printCSIPersistentVolumeAttributesMultiline(w, "VolumeAttributes", csi.VolumeAttributes)
}
// printCSIPersistentVolumeSource prints the details of a CSI PV source,
// followed by its volume attributes.
func printCSIPersistentVolumeSource(csi *corev1.CSIPersistentVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tCSI (a Container Storage Interface (CSI) volume source)\n"+
		" Driver:\t%v\n"+
		" FSType:\t%v\n"+
		" VolumeHandle:\t%v\n"+
		" ReadOnly:\t%v\n",
		csi.Driver, csi.FSType, csi.VolumeHandle, csi.ReadOnly)
	printCSIPersistentVolumeAttributesMultiline(w, "VolumeAttributes", csi.VolumeAttributes)
}
// printCSIPersistentVolumeAttributesMultiline prints the attribute map
// under the given title with no initial indent and no skipped keys.
func printCSIPersistentVolumeAttributesMultiline(w PrefixWriter, title string, annotations map[string]string) {
	printCSIPersistentVolumeAttributesMultilineIndent(w, "", title, "\t", annotations, sets.NewString())
}
// printCSIPersistentVolumeAttributesMultilineIndent prints the
// attributes sorted by key, one "key=value" per line, truncating values
// whose rendered line exceeds maxAnnotationLen. Keys present in skip
// are omitted; "<none>" is printed when nothing remains.
func printCSIPersistentVolumeAttributesMultilineIndent(w PrefixWriter, initialIndent, title, innerIndent string, attributes map[string]string, skip sets.String) {
	w.Write(LEVEL_2, "%s%s:%s", initialIndent, title, innerIndent)
	// Collect the keys that survive the skip filter, sorted for stable output.
	keys := make([]string, 0, len(attributes))
	for key := range attributes {
		if skip.Has(key) {
			continue
		}
		keys = append(keys, key)
	}
	// Fix: test the filtered key set rather than the raw map, so a map whose
	// entries are all skipped still prints "<none>" (the original duplicated
	// a len(attributes) check here, which missed that case). The stray
	// manual i++ inside the range loop below has also been removed.
	if len(keys) == 0 {
		w.WriteLine("<none>")
		return
	}
	sort.Strings(keys)
	for i, key := range keys {
		if i != 0 {
			w.Write(LEVEL_2, initialIndent)
			w.Write(LEVEL_2, innerIndent)
		}
		line := fmt.Sprintf("%s=%s", key, attributes[key])
		if len(line) > maxAnnotationLen {
			w.Write(LEVEL_2, "%s...\n", line[:maxAnnotationLen])
		} else {
			w.Write(LEVEL_2, "%s\n", line)
		}
	}
}
// PersistentVolumeDescriber generates information about a persistent volume.
type PersistentVolumeDescriber struct {
	clientset.Interface
}
// Describe returns the textual description of the named PersistentVolume and,
// when describerSettings.ShowEvents is set, its related events.
func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	c := d.CoreV1().PersistentVolumes()
	pv, err := c.Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are deliberately ignored: the description is
		// still useful without events.
		events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, pv)
	}
	return describePersistentVolume(pv, events)
}
// printVolumeNodeAffinity prints the persistent volume's node affinity
// constraints, or "<none>" when no required terms are set.
func printVolumeNodeAffinity(w PrefixWriter, affinity *corev1.VolumeNodeAffinity) {
	w.Write(LEVEL_0, "Node Affinity:\t")
	if affinity == nil || affinity.Required == nil {
		w.WriteLine("<none>")
		return
	}
	w.WriteLine("")
	// affinity.Required is guaranteed non-nil by the guard above, so the
	// former re-check was unreachable dead weight and has been removed.
	w.Write(LEVEL_1, "Required Terms:\t")
	if len(affinity.Required.NodeSelectorTerms) == 0 {
		w.WriteLine("<none>")
		return
	}
	w.WriteLine("")
	for i, term := range affinity.Required.NodeSelectorTerms {
		printNodeSelectorTermsMultilineWithIndent(w, LEVEL_2, fmt.Sprintf("Term %v", i), "\t", term.MatchExpressions)
	}
}
// printNodeSelectorTermsMultilineWithIndent prints a list of node selector
// requirements with a user-defined alignment, one requirement per line in the
// form "key operator [values]".
func printNodeSelectorTermsMultilineWithIndent(w PrefixWriter, indentLevel int, title, innerIndent string, reqs []corev1.NodeSelectorRequirement) {
	w.Write(indentLevel, "%s:%s", title, innerIndent)
	if len(reqs) == 0 {
		w.WriteLine("<none>")
		return
	}
	for i, req := range reqs {
		// Re-indent every line after the first so values line up under the title.
		if i != 0 {
			w.Write(indentLevel, "%s", innerIndent)
		}
		exprStr := fmt.Sprintf("%s %s", req.Key, strings.ToLower(string(req.Operator)))
		if len(req.Values) > 0 {
			exprStr = fmt.Sprintf("%s [%s]", exprStr, strings.Join(req.Values, ", "))
		}
		w.Write(LEVEL_0, "%s\n", exprStr)
	}
}
// describePersistentVolume renders the full "kubectl describe"-style text for
// a PersistentVolume: metadata, status, claim binding, capacity, node
// affinity, the volume source details, and optionally events.
func describePersistentVolume(pv *corev1.PersistentVolume, events *corev1.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", pv.Name)
		printLabelsMultiline(w, "Labels", pv.ObjectMeta.Labels)
		printAnnotationsMultiline(w, "Annotations", pv.ObjectMeta.Annotations)
		w.Write(LEVEL_0, "Finalizers:\t%v\n", pv.ObjectMeta.Finalizers)
		w.Write(LEVEL_0, "StorageClass:\t%s\n", storageutil.GetPersistentVolumeClass(pv))
		// A set deletion timestamp means the PV is being torn down; show how
		// long the termination has been in progress instead of the phase.
		if pv.ObjectMeta.DeletionTimestamp != nil {
			w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", translateTimestampSince(*pv.ObjectMeta.DeletionTimestamp))
		} else {
			w.Write(LEVEL_0, "Status:\t%v\n", pv.Status.Phase)
		}
		if pv.Spec.ClaimRef != nil {
			w.Write(LEVEL_0, "Claim:\t%s\n", pv.Spec.ClaimRef.Namespace+"/"+pv.Spec.ClaimRef.Name)
		} else {
			w.Write(LEVEL_0, "Claim:\t%s\n", "")
		}
		w.Write(LEVEL_0, "Reclaim Policy:\t%v\n", pv.Spec.PersistentVolumeReclaimPolicy)
		w.Write(LEVEL_0, "Access Modes:\t%s\n", storageutil.GetAccessModesAsString(pv.Spec.AccessModes))
		if pv.Spec.VolumeMode != nil {
			w.Write(LEVEL_0, "VolumeMode:\t%v\n", *pv.Spec.VolumeMode)
		}
		storage := pv.Spec.Capacity[corev1.ResourceStorage]
		w.Write(LEVEL_0, "Capacity:\t%s\n", storage.String())
		printVolumeNodeAffinity(w, pv.Spec.NodeAffinity)
		w.Write(LEVEL_0, "Message:\t%s\n", pv.Status.Message)
		w.Write(LEVEL_0, "Source:\n")
		// Exactly one volume-source field should be set on a valid PV; the
		// first non-nil one wins here.
		switch {
		case pv.Spec.HostPath != nil:
			printHostPathVolumeSource(pv.Spec.HostPath, w)
		case pv.Spec.GCEPersistentDisk != nil:
			printGCEPersistentDiskVolumeSource(pv.Spec.GCEPersistentDisk, w)
		case pv.Spec.AWSElasticBlockStore != nil:
			printAWSElasticBlockStoreVolumeSource(pv.Spec.AWSElasticBlockStore, w)
		case pv.Spec.NFS != nil:
			printNFSVolumeSource(pv.Spec.NFS, w)
		case pv.Spec.ISCSI != nil:
			printISCSIPersistentVolumeSource(pv.Spec.ISCSI, w)
		case pv.Spec.Glusterfs != nil:
			printGlusterfsPersistentVolumeSource(pv.Spec.Glusterfs, w)
		case pv.Spec.RBD != nil:
			printRBDPersistentVolumeSource(pv.Spec.RBD, w)
		case pv.Spec.Quobyte != nil:
			printQuobyteVolumeSource(pv.Spec.Quobyte, w)
		case pv.Spec.VsphereVolume != nil:
			printVsphereVolumeSource(pv.Spec.VsphereVolume, w)
		case pv.Spec.Cinder != nil:
			printCinderPersistentVolumeSource(pv.Spec.Cinder, w)
		case pv.Spec.AzureDisk != nil:
			printAzureDiskVolumeSource(pv.Spec.AzureDisk, w)
		case pv.Spec.PhotonPersistentDisk != nil:
			printPhotonPersistentDiskVolumeSource(pv.Spec.PhotonPersistentDisk, w)
		case pv.Spec.PortworxVolume != nil:
			printPortworxVolumeSource(pv.Spec.PortworxVolume, w)
		case pv.Spec.ScaleIO != nil:
			printScaleIOPersistentVolumeSource(pv.Spec.ScaleIO, w)
		case pv.Spec.Local != nil:
			printLocalVolumeSource(pv.Spec.Local, w)
		case pv.Spec.CephFS != nil:
			printCephFSPersistentVolumeSource(pv.Spec.CephFS, w)
		case pv.Spec.StorageOS != nil:
			printStorageOSPersistentVolumeSource(pv.Spec.StorageOS, w)
		case pv.Spec.FC != nil:
			printFCVolumeSource(pv.Spec.FC, w)
		case pv.Spec.AzureFile != nil:
			printAzureFilePersistentVolumeSource(pv.Spec.AzureFile, w)
		case pv.Spec.FlexVolume != nil:
			printFlexPersistentVolumeSource(pv.Spec.FlexVolume, w)
		case pv.Spec.Flocker != nil:
			printFlockerVolumeSource(pv.Spec.Flocker, w)
		case pv.Spec.CSI != nil:
			printCSIPersistentVolumeSource(pv.Spec.CSI, w)
		default:
			w.Write(LEVEL_1, "<unknown>\n")
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// PersistentVolumeClaimDescriber generates information about a persistent
// volume claim.
type PersistentVolumeClaimDescriber struct {
	clientset.Interface
}
// Describe returns the textual description of the named
// PersistentVolumeClaim, including the pods currently mounting it and, when
// requested, its events.
func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	c := d.CoreV1().PersistentVolumeClaims(namespace)
	pvc, err := c.Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	pc := d.CoreV1().Pods(namespace)
	mountPods, err := getMountPods(pc, pvc.Name)
	if err != nil {
		return "", err
	}
	// Fetch events only when asked to, matching every other describer in this
	// file (the previous code ignored describerSettings.ShowEvents). Event
	// lookup failures are deliberately ignored: the description is still
	// useful without events.
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, pvc)
	}
	return describePersistentVolumeClaim(pvc, events, mountPods)
}
// getMountPods lists all pods in the namespace and returns those that mount
// the PVC named pvcName.
func getMountPods(c corev1client.PodInterface, pvcName string) ([]corev1.Pod, error) {
	nsPods, err := c.List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return []corev1.Pod{}, err
	}
	var mounted []corev1.Pod
	for _, pod := range nsPods.Items {
		for _, vol := range getPvcs(pod.Spec.Volumes) {
			if vol.PersistentVolumeClaim.ClaimName == pvcName {
				mounted = append(mounted, pod)
			}
		}
	}
	return mounted, nil
}
// getPvcs filters the given volumes down to those backed by a
// PersistentVolumeClaim source.
func getPvcs(volumes []corev1.Volume) []corev1.Volume {
	var claims []corev1.Volume
	for _, vol := range volumes {
		if vol.VolumeSource.PersistentVolumeClaim == nil {
			continue
		}
		claims = append(claims, vol)
	}
	return claims
}
// describePersistentVolumeClaim renders the full "kubectl describe"-style
// text for a PVC: metadata, binding status, capacity/access modes, data
// source, the pods mounting it, conditions, and optionally events.
func describePersistentVolumeClaim(pvc *corev1.PersistentVolumeClaim, events *corev1.EventList, mountPods []corev1.Pod) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", pvc.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", pvc.Namespace)
		w.Write(LEVEL_0, "StorageClass:\t%s\n", storageutil.GetPersistentVolumeClaimClass(pvc))
		if pvc.ObjectMeta.DeletionTimestamp != nil {
			w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", translateTimestampSince(*pvc.ObjectMeta.DeletionTimestamp))
		} else {
			w.Write(LEVEL_0, "Status:\t%v\n", pvc.Status.Phase)
		}
		w.Write(LEVEL_0, "Volume:\t%s\n", pvc.Spec.VolumeName)
		printLabelsMultiline(w, "Labels", pvc.Labels)
		printAnnotationsMultiline(w, "Annotations", pvc.Annotations)
		w.Write(LEVEL_0, "Finalizers:\t%v\n", pvc.ObjectMeta.Finalizers)
		storage := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
		capacity := ""
		accessModes := ""
		// Only a bound claim (VolumeName set) has authoritative status values;
		// otherwise capacity and access modes are shown empty.
		if pvc.Spec.VolumeName != "" {
			accessModes = storageutil.GetAccessModesAsString(pvc.Status.AccessModes)
			storage = pvc.Status.Capacity[corev1.ResourceStorage]
			capacity = storage.String()
		}
		w.Write(LEVEL_0, "Capacity:\t%s\n", capacity)
		w.Write(LEVEL_0, "Access Modes:\t%s\n", accessModes)
		if pvc.Spec.VolumeMode != nil {
			w.Write(LEVEL_0, "VolumeMode:\t%v\n", *pvc.Spec.VolumeMode)
		}
		if pvc.Spec.DataSource != nil {
			w.Write(LEVEL_0, "DataSource:\n")
			if pvc.Spec.DataSource.APIGroup != nil {
				w.Write(LEVEL_1, "APIGroup:\t%v\n", *pvc.Spec.DataSource.APIGroup)
			}
			w.Write(LEVEL_1, "Kind:\t%v\n", pvc.Spec.DataSource.Kind)
			w.Write(LEVEL_1, "Name:\t%v\n", pvc.Spec.DataSource.Name)
		}
		printPodsMultiline(w, "Mounted By", mountPods)
		if len(pvc.Status.Conditions) > 0 {
			w.Write(LEVEL_0, "Conditions:\n")
			w.Write(LEVEL_1, "Type\tStatus\tLastProbeTime\tLastTransitionTime\tReason\tMessage\n")
			w.Write(LEVEL_1, "----\t------\t-----------------\t------------------\t------\t-------\n")
			for _, c := range pvc.Status.Conditions {
				w.Write(LEVEL_1, "%v \t%v \t%s \t%s \t%v \t%v\n",
					c.Type,
					c.Status,
					c.LastProbeTime.Time.Format(time.RFC1123Z),
					c.LastTransitionTime.Time.Format(time.RFC1123Z),
					c.Reason,
					c.Message)
			}
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// describeContainers prints a section for each container under the given
// label (e.g. "Containers", "Init Containers"), merging in per-container
// status when one with a matching name exists.
func describeContainers(label string, containers []corev1.Container, containerStatuses []corev1.ContainerStatus,
	resolverFn EnvVarResolverFunc, w PrefixWriter, space string) {
	// Index statuses by container name for constant-time lookup below.
	statuses := map[string]corev1.ContainerStatus{}
	for _, status := range containerStatuses {
		statuses[status.Name] = status
	}
	describeContainersLabel(containers, label, space, w)

	for _, container := range containers {
		status, ok := statuses[container.Name]
		describeContainerBasicInfo(container, status, ok, space, w)
		describeContainerCommand(container, w)
		if ok {
			// State/restart details are only meaningful with a live status.
			describeContainerState(status, w)
		}
		describeContainerResource(container, w)
		describeContainerProbe(container, w)
		if len(container.EnvFrom) > 0 {
			describeContainerEnvFrom(container, resolverFn, w)
		}
		describeContainerEnvVars(container, resolverFn, w)
		describeContainerVolumes(container, w)
	}
}
// describeContainersLabel writes the section heading for a container list,
// appending " <none>" when the list is empty.
func describeContainersLabel(containers []corev1.Container, label, space string, w PrefixWriter) {
	suffix := ""
	if len(containers) == 0 {
		suffix = " <none>"
	}
	w.Write(LEVEL_0, "%s%s:%s\n", space, label, suffix)
}
// describeContainerBasicInfo prints a container's name, image, and port
// information; IDs from the live status are included only when ok is true.
func describeContainerBasicInfo(container corev1.Container, status corev1.ContainerStatus, ok bool, space string, w PrefixWriter) {
	nameIndent := ""
	if space != "" {
		nameIndent = " "
	}
	w.Write(LEVEL_1, "%s%v:\n", nameIndent, container.Name)
	if ok {
		w.Write(LEVEL_2, "Container ID:\t%s\n", status.ContainerID)
	}
	w.Write(LEVEL_2, "Image:\t%s\n", container.Image)
	if ok {
		w.Write(LEVEL_2, "Image ID:\t%s\n", status.ImageID)
	}
	// Use the plural heading only when more than one port was rendered.
	if ports := describeContainerPorts(container.Ports); strings.Contains(ports, ",") {
		w.Write(LEVEL_2, "Ports:\t%s\n", ports)
	} else {
		w.Write(LEVEL_2, "Port:\t%s\n", stringOrNone(ports))
	}
	if hostPorts := describeContainerHostPorts(container.Ports); strings.Contains(hostPorts, ",") {
		w.Write(LEVEL_2, "Host Ports:\t%s\n", hostPorts)
	} else {
		w.Write(LEVEL_2, "Host Port:\t%s\n", stringOrNone(hostPorts))
	}
}
// describeContainerPorts renders container ports as a comma-separated
// "port/protocol" list.
func describeContainerPorts(cPorts []corev1.ContainerPort) string {
	parts := make([]string, 0, len(cPorts))
	for _, p := range cPorts {
		parts = append(parts, fmt.Sprintf("%d/%s", p.ContainerPort, p.Protocol))
	}
	return strings.Join(parts, ", ")
}
// describeContainerHostPorts renders host ports as a comma-separated
// "port/protocol" list.
func describeContainerHostPorts(cPorts []corev1.ContainerPort) string {
	parts := make([]string, 0, len(cPorts))
	for _, p := range cPorts {
		parts = append(parts, fmt.Sprintf("%d/%s", p.HostPort, p.Protocol))
	}
	return strings.Join(parts, ", ")
}
// describeContainerCommand prints the container's command and args, splitting
// embedded newlines so multi-line entries stay aligned.
func describeContainerCommand(container corev1.Container, w PrefixWriter) {
	writeItems := func(header string, items []string) {
		if len(items) == 0 {
			return
		}
		w.Write(LEVEL_2, "%s\n", header)
		for _, item := range items {
			for _, line := range strings.Split(item, "\n") {
				w.Write(LEVEL_3, "%s\n", line)
			}
		}
	}
	writeItems("Command:", container.Command)
	writeItems("Args:", container.Args)
}
// describeContainerResource prints the container's resource limits and
// requests, each as a sorted name/quantity table.
func describeContainerResource(container corev1.Container, w PrefixWriter) {
	writeList := func(header string, list corev1.ResourceList) {
		if len(list) > 0 {
			w.Write(LEVEL_2, "%s\n", header)
		}
		for _, name := range SortedResourceNames(list) {
			quantity := list[name]
			w.Write(LEVEL_3, "%s:\t%s\n", name, quantity.String())
		}
	}
	writeList("Limits:", container.Resources.Limits)
	writeList("Requests:", container.Resources.Requests)
}
// describeContainerState prints the container's current state, its previous
// terminated state (if any), readiness, and restart count.
func describeContainerState(status corev1.ContainerStatus, w PrefixWriter) {
	describeStatus("State", status.State, w)
	if status.LastTerminationState.Terminated != nil {
		describeStatus("Last State", status.LastTerminationState, w)
	}
	w.Write(LEVEL_2, "Ready:\t%v\n", printBool(status.Ready))
	w.Write(LEVEL_2, "Restart Count:\t%d\n", status.RestartCount)
}
// describeContainerProbe prints each configured probe (liveness, readiness,
// startup) in the order kubectl traditionally shows them.
func describeContainerProbe(container corev1.Container, w PrefixWriter) {
	probes := []struct {
		label string
		probe *corev1.Probe
	}{
		{"Liveness", container.LivenessProbe},
		{"Readiness", container.ReadinessProbe},
		{"Startup", container.StartupProbe},
	}
	for _, p := range probes {
		if p.probe != nil {
			w.Write(LEVEL_2, "%s:\t%s\n", p.label, DescribeProbe(p.probe))
		}
	}
}
// describeContainerVolumes prints the container's volume mounts and, when
// present, its raw block volume devices.
func describeContainerVolumes(container corev1.Container, w PrefixWriter) {
	// Show volumeMounts
	none := ""
	if len(container.VolumeMounts) == 0 {
		none = "\t<none>"
	}
	w.Write(LEVEL_2, "Mounts:%s\n", none)
	sort.Sort(SortableVolumeMounts(container.VolumeMounts))
	for _, mount := range container.VolumeMounts {
		flags := []string{}
		if mount.ReadOnly {
			flags = append(flags, "ro")
		} else {
			flags = append(flags, "rw")
		}
		if len(mount.SubPath) > 0 {
			flags = append(flags, fmt.Sprintf("path=%q", mount.SubPath))
		}
		w.Write(LEVEL_3, "%s from %s (%s)\n", mount.MountPath, mount.Name, strings.Join(flags, ","))
	}
	// Show volumeDevices if exists
	if len(container.VolumeDevices) > 0 {
		// The devices list is known non-empty here, so never reuse the
		// "<none>" marker computed for the mounts section above (the old code
		// printed "Devices:\t<none>" followed by devices when there were
		// devices but no mounts).
		w.Write(LEVEL_2, "Devices:\n")
		sort.Sort(SortableVolumeDevices(container.VolumeDevices))
		for _, device := range container.VolumeDevices {
			w.Write(LEVEL_3, "%s from %s\n", device.DevicePath, device.Name)
		}
	}
}
// describeContainerEnvVars prints each environment variable: literal values
// verbatim (with multi-line values continued on aligned lines), and
// ValueFrom-based variables with their source (fieldRef, resourceFieldRef,
// secret key, or config map key).
func describeContainerEnvVars(container corev1.Container, resolverFn EnvVarResolverFunc, w PrefixWriter) {
	none := ""
	if len(container.Env) == 0 {
		none = "\t<none>"
	}
	w.Write(LEVEL_2, "Environment:%s\n", none)

	for _, e := range container.Env {
		if e.ValueFrom == nil {
			// Literal value: print name on the first line, continuation
			// lines indented underneath.
			for i, s := range strings.Split(e.Value, "\n") {
				if i == 0 {
					w.Write(LEVEL_3, "%s:\t%s\n", e.Name, s)
				} else {
					w.Write(LEVEL_3, "\t%s\n", s)
				}
			}
			continue
		}

		switch {
		case e.ValueFrom.FieldRef != nil:
			// resolverFn may be nil (e.g. when describing a pod template);
			// then only the field path is shown.
			var valueFrom string
			if resolverFn != nil {
				valueFrom = resolverFn(e)
			}
			w.Write(LEVEL_3, "%s:\t%s (%s:%s)\n", e.Name, valueFrom, e.ValueFrom.FieldRef.APIVersion, e.ValueFrom.FieldRef.FieldPath)
		case e.ValueFrom.ResourceFieldRef != nil:
			valueFrom, err := resourcehelper.ExtractContainerResourceValue(e.ValueFrom.ResourceFieldRef, &container)
			if err != nil {
				valueFrom = ""
			}
			resource := e.ValueFrom.ResourceFieldRef.Resource
			// A "0" limit means the value falls back to node allocatable.
			if valueFrom == "0" && (resource == "limits.cpu" || resource == "limits.memory") {
				valueFrom = "node allocatable"
			}
			w.Write(LEVEL_3, "%s:\t%s (%s)\n", e.Name, valueFrom, resource)
		case e.ValueFrom.SecretKeyRef != nil:
			optional := e.ValueFrom.SecretKeyRef.Optional != nil && *e.ValueFrom.SecretKeyRef.Optional
			w.Write(LEVEL_3, "%s:\t<set to the key '%s' in secret '%s'>\tOptional: %t\n", e.Name, e.ValueFrom.SecretKeyRef.Key, e.ValueFrom.SecretKeyRef.Name, optional)
		case e.ValueFrom.ConfigMapKeyRef != nil:
			optional := e.ValueFrom.ConfigMapKeyRef.Optional != nil && *e.ValueFrom.ConfigMapKeyRef.Optional
			w.Write(LEVEL_3, "%s:\t<set to the key '%s' of config map '%s'>\tOptional: %t\n", e.Name, e.ValueFrom.ConfigMapKeyRef.Key, e.ValueFrom.ConfigMapKeyRef.Name, optional)
		}
	}
}
// describeContainerEnvFrom prints each envFrom source (ConfigMap or Secret),
// its optionality, and any key prefix.
func describeContainerEnvFrom(container corev1.Container, resolverFn EnvVarResolverFunc, w PrefixWriter) {
	suffix := ""
	if len(container.EnvFrom) == 0 {
		suffix = "\t<none>"
	}
	w.Write(LEVEL_2, "Environment Variables from:%s\n", suffix)

	for _, src := range container.EnvFrom {
		var from, name string
		var optional bool
		switch {
		case src.ConfigMapRef != nil:
			from = "ConfigMap"
			name = src.ConfigMapRef.Name
			optional = src.ConfigMapRef.Optional != nil && *src.ConfigMapRef.Optional
		case src.SecretRef != nil:
			from = "Secret"
			name = src.SecretRef.Name
			optional = src.SecretRef.Optional != nil && *src.SecretRef.Optional
		}
		if src.Prefix == "" {
			w.Write(LEVEL_3, "%s\t%s\tOptional: %t\n", name, from, optional)
		} else {
			w.Write(LEVEL_3, "%s\t%s with prefix '%s'\tOptional: %t\n", name, from, src.Prefix, optional)
		}
	}
}
// DescribeProbe is exported for consumers in other API groups that have probes
func DescribeProbe(probe *corev1.Probe) string {
	attrs := fmt.Sprintf("delay=%ds timeout=%ds period=%ds #success=%d #failure=%d", probe.InitialDelaySeconds, probe.TimeoutSeconds, probe.PeriodSeconds, probe.SuccessThreshold, probe.FailureThreshold)
	switch {
	case probe.Exec != nil:
		return fmt.Sprintf("exec %v %s", probe.Exec.Command, attrs)
	case probe.HTTPGet != nil:
		// Named u to avoid shadowing the net/url package.
		u := url.URL{
			Scheme: strings.ToLower(string(probe.HTTPGet.Scheme)),
			Path:   probe.HTTPGet.Path,
		}
		if port := probe.HTTPGet.Port.String(); len(port) > 0 {
			u.Host = net.JoinHostPort(probe.HTTPGet.Host, port)
		} else {
			u.Host = probe.HTTPGet.Host
		}
		return fmt.Sprintf("http-get %s %s", u.String(), attrs)
	case probe.TCPSocket != nil:
		return fmt.Sprintf("tcp-socket %s:%s %s", probe.TCPSocket.Host, probe.TCPSocket.Port.String(), attrs)
	}
	return fmt.Sprintf("unknown %s", attrs)
}
// EnvVarResolverFunc resolves the effective value of an environment variable
// whose value comes from a ValueFrom reference rather than a literal.
type EnvVarResolverFunc func(e corev1.EnvVar) string
// EnvValueRetriever returns a resolver that evaluates a fieldRef-based
// environment variable against the given pod. It is exported for use by
// describers in other packages.
func EnvValueRetriever(pod *corev1.Pod) EnvVarResolverFunc {
	return func(e corev1.EnvVar) string {
		gv, err := schema.ParseGroupVersion(e.ValueFrom.FieldRef.APIVersion)
		if err != nil {
			return ""
		}
		gvk := gv.WithKind("Pod")
		internalFieldPath, _, err := scheme.Scheme.ConvertFieldLabel(gvk, e.ValueFrom.FieldRef.FieldPath, "")
		if err != nil {
			return "" // pod validation should catch this on create
		}
		valueFrom, err := fieldpath.ExtractFieldPathAsString(pod, internalFieldPath)
		if err != nil {
			return "" // pod validation should catch this on create
		}
		return valueFrom
	}
}
// describeStatus prints one container state (Running/Waiting/Terminated)
// under the given heading; an empty state renders as Waiting.
func describeStatus(stateName string, state corev1.ContainerState, w PrefixWriter) {
	switch {
	case state.Running != nil:
		w.Write(LEVEL_2, "%s:\tRunning\n", stateName)
		w.Write(LEVEL_3, "Started:\t%v\n", state.Running.StartedAt.Time.Format(time.RFC1123Z))
	case state.Waiting != nil:
		w.Write(LEVEL_2, "%s:\tWaiting\n", stateName)
		if reason := state.Waiting.Reason; reason != "" {
			w.Write(LEVEL_3, "Reason:\t%s\n", reason)
		}
	case state.Terminated != nil:
		term := state.Terminated
		w.Write(LEVEL_2, "%s:\tTerminated\n", stateName)
		if term.Reason != "" {
			w.Write(LEVEL_3, "Reason:\t%s\n", term.Reason)
		}
		if term.Message != "" {
			w.Write(LEVEL_3, "Message:\t%s\n", term.Message)
		}
		w.Write(LEVEL_3, "Exit Code:\t%d\n", term.ExitCode)
		if term.Signal > 0 {
			w.Write(LEVEL_3, "Signal:\t%d\n", term.Signal)
		}
		w.Write(LEVEL_3, "Started:\t%s\n", term.StartedAt.Time.Format(time.RFC1123Z))
		w.Write(LEVEL_3, "Finished:\t%s\n", term.FinishedAt.Time.Format(time.RFC1123Z))
	default:
		w.Write(LEVEL_2, "%s:\tWaiting\n", stateName)
	}
}
// describeVolumeClaimTemplates prints each PVC template of a stateful
// workload, or "<none>" when there are no templates.
func describeVolumeClaimTemplates(templates []corev1.PersistentVolumeClaim, w PrefixWriter) {
	if len(templates) == 0 {
		w.Write(LEVEL_0, "Volume Claims:\t<none>\n")
		return
	}
	w.Write(LEVEL_0, "Volume Claims:\n")
	for i := range templates {
		pvc := &templates[i]
		w.Write(LEVEL_1, "Name:\t%s\n", pvc.Name)
		w.Write(LEVEL_1, "StorageClass:\t%s\n", storageutil.GetPersistentVolumeClaimClass(pvc))
		printLabelsMultilineWithIndent(w, " ", "Labels", "\t", pvc.Labels, sets.NewString())
		printLabelsMultilineWithIndent(w, " ", "Annotations", "\t", pvc.Annotations, sets.NewString())
		if capacity, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; ok {
			w.Write(LEVEL_1, "Capacity:\t%s\n", capacity.String())
		} else {
			w.Write(LEVEL_1, "Capacity:\t%s\n", "<default>")
		}
		w.Write(LEVEL_1, "Access Modes:\t%s\n", pvc.Spec.AccessModes)
	}
}
// printBoolPtr formats a *bool, rendering nil as "<unset>".
func printBoolPtr(value *bool) string {
	if value == nil {
		return "<unset>"
	}
	return printBool(*value)
}
// printBool renders a bool as "True" or "False".
func printBool(value bool) string {
	if !value {
		return "False"
	}
	return "True"
}
// ReplicationControllerDescriber generates information about a replication controller
// and the pods it has created.
type ReplicationControllerDescriber struct {
	clientset.Interface
}
// Describe returns the textual description of the named ReplicationController,
// including a summary of its pods' phases and, when requested, its events.
func (d *ReplicationControllerDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	rc := d.CoreV1().ReplicationControllers(namespace)
	pc := d.CoreV1().Pods(namespace)

	controller, err := rc.Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	// Count the controller's pods by phase, matching on selector and owner UID.
	running, waiting, succeeded, failed, err := getPodStatusForController(pc, labels.SelectorFromSet(controller.Spec.Selector), controller.UID)
	if err != nil {
		return "", err
	}

	var events *corev1.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are deliberately ignored.
		events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, controller)
	}

	return describeReplicationController(controller, events, running, waiting, succeeded, failed)
}
// describeReplicationController renders the "kubectl describe"-style text for
// a ReplicationController: metadata, replica counts, pod phase summary, the
// pod template, conditions, and optionally events.
func describeReplicationController(controller *corev1.ReplicationController, events *corev1.EventList, running, waiting, succeeded, failed int) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", controller.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", controller.Namespace)
		w.Write(LEVEL_0, "Selector:\t%s\n", labels.FormatLabels(controller.Spec.Selector))
		printLabelsMultiline(w, "Labels", controller.Labels)
		printAnnotationsMultiline(w, "Annotations", controller.Annotations)
		// NOTE(review): Spec.Replicas is dereferenced unguarded — presumably
		// always defaulted by the API server for RCs; confirm before reusing
		// this on undefaulted objects.
		w.Write(LEVEL_0, "Replicas:\t%d current / %d desired\n", controller.Status.Replicas, *controller.Spec.Replicas)
		w.Write(LEVEL_0, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed)
		DescribePodTemplate(controller.Spec.Template, w)
		if len(controller.Status.Conditions) > 0 {
			w.Write(LEVEL_0, "Conditions:\n Type\tStatus\tReason\n")
			w.Write(LEVEL_1, "----\t------\t------\n")
			for _, c := range controller.Status.Conditions {
				w.Write(LEVEL_1, "%v \t%v\t%v\n", c.Type, c.Status, c.Reason)
			}
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// DescribePodTemplate prints a workload's pod template: labels, annotations,
// service account, containers (init and regular), volumes, and priority class.
// A nil template renders as "<unset>".
func DescribePodTemplate(template *corev1.PodTemplateSpec, w PrefixWriter) {
	w.Write(LEVEL_0, "Pod Template:\n")
	if template == nil {
		w.Write(LEVEL_1, "<unset>")
		return
	}
	printLabelsMultiline(w, " Labels", template.Labels)
	if len(template.Annotations) > 0 {
		printAnnotationsMultiline(w, " Annotations", template.Annotations)
	}
	if len(template.Spec.ServiceAccountName) > 0 {
		w.Write(LEVEL_1, "Service Account:\t%s\n", template.Spec.ServiceAccountName)
	}
	if len(template.Spec.InitContainers) > 0 {
		// No statuses or env resolver are available for a bare template.
		describeContainers("Init Containers", template.Spec.InitContainers, nil, nil, w, " ")
	}
	describeContainers("Containers", template.Spec.Containers, nil, nil, w, " ")
	describeVolumes(template.Spec.Volumes, w, " ")
	if len(template.Spec.PriorityClassName) > 0 {
		w.Write(LEVEL_1, "Priority Class Name:\t%s\n", template.Spec.PriorityClassName)
	}
}
// ReplicaSetDescriber generates information about a ReplicaSet and the pods it has created.
type ReplicaSetDescriber struct {
	clientset.Interface
}
// Describe returns the textual description of the named ReplicaSet. A pod
// status lookup error is not fatal: it is passed through so the description
// can report it inline instead of failing entirely.
func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	rsc := d.AppsV1().ReplicaSets(namespace)
	pc := d.CoreV1().Pods(namespace)

	rs, err := rsc.Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
	if err != nil {
		return "", err
	}

	// getPodErr is deliberately carried into the description rather than
	// returned here.
	running, waiting, succeeded, failed, getPodErr := getPodStatusForController(pc, selector, rs.UID)

	var events *corev1.EventList
	if describerSettings.ShowEvents {
		events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, rs)
	}

	return describeReplicaSet(rs, events, running, waiting, succeeded, failed, getPodErr)
}
// describeReplicaSet renders the "kubectl describe"-style text for a
// ReplicaSet. When getPodErr is non-nil the pod status line reports the error
// instead of counts.
func describeReplicaSet(rs *appsv1.ReplicaSet, events *corev1.EventList, running, waiting, succeeded, failed int, getPodErr error) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", rs.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", rs.Namespace)
		w.Write(LEVEL_0, "Selector:\t%s\n", metav1.FormatLabelSelector(rs.Spec.Selector))
		printLabelsMultiline(w, "Labels", rs.Labels)
		printAnnotationsMultiline(w, "Annotations", rs.Annotations)
		if controlledBy := printController(rs); len(controlledBy) > 0 {
			w.Write(LEVEL_0, "Controlled By:\t%s\n", controlledBy)
		}
		// NOTE(review): Spec.Replicas is dereferenced unguarded — presumably
		// always defaulted by the API server; confirm before reusing on
		// undefaulted objects.
		w.Write(LEVEL_0, "Replicas:\t%d current / %d desired\n", rs.Status.Replicas, *rs.Spec.Replicas)
		w.Write(LEVEL_0, "Pods Status:\t")
		if getPodErr != nil {
			w.Write(LEVEL_0, "error in fetching pods: %s\n", getPodErr)
		} else {
			w.Write(LEVEL_0, "%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed)
		}
		DescribePodTemplate(&rs.Spec.Template, w)
		if len(rs.Status.Conditions) > 0 {
			w.Write(LEVEL_0, "Conditions:\n Type\tStatus\tReason\n")
			w.Write(LEVEL_1, "----\t------\t------\n")
			for _, c := range rs.Status.Conditions {
				w.Write(LEVEL_1, "%v \t%v\t%v\n", c.Type, c.Status, c.Reason)
			}
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// JobDescriber generates information about a job and the pods it has created.
type JobDescriber struct {
	clientset.Interface
}
// Describe returns the textual description of the named Job and, when
// requested, its events.
func (d *JobDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	job, err := d.BatchV1().Jobs(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	var events *corev1.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are deliberately ignored.
		events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, job)
	}

	return describeJob(job, events)
}
// describeJob renders the "kubectl describe"-style text for a Job: metadata,
// parallelism/completions, timing, pod status counts, the pod template, and
// optionally events.
func describeJob(job *batchv1.Job, events *corev1.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", job.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", job.Namespace)
		if selector, err := metav1.LabelSelectorAsSelector(job.Spec.Selector); err == nil {
			w.Write(LEVEL_0, "Selector:\t%s\n", selector)
		} else {
			w.Write(LEVEL_0, "Selector:\tFailed to get selector: %s\n", err)
		}
		printLabelsMultiline(w, "Labels", job.Labels)
		printAnnotationsMultiline(w, "Annotations", job.Annotations)
		if controlledBy := printController(job); len(controlledBy) > 0 {
			w.Write(LEVEL_0, "Controlled By:\t%s\n", controlledBy)
		}
		// Guard the pointer dereference: Parallelism is optional in the API
		// and the old unguarded *job.Spec.Parallelism would panic on an
		// undefaulted object. This mirrors the Completions handling below and
		// describeJobTemplate's treatment of the same field.
		if job.Spec.Parallelism != nil {
			w.Write(LEVEL_0, "Parallelism:\t%d\n", *job.Spec.Parallelism)
		} else {
			w.Write(LEVEL_0, "Parallelism:\t<unset>\n")
		}
		if job.Spec.Completions != nil {
			w.Write(LEVEL_0, "Completions:\t%d\n", *job.Spec.Completions)
		} else {
			w.Write(LEVEL_0, "Completions:\t<unset>\n")
		}
		if job.Status.StartTime != nil {
			w.Write(LEVEL_0, "Start Time:\t%s\n", job.Status.StartTime.Time.Format(time.RFC1123Z))
		}
		if job.Status.CompletionTime != nil {
			w.Write(LEVEL_0, "Completed At:\t%s\n", job.Status.CompletionTime.Time.Format(time.RFC1123Z))
		}
		if job.Status.StartTime != nil && job.Status.CompletionTime != nil {
			w.Write(LEVEL_0, "Duration:\t%s\n", duration.HumanDuration(job.Status.CompletionTime.Sub(job.Status.StartTime.Time)))
		}
		if job.Spec.ActiveDeadlineSeconds != nil {
			w.Write(LEVEL_0, "Active Deadline Seconds:\t%ds\n", *job.Spec.ActiveDeadlineSeconds)
		}
		w.Write(LEVEL_0, "Pods Statuses:\t%d Running / %d Succeeded / %d Failed\n", job.Status.Active, job.Status.Succeeded, job.Status.Failed)
		DescribePodTemplate(&job.Spec.Template, w)
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// CronJobDescriber generates information about a cron job and the jobs it has created.
type CronJobDescriber struct {
	// client is unexported here (unlike the embedded Interface used by the
	// other describers in this file).
	client clientset.Interface
}
// Describe returns the textual description of the named CronJob (batch/v1beta1)
// and, when requested, its events.
func (d *CronJobDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	cronJob, err := d.client.BatchV1beta1().CronJobs(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	var events *corev1.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are deliberately ignored.
		events, _ = d.client.CoreV1().Events(namespace).Search(scheme.Scheme, cronJob)
	}

	return describeCronJob(cronJob, events)
}
// describeCronJob renders the "kubectl describe"-style text for a CronJob:
// schedule and policy settings, the job template, last schedule time, active
// jobs, and optionally events. Optional pointer fields render as "<unset>".
func describeCronJob(cronJob *batchv1beta1.CronJob, events *corev1.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", cronJob.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", cronJob.Namespace)
		printLabelsMultiline(w, "Labels", cronJob.Labels)
		printAnnotationsMultiline(w, "Annotations", cronJob.Annotations)
		w.Write(LEVEL_0, "Schedule:\t%s\n", cronJob.Spec.Schedule)
		w.Write(LEVEL_0, "Concurrency Policy:\t%s\n", cronJob.Spec.ConcurrencyPolicy)
		w.Write(LEVEL_0, "Suspend:\t%s\n", printBoolPtr(cronJob.Spec.Suspend))
		if cronJob.Spec.SuccessfulJobsHistoryLimit != nil {
			w.Write(LEVEL_0, "Successful Job History Limit:\t%d\n", *cronJob.Spec.SuccessfulJobsHistoryLimit)
		} else {
			w.Write(LEVEL_0, "Successful Job History Limit:\t<unset>\n")
		}
		if cronJob.Spec.FailedJobsHistoryLimit != nil {
			w.Write(LEVEL_0, "Failed Job History Limit:\t%d\n", *cronJob.Spec.FailedJobsHistoryLimit)
		} else {
			w.Write(LEVEL_0, "Failed Job History Limit:\t<unset>\n")
		}
		if cronJob.Spec.StartingDeadlineSeconds != nil {
			w.Write(LEVEL_0, "Starting Deadline Seconds:\t%ds\n", *cronJob.Spec.StartingDeadlineSeconds)
		} else {
			w.Write(LEVEL_0, "Starting Deadline Seconds:\t<unset>\n")
		}
		describeJobTemplate(cronJob.Spec.JobTemplate, w)
		if cronJob.Status.LastScheduleTime != nil {
			w.Write(LEVEL_0, "Last Schedule Time:\t%s\n", cronJob.Status.LastScheduleTime.Time.Format(time.RFC1123Z))
		} else {
			w.Write(LEVEL_0, "Last Schedule Time:\t<unset>\n")
		}
		printActiveJobs(w, "Active Jobs", cronJob.Status.Active)
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// describeJobTemplate prints the job-spec fields of a CronJob's job template,
// rendering unset optional fields as "<unset>", then the pod template.
func describeJobTemplate(jobTemplate batchv1beta1.JobTemplateSpec, w PrefixWriter) {
	if jobTemplate.Spec.Selector == nil {
		w.Write(LEVEL_0, "Selector:\t<unset>\n")
	} else if selector, err := metav1.LabelSelectorAsSelector(jobTemplate.Spec.Selector); err == nil {
		w.Write(LEVEL_0, "Selector:\t%s\n", selector)
	} else {
		w.Write(LEVEL_0, "Selector:\tFailed to get selector: %s\n", err)
	}
	if p := jobTemplate.Spec.Parallelism; p != nil {
		w.Write(LEVEL_0, "Parallelism:\t%d\n", *p)
	} else {
		w.Write(LEVEL_0, "Parallelism:\t<unset>\n")
	}
	if c := jobTemplate.Spec.Completions; c != nil {
		w.Write(LEVEL_0, "Completions:\t%d\n", *c)
	} else {
		w.Write(LEVEL_0, "Completions:\t<unset>\n")
	}
	if d := jobTemplate.Spec.ActiveDeadlineSeconds; d != nil {
		w.Write(LEVEL_0, "Active Deadline Seconds:\t%ds\n", *d)
	}
	DescribePodTemplate(&jobTemplate.Spec.Template, w)
}
// printActiveJobs prints the names of the given job references as a
// comma-separated list, or "<none>" when empty.
func printActiveJobs(w PrefixWriter, title string, jobs []corev1.ObjectReference) {
	w.Write(LEVEL_0, "%s:\t", title)
	if len(jobs) == 0 {
		w.WriteLine("<none>")
		return
	}
	names := make([]string, 0, len(jobs))
	for _, job := range jobs {
		names = append(names, job.Name)
	}
	w.Write(LEVEL_0, "%s", strings.Join(names, ", "))
	w.WriteLine("")
}
// DaemonSetDescriber generates information about a daemon set and the pods it has created.
type DaemonSetDescriber struct {
	clientset.Interface
}
// Describe returns the textual description of the named DaemonSet, including
// a summary of its pods' phases and, when requested, its events.
func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	dc := d.AppsV1().DaemonSets(namespace)
	pc := d.CoreV1().Pods(namespace)

	daemon, err := dc.Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	selector, err := metav1.LabelSelectorAsSelector(daemon.Spec.Selector)
	if err != nil {
		return "", err
	}

	// Count the daemon set's pods by phase, matching on selector and owner UID.
	running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector, daemon.UID)
	if err != nil {
		return "", err
	}

	var events *corev1.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are deliberately ignored.
		events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, daemon)
	}

	return describeDaemonSet(daemon, events, running, waiting, succeeded, failed)
}
// describeDaemonSet renders a DaemonSet description: selector, labels,
// per-node scheduling counters, the pod status tally, the pod template, and
// (when non-nil) events.
func describeDaemonSet(daemon *appsv1.DaemonSet, events *corev1.EventList, running, waiting, succeeded, failed int) (string, error) {
    return tabbedString(func(out io.Writer) error {
        w := NewPrefixWriter(out)
        w.Write(LEVEL_0, "Name:\t%s\n", daemon.Name)
        selector, err := metav1.LabelSelectorAsSelector(daemon.Spec.Selector)
        if err != nil {
            // this shouldn't happen if LabelSelector passed validation
            return err
        }
        w.Write(LEVEL_0, "Selector:\t%s\n", selector)
        w.Write(LEVEL_0, "Node-Selector:\t%s\n", labels.FormatLabels(daemon.Spec.Template.Spec.NodeSelector))
        printLabelsMultiline(w, "Labels", daemon.Labels)
        printAnnotationsMultiline(w, "Annotations", daemon.Annotations)
        w.Write(LEVEL_0, "Desired Number of Nodes Scheduled: %d\n", daemon.Status.DesiredNumberScheduled)
        w.Write(LEVEL_0, "Current Number of Nodes Scheduled: %d\n", daemon.Status.CurrentNumberScheduled)
        w.Write(LEVEL_0, "Number of Nodes Scheduled with Up-to-date Pods: %d\n", daemon.Status.UpdatedNumberScheduled)
        w.Write(LEVEL_0, "Number of Nodes Scheduled with Available Pods: %d\n", daemon.Status.NumberAvailable)
        w.Write(LEVEL_0, "Number of Nodes Misscheduled: %d\n", daemon.Status.NumberMisscheduled)
        w.Write(LEVEL_0, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed)
        DescribePodTemplate(&daemon.Spec.Template, w)
        if events != nil {
            DescribeEvents(events, w)
        }
        return nil
    })
}
// SecretDescriber generates information about a secret
type SecretDescriber struct {
    clientset.Interface
}

// Describe fetches the named Secret and renders its description.
func (d *SecretDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
    secret, err := d.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
    if err != nil {
        return "", err
    }
    return describeSecret(secret)
}
// describeSecret renders a Secret's metadata, type, and data keys. Only the
// size of each value is printed, except for service-account token secrets,
// whose token payload is written verbatim.
func describeSecret(secret *corev1.Secret) (string, error) {
    return tabbedString(func(out io.Writer) error {
        w := NewPrefixWriter(out)
        w.Write(LEVEL_0, "Name:\t%s\n", secret.Name)
        w.Write(LEVEL_0, "Namespace:\t%s\n", secret.Namespace)
        printLabelsMultiline(w, "Labels", secret.Labels)
        // The last-applied-config annotation can embed the full secret; hide it.
        printAnnotationsMultilineWithFilter(w, "Annotations", secret.Annotations, sets.NewString(corev1.LastAppliedConfigAnnotation))
        w.Write(LEVEL_0, "\nType:\t%s\n", secret.Type)
        w.Write(LEVEL_0, "\nData\n====\n")
        for key, value := range secret.Data {
            if key == corev1.ServiceAccountTokenKey && secret.Type == corev1.SecretTypeServiceAccountToken {
                w.Write(LEVEL_0, "%s:\t%s\n", key, string(value))
                continue
            }
            w.Write(LEVEL_0, "%s:\t%d bytes\n", key, len(value))
        }
        return nil
    })
}
// IngressDescriber generates information about an Ingress.
type IngressDescriber struct {
    clientset.Interface
}

// Describe fetches the named Ingress and renders its description.
func (i *IngressDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
    ing, err := i.NetworkingV1beta1().Ingresses(namespace).Get(context.TODO(), name, metav1.GetOptions{})
    if err != nil {
        return "", err
    }
    return i.describeIngress(ing, describerSettings)
}
// describeBackend resolves an Ingress backend to the formatted endpoints of
// the service port it targets.
//
// Lookup errors for the Service and Endpoints objects are deliberately
// ignored: the describer degrades to partial output rather than failing the
// whole description (client-go Get returns a non-nil empty object on error).
func (i *IngressDescriber) describeBackend(ns string, backend *networkingv1beta1.IngressBackend) string {
    endpoints, _ := i.CoreV1().Endpoints(ns).Get(context.TODO(), backend.ServiceName, metav1.GetOptions{})
    service, _ := i.CoreV1().Services(ns).Get(context.TODO(), backend.ServiceName, metav1.GetOptions{})
    spName := ""
    // Use idx rather than i for the loop index: the receiver is named i,
    // and the original shadowed it here.
    for idx := range service.Spec.Ports {
        sp := &service.Spec.Ports[idx]
        switch backend.ServicePort.Type {
        case intstr.String:
            if backend.ServicePort.StrVal == sp.Name {
                spName = sp.Name
            }
        case intstr.Int:
            // IntVal is already int32; the original's int32() conversion was redundant.
            if backend.ServicePort.IntVal == sp.Port {
                spName = sp.Name
            }
        }
    }
    return formatEndpoints(endpoints, sets.NewString(spName))
}
// describeIngress renders an Ingress description: address, default backend,
// TLS section, the host/path rule table, annotations, and (optionally) events.
func (i *IngressDescriber) describeIngress(ing *networkingv1beta1.Ingress, describerSettings describe.DescriberSettings) (string, error) {
    return tabbedString(func(out io.Writer) error {
        w := NewPrefixWriter(out)
        w.Write(LEVEL_0, "Name:\t%v\n", ing.Name)
        w.Write(LEVEL_0, "Namespace:\t%v\n", ing.Namespace)
        w.Write(LEVEL_0, "Address:\t%v\n", loadBalancerStatusStringer(ing.Status.LoadBalancer, true))
        def := ing.Spec.Backend
        ns := ing.Namespace
        if def == nil {
            // Ingresses that don't specify a default backend inherit the
            // default backend in the kube-system namespace.
            def = &networkingv1beta1.IngressBackend{
                ServiceName: "default-http-backend",
                ServicePort: intstr.IntOrString{Type: intstr.Int, IntVal: 80},
            }
            ns = metav1.NamespaceSystem
        }
        w.Write(LEVEL_0, "Default backend:\t%s (%s)\n", backendStringer(def), i.describeBackend(ns, def))
        if len(ing.Spec.TLS) != 0 {
            describeIngressTLS(w, ing.Spec.TLS)
        }
        w.Write(LEVEL_0, "Rules:\n  Host\tPath\tBackends\n")
        w.Write(LEVEL_1, "----\t----\t--------\n")
        count := 0 // number of rules that actually have an HTTP section
        for _, rules := range ing.Spec.Rules {
            if rules.HTTP == nil {
                continue
            }
            count++
            host := rules.Host
            if len(host) == 0 {
                // An empty host matches all hosts; display it as a wildcard.
                host = "*"
            }
            w.Write(LEVEL_1, "%s\t\n", host)
            for _, path := range rules.HTTP.Paths {
                w.Write(LEVEL_2, "\t%s \t%s (%s)\n", path.Path, backendStringer(&path.Backend), i.describeBackend(ing.Namespace, &path.Backend))
            }
        }
        if count == 0 {
            // No HTTP rules at all: everything routes to the default backend.
            w.Write(LEVEL_1, "%s\t%s \t%s (%s)\n", "*", "*", backendStringer(def), i.describeBackend(ns, def))
        }
        printAnnotationsMultiline(w, "Annotations", ing.Annotations)
        if describerSettings.ShowEvents {
            // Best effort: a failed event search simply prints nothing.
            events, _ := i.CoreV1().Events(ing.Namespace).Search(scheme.Scheme, ing)
            if events != nil {
                DescribeEvents(events, w)
            }
        }
        return nil
    })
}
// describeIngressTLS prints the TLS section of an Ingress description, one
// line per TLS entry.
func describeIngressTLS(w PrefixWriter, ingTLS []networkingv1beta1.IngressTLS) {
    w.Write(LEVEL_0, "TLS:\n")
    for _, t := range ingTLS {
        hosts := strings.Join(t.Hosts, ",")
        if t.SecretName == "" {
            // No secret means TLS is terminated elsewhere and routed by SNI.
            w.Write(LEVEL_1, "SNI routes %v\n", hosts)
            continue
        }
        w.Write(LEVEL_1, "%v terminates %v\n", t.SecretName, hosts)
    }
}
// ServiceDescriber generates information about a service.
type ServiceDescriber struct {
    clientset.Interface
}

// Describe fetches the named Service along with its same-named Endpoints
// object and renders the description.
func (d *ServiceDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
    c := d.CoreV1().Services(namespace)
    service, err := c.Get(context.TODO(), name, metav1.GetOptions{})
    if err != nil {
        return "", err
    }
    // Endpoints lookup is best effort; describeService tolerates a nil result.
    endpoints, _ := d.CoreV1().Endpoints(namespace).Get(context.TODO(), name, metav1.GetOptions{})
    var events *corev1.EventList
    if describerSettings.ShowEvents {
        // Best effort: a failed event search simply leaves events nil.
        events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, service)
    }
    return describeService(service, endpoints, events)
}
// buildIngressString joins the load-balancer ingress points into a
// comma-separated list, preferring the IP over the hostname for each entry.
func buildIngressString(ingress []corev1.LoadBalancerIngress) string {
    parts := make([]string, 0, len(ingress))
    for _, ing := range ingress {
        if ing.IP != "" {
            parts = append(parts, ing.IP)
        } else {
            parts = append(parts, ing.Hostname)
        }
    }
    return strings.Join(parts, ", ")
}
// describeService renders a Service description: identity and labels,
// type/IP details, per-port information with resolved endpoints, session
// affinity and traffic policy, and (when non-nil) events.
func describeService(service *corev1.Service, endpoints *corev1.Endpoints, events *corev1.EventList) (string, error) {
    // The caller may pass nil endpoints (best-effort lookup); substitute an
    // empty object so formatEndpoints below prints "<none>"-style output.
    if endpoints == nil {
        endpoints = &corev1.Endpoints{}
    }
    return tabbedString(func(out io.Writer) error {
        w := NewPrefixWriter(out)
        w.Write(LEVEL_0, "Name:\t%s\n", service.Name)
        w.Write(LEVEL_0, "Namespace:\t%s\n", service.Namespace)
        printLabelsMultiline(w, "Labels", service.Labels)
        printAnnotationsMultiline(w, "Annotations", service.Annotations)
        w.Write(LEVEL_0, "Selector:\t%s\n", labels.FormatLabels(service.Spec.Selector))
        w.Write(LEVEL_0, "Type:\t%s\n", service.Spec.Type)
        w.Write(LEVEL_0, "IP:\t%s\n", service.Spec.ClusterIP)
        if service.Spec.IPFamily != nil {
            w.Write(LEVEL_0, "IPFamily:\t%s\n", *(service.Spec.IPFamily))
        }
        if len(service.Spec.ExternalIPs) > 0 {
            w.Write(LEVEL_0, "External IPs:\t%v\n", strings.Join(service.Spec.ExternalIPs, ","))
        }
        if service.Spec.LoadBalancerIP != "" {
            // The user-requested load-balancer IP (may differ from status below).
            w.Write(LEVEL_0, "IP:\t%s\n", service.Spec.LoadBalancerIP)
        }
        if service.Spec.ExternalName != "" {
            w.Write(LEVEL_0, "External Name:\t%s\n", service.Spec.ExternalName)
        }
        if len(service.Status.LoadBalancer.Ingress) > 0 {
            list := buildIngressString(service.Status.LoadBalancer.Ingress)
            w.Write(LEVEL_0, "LoadBalancer Ingress:\t%s\n", list)
        }
        for i := range service.Spec.Ports {
            sp := &service.Spec.Ports[i]
            name := sp.Name
            if name == "" {
                name = "<unset>"
            }
            w.Write(LEVEL_0, "Port:\t%s\t%d/%s\n", name, sp.Port, sp.Protocol)
            // TargetPort is an IntOrString; print whichever form is set.
            if sp.TargetPort.Type == intstr.Type(intstr.Int) {
                w.Write(LEVEL_0, "TargetPort:\t%d/%s\n", sp.TargetPort.IntVal, sp.Protocol)
            } else {
                w.Write(LEVEL_0, "TargetPort:\t%s/%s\n", sp.TargetPort.StrVal, sp.Protocol)
            }
            if sp.NodePort != 0 {
                w.Write(LEVEL_0, "NodePort:\t%s\t%d/%s\n", name, sp.NodePort, sp.Protocol)
            }
            w.Write(LEVEL_0, "Endpoints:\t%s\n", formatEndpoints(endpoints, sets.NewString(sp.Name)))
        }
        w.Write(LEVEL_0, "Session Affinity:\t%s\n", service.Spec.SessionAffinity)
        if service.Spec.ExternalTrafficPolicy != "" {
            w.Write(LEVEL_0, "External Traffic Policy:\t%s\n", service.Spec.ExternalTrafficPolicy)
        }
        if service.Spec.HealthCheckNodePort != 0 {
            w.Write(LEVEL_0, "HealthCheck NodePort:\t%d\n", service.Spec.HealthCheckNodePort)
        }
        if len(service.Spec.LoadBalancerSourceRanges) > 0 {
            w.Write(LEVEL_0, "LoadBalancer Source Ranges:\t%v\n", strings.Join(service.Spec.LoadBalancerSourceRanges, ","))
        }
        if events != nil {
            DescribeEvents(events, w)
        }
        return nil
    })
}
// EndpointsDescriber generates information about an Endpoint.
type EndpointsDescriber struct {
    clientset.Interface
}

// Describe fetches the named Endpoints object and renders its description.
func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
    c := d.CoreV1().Endpoints(namespace)
    ep, err := c.Get(context.TODO(), name, metav1.GetOptions{})
    if err != nil {
        return "", err
    }
    var events *corev1.EventList
    if describerSettings.ShowEvents {
        // Best effort: a failed event search simply leaves events nil.
        events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, ep)
    }
    return describeEndpoints(ep, events)
}
// describeEndpoints renders an Endpoints description: metadata plus, for each
// subset, its ready and not-ready addresses and port table.
func describeEndpoints(ep *corev1.Endpoints, events *corev1.EventList) (string, error) {
    return tabbedString(func(out io.Writer) error {
        w := NewPrefixWriter(out)
        w.Write(LEVEL_0, "Name:\t%s\n", ep.Name)
        w.Write(LEVEL_0, "Namespace:\t%s\n", ep.Namespace)
        printLabelsMultiline(w, "Labels", ep.Labels)
        printAnnotationsMultiline(w, "Annotations", ep.Annotations)
        w.Write(LEVEL_0, "Subsets:\n")
        for i := range ep.Subsets {
            subset := &ep.Subsets[i]
            // Addresses that passed readiness checks.
            addresses := make([]string, 0, len(subset.Addresses))
            for _, addr := range subset.Addresses {
                addresses = append(addresses, addr.IP)
            }
            addressesString := strings.Join(addresses, ",")
            if len(addressesString) == 0 {
                addressesString = "<none>"
            }
            w.Write(LEVEL_1, "Addresses:\t%s\n", addressesString)
            // Addresses whose pods exist but are not yet ready.
            notReadyAddresses := make([]string, 0, len(subset.NotReadyAddresses))
            for _, addr := range subset.NotReadyAddresses {
                notReadyAddresses = append(notReadyAddresses, addr.IP)
            }
            notReadyAddressesString := strings.Join(notReadyAddresses, ",")
            if len(notReadyAddressesString) == 0 {
                notReadyAddressesString = "<none>"
            }
            w.Write(LEVEL_1, "NotReadyAddresses:\t%s\n", notReadyAddressesString)
            if len(subset.Ports) > 0 {
                w.Write(LEVEL_1, "Ports:\n")
                w.Write(LEVEL_2, "Name\tPort\tProtocol\n")
                w.Write(LEVEL_2, "----\t----\t--------\n")
                for _, port := range subset.Ports {
                    name := port.Name
                    if len(name) == 0 {
                        name = "<unset>"
                    }
                    w.Write(LEVEL_2, "%s\t%d\t%s\n", name, port.Port, port.Protocol)
                }
            }
            w.Write(LEVEL_0, "\n")
        }
        if events != nil {
            DescribeEvents(events, w)
        }
        return nil
    })
}
// EndpointSliceDescriber generates information about an EndpointSlice.
type EndpointSliceDescriber struct {
    clientset.Interface
}

// Describe fetches the named EndpointSlice (discovery.k8s.io/v1beta1) and
// renders its description.
func (d *EndpointSliceDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
    c := d.DiscoveryV1beta1().EndpointSlices(namespace)
    eps, err := c.Get(context.TODO(), name, metav1.GetOptions{})
    if err != nil {
        return "", err
    }
    var events *corev1.EventList
    if describerSettings.ShowEvents {
        // Best effort: a failed event search simply leaves events nil.
        events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, eps)
    }
    return describeEndpointSlice(eps, events)
}
// describeEndpointSlice renders an EndpointSlice description: metadata,
// address type, the port table, and each endpoint's addresses, conditions,
// hostname, target reference, and topology.
func describeEndpointSlice(eps *discoveryv1beta1.EndpointSlice, events *corev1.EventList) (string, error) {
    return tabbedString(func(out io.Writer) error {
        w := NewPrefixWriter(out)
        w.Write(LEVEL_0, "Name:\t%s\n", eps.Name)
        w.Write(LEVEL_0, "Namespace:\t%s\n", eps.Namespace)
        printLabelsMultiline(w, "Labels", eps.Labels)
        printAnnotationsMultiline(w, "Annotations", eps.Annotations)
        w.Write(LEVEL_0, "AddressType:\t%s\n", string(eps.AddressType))
        if len(eps.Ports) == 0 {
            w.Write(LEVEL_0, "Ports: <unset>\n")
        } else {
            w.Write(LEVEL_0, "Ports:\n")
            w.Write(LEVEL_1, "Name\tPort\tProtocol\n")
            w.Write(LEVEL_1, "----\t----\t--------\n")
            for _, port := range eps.Ports {
                // Name and Port are optional pointers; show "<unset>" when nil.
                portName := "<unset>"
                if port.Name != nil && len(*port.Name) > 0 {
                    portName = *port.Name
                }
                portNum := "<unset>"
                if port.Port != nil {
                    portNum = strconv.Itoa(int(*port.Port))
                }
                // NOTE(review): Protocol is dereferenced without a nil check —
                // presumably it is defaulted by the API server; confirm.
                w.Write(LEVEL_1, "%s\t%s\t%s\n", portName, portNum, *port.Protocol)
            }
        }
        if len(eps.Endpoints) == 0 {
            w.Write(LEVEL_0, "Endpoints: <none>\n")
        } else {
            w.Write(LEVEL_0, "Endpoints:\n")
            for i := range eps.Endpoints {
                endpoint := &eps.Endpoints[i]
                addressesString := strings.Join(endpoint.Addresses, ",")
                if len(addressesString) == 0 {
                    addressesString = "<none>"
                }
                w.Write(LEVEL_1, "- Addresses:\t%s\n", addressesString)
                w.Write(LEVEL_2, "Conditions:\n")
                readyText := "<unset>"
                if endpoint.Conditions.Ready != nil {
                    readyText = strconv.FormatBool(*endpoint.Conditions.Ready)
                }
                w.Write(LEVEL_3, "Ready:\t%s\n", readyText)
                hostnameText := "<unset>"
                if endpoint.Hostname != nil {
                    hostnameText = *endpoint.Hostname
                }
                w.Write(LEVEL_2, "Hostname:\t%s\n", hostnameText)
                if endpoint.TargetRef != nil {
                    w.Write(LEVEL_2, "TargetRef:\t%s/%s\n", endpoint.TargetRef.Kind, endpoint.TargetRef.Name)
                }
                printLabelsMultilineWithIndent(w, "    ", "Topology", "\t", endpoint.Topology, sets.NewString())
            }
        }
        if events != nil {
            DescribeEvents(events, w)
        }
        return nil
    })
}
// ServiceAccountDescriber generates information about a service.
type ServiceAccountDescriber struct {
    clientset.Interface
}

// Describe fetches the named ServiceAccount, cross-references the secrets it
// mentions against the secrets that actually exist in the namespace, and
// renders the description.
func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
    c := d.CoreV1().ServiceAccounts(namespace)
    serviceAccount, err := c.Get(context.TODO(), name, metav1.GetOptions{})
    if err != nil {
        return "", err
    }
    // Token secrets that belong to this service account (matched by the
    // service-account name and UID annotations).
    tokens := []corev1.Secret{}
    // missingSecrets is the set of all secrets present in the
    // serviceAccount but not present in the set of existing secrets.
    missingSecrets := sets.NewString()
    secrets, err := d.CoreV1().Secrets(namespace).List(context.TODO(), metav1.ListOptions{})
    // errors are tolerated here in order to describe the serviceAccount with all
    // of the secrets that it references, even if those secrets cannot be fetched.
    if err == nil {
        // existingSecrets is the set of all secrets remaining on a
        // service account that are not present in the "tokens" slice.
        existingSecrets := sets.NewString()
        for _, s := range secrets.Items {
            if s.Type == corev1.SecretTypeServiceAccountToken {
                name := s.Annotations[corev1.ServiceAccountNameKey]
                uid := s.Annotations[corev1.ServiceAccountUIDKey]
                if name == serviceAccount.Name && uid == string(serviceAccount.UID) {
                    tokens = append(tokens, s)
                }
            }
            existingSecrets.Insert(s.Name)
        }
        // Any referenced secret not in the namespace listing is reported as missing.
        for _, s := range serviceAccount.Secrets {
            if !existingSecrets.Has(s.Name) {
                missingSecrets.Insert(s.Name)
            }
        }
        for _, s := range serviceAccount.ImagePullSecrets {
            if !existingSecrets.Has(s.Name) {
                missingSecrets.Insert(s.Name)
            }
        }
    }
    var events *corev1.EventList
    if describerSettings.ShowEvents {
        // Best effort: a failed event search simply leaves events nil.
        events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, serviceAccount)
    }
    return describeServiceAccount(serviceAccount, tokens, missingSecrets, events)
}
// describeServiceAccount renders a ServiceAccount description: metadata plus
// its image pull secrets, mountable secrets, and token secrets, flagging any
// secret in missingSecrets with "(not found)".
func describeServiceAccount(serviceAccount *corev1.ServiceAccount, tokens []corev1.Secret, missingSecrets sets.String, events *corev1.EventList) (string, error) {
    return tabbedString(func(out io.Writer) error {
        w := NewPrefixWriter(out)
        w.Write(LEVEL_0, "Name:\t%s\n", serviceAccount.Name)
        w.Write(LEVEL_0, "Namespace:\t%s\n", serviceAccount.Namespace)
        printLabelsMultiline(w, "Labels", serviceAccount.Labels)
        printAnnotationsMultiline(w, "Annotations", serviceAccount.Annotations)
        var (
            // Headers are padded to equal width so the tab-aligned values line up.
            emptyHeader = "                   "
            pullHeader  = "Image pull secrets:"
            mountHeader = "Mountable secrets: "
            tokenHeader = "Tokens:            "

            pullSecretNames  = []string{}
            mountSecretNames = []string{}
            tokenSecretNames = []string{}
        )
        for _, s := range serviceAccount.ImagePullSecrets {
            pullSecretNames = append(pullSecretNames, s.Name)
        }
        for _, s := range serviceAccount.Secrets {
            mountSecretNames = append(mountSecretNames, s.Name)
        }
        for _, s := range tokens {
            tokenSecretNames = append(tokenSecretNames, s.Name)
        }
        types := map[string][]string{
            pullHeader:  pullSecretNames,
            mountHeader: mountSecretNames,
            tokenHeader: tokenSecretNames,
        }
        // Iterate the map in sorted-key order for deterministic output.
        for _, header := range sets.StringKeySet(types).List() {
            names := types[header]
            if len(names) == 0 {
                w.Write(LEVEL_0, "%s\t<none>\n", header)
            } else {
                prefix := header
                for _, name := range names {
                    if missingSecrets.Has(name) {
                        w.Write(LEVEL_0, "%s\t%s (not found)\n", prefix, name)
                    } else {
                        w.Write(LEVEL_0, "%s\t%s\n", prefix, name)
                    }
                    // Only the first line of each section carries the header.
                    prefix = emptyHeader
                }
            }
        }
        if events != nil {
            DescribeEvents(events, w)
        }
        return nil
    })
}
// RoleDescriber generates information about an RBAC Role.
type RoleDescriber struct {
    clientset.Interface
}

// Describe fetches the named Role, normalizes its policy rules (break down,
// compact, sort), and renders the rule table.
func (d *RoleDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
    role, err := d.RbacV1().Roles(namespace).Get(context.TODO(), name, metav1.GetOptions{})
    if err != nil {
        return "", err
    }
    // Break each rule into single resource/verb tuples, then re-compact so
    // equivalent rules merge into one row.
    breakdownRules := []rbacv1.PolicyRule{}
    for _, rule := range role.Rules {
        breakdownRules = append(breakdownRules, rbac.BreakdownRule(rule)...)
    }
    compactRules, err := rbac.CompactRules(breakdownRules)
    if err != nil {
        return "", err
    }
    sort.Stable(rbac.SortableRuleSlice(compactRules))
    return tabbedString(func(out io.Writer) error {
        w := NewPrefixWriter(out)
        w.Write(LEVEL_0, "Name:\t%s\n", role.Name)
        printLabelsMultiline(w, "Labels", role.Labels)
        printAnnotationsMultiline(w, "Annotations", role.Annotations)
        w.Write(LEVEL_0, "PolicyRule:\n")
        w.Write(LEVEL_1, "Resources\tNon-Resource URLs\tResource Names\tVerbs\n")
        w.Write(LEVEL_1, "---------\t-----------------\t--------------\t-----\n")
        for _, r := range compactRules {
            w.Write(LEVEL_1, "%s\t%v\t%v\t%v\n", CombineResourceGroup(r.Resources, r.APIGroups), r.NonResourceURLs, r.ResourceNames, r.Verbs)
        }
        return nil
    })
}
// ClusterRoleDescriber generates information about an RBAC ClusterRole.
type ClusterRoleDescriber struct {
    clientset.Interface
}

// Describe fetches the named ClusterRole, normalizes its policy rules (break
// down, compact, sort), and renders the rule table.
func (d *ClusterRoleDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
    role, err := d.RbacV1().ClusterRoles().Get(context.TODO(), name, metav1.GetOptions{})
    if err != nil {
        return "", err
    }
    // Break each rule into single resource/verb tuples, then re-compact so
    // equivalent rules merge into one row.
    breakdownRules := []rbacv1.PolicyRule{}
    for _, rule := range role.Rules {
        breakdownRules = append(breakdownRules, rbac.BreakdownRule(rule)...)
    }
    compactRules, err := rbac.CompactRules(breakdownRules)
    if err != nil {
        return "", err
    }
    sort.Stable(rbac.SortableRuleSlice(compactRules))
    return tabbedString(func(out io.Writer) error {
        w := NewPrefixWriter(out)
        w.Write(LEVEL_0, "Name:\t%s\n", role.Name)
        printLabelsMultiline(w, "Labels", role.Labels)
        printAnnotationsMultiline(w, "Annotations", role.Annotations)
        w.Write(LEVEL_0, "PolicyRule:\n")
        w.Write(LEVEL_1, "Resources\tNon-Resource URLs\tResource Names\tVerbs\n")
        w.Write(LEVEL_1, "---------\t-----------------\t--------------\t-----\n")
        for _, r := range compactRules {
            w.Write(LEVEL_1, "%s\t%v\t%v\t%v\n", CombineResourceGroup(r.Resources, r.APIGroups), r.NonResourceURLs, r.ResourceNames, r.Verbs)
        }
        return nil
    })
}
// CombineResourceGroup formats the first resource and API group as
// "resource.group" (or just "resource" when the group is empty), preserving
// any "/subresource" suffix: e.g. ("deployments/scale", "apps") yields
// "deployments.apps/scale". An empty resource list yields "".
func CombineResourceGroup(resource, group []string) string {
    if len(resource) == 0 {
        return ""
    }
    segments := strings.SplitN(resource[0], "/", 2)

    var b strings.Builder
    b.WriteString(segments[0])
    if len(group) > 0 && group[0] != "" {
        b.WriteString(".")
        b.WriteString(group[0])
    }
    if len(segments) == 2 {
        b.WriteString("/")
        b.WriteString(segments[1])
    }
    return b.String()
}
// RoleBindingDescriber generates information about an RBAC RoleBinding.
type RoleBindingDescriber struct {
    clientset.Interface
}

// Describe fetches the named RoleBinding and renders its role reference and
// subject table.
func (d *RoleBindingDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
    binding, err := d.RbacV1().RoleBindings(namespace).Get(context.TODO(), name, metav1.GetOptions{})
    if err != nil {
        return "", err
    }
    return tabbedString(func(out io.Writer) error {
        w := NewPrefixWriter(out)
        w.Write(LEVEL_0, "Name:\t%s\n", binding.Name)
        printLabelsMultiline(w, "Labels", binding.Labels)
        printAnnotationsMultiline(w, "Annotations", binding.Annotations)
        w.Write(LEVEL_0, "Role:\n")
        w.Write(LEVEL_1, "Kind:\t%s\n", binding.RoleRef.Kind)
        w.Write(LEVEL_1, "Name:\t%s\n", binding.RoleRef.Name)
        w.Write(LEVEL_0, "Subjects:\n")
        w.Write(LEVEL_1, "Kind\tName\tNamespace\n")
        w.Write(LEVEL_1, "----\t----\t---------\n")
        for _, s := range binding.Subjects {
            w.Write(LEVEL_1, "%s\t%s\t%s\n", s.Kind, s.Name, s.Namespace)
        }
        return nil
    })
}
// ClusterRoleBindingDescriber generates information about an RBAC
// ClusterRoleBinding.
type ClusterRoleBindingDescriber struct {
    clientset.Interface
}

// Describe fetches the named ClusterRoleBinding and renders its role
// reference and subject table.
func (d *ClusterRoleBindingDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
    binding, err := d.RbacV1().ClusterRoleBindings().Get(context.TODO(), name, metav1.GetOptions{})
    if err != nil {
        return "", err
    }
    return tabbedString(func(out io.Writer) error {
        w := NewPrefixWriter(out)
        w.Write(LEVEL_0, "Name:\t%s\n", binding.Name)
        printLabelsMultiline(w, "Labels", binding.Labels)
        printAnnotationsMultiline(w, "Annotations", binding.Annotations)
        w.Write(LEVEL_0, "Role:\n")
        w.Write(LEVEL_1, "Kind:\t%s\n", binding.RoleRef.Kind)
        w.Write(LEVEL_1, "Name:\t%s\n", binding.RoleRef.Name)
        w.Write(LEVEL_0, "Subjects:\n")
        w.Write(LEVEL_1, "Kind\tName\tNamespace\n")
        w.Write(LEVEL_1, "----\t----\t---------\n")
        for idx := range binding.Subjects {
            subject := &binding.Subjects[idx]
            w.Write(LEVEL_1, "%s\t%s\t%s\n", subject.Kind, subject.Name, subject.Namespace)
        }
        return nil
    })
}
// NodeDescriber generates information about a node.
type NodeDescriber struct {
    clientset.Interface
}

// Describe fetches the named Node together with its heartbeat Lease and its
// non-terminated pods, and renders the full node description.
func (d *NodeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
    mc := d.CoreV1().Nodes()
    node, err := mc.Get(context.TODO(), name, metav1.GetOptions{})
    if err != nil {
        return "", err
    }

    // The node's heartbeat Lease lives in the dedicated node-lease namespace.
    lease, err := d.CoordinationV1().Leases(corev1.NamespaceNodeLease).Get(context.TODO(), name, metav1.GetOptions{})
    if err != nil {
        if !errors.IsNotFound(err) {
            return "", err
        }
        // Corresponding Lease object doesn't exist - print it accordingly.
        lease = nil
    }

    // Select pods on this node that are neither Succeeded nor Failed.
    fieldSelector, err := fields.ParseSelector("spec.nodeName=" + name + ",status.phase!=" + string(corev1.PodSucceeded) + ",status.phase!=" + string(corev1.PodFailed))
    if err != nil {
        return "", err
    }

    // in a policy aware setting, users may have access to a node, but not all pods
    // in that case, we note that the user does not have access to the pods
    canViewPods := true
    nodeNonTerminatedPodsList, err := d.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{FieldSelector: fieldSelector.String()})
    if err != nil {
        if !errors.IsForbidden(err) {
            return "", err
        }
        canViewPods = false
    }

    var events *corev1.EventList
    if describerSettings.ShowEvents {
        if ref, err := reference.GetReference(scheme.Scheme, node); err != nil {
            klog.Errorf("Unable to construct reference to '%#v': %v", node, err)
        } else {
            // TODO: We haven't decided the namespace for Node object yet.
            ref.UID = types.UID(ref.Name)
            events, _ = d.CoreV1().Events("").Search(scheme.Scheme, ref)
        }
    }

    return describeNode(node, lease, nodeNonTerminatedPodsList, events, canViewPods)
}
// describeNode renders a full Node description: identity, roles, taints,
// lease heartbeat info, conditions, addresses, capacity/allocatable resource
// lists, system info, CIDRs, per-pod resource usage (when readable), and
// (when non-nil) events. A nil lease prints "<unset>" lease fields.
func describeNode(node *corev1.Node, lease *coordinationv1.Lease, nodeNonTerminatedPodsList *corev1.PodList, events *corev1.EventList, canViewPods bool) (string, error) {
    return tabbedString(func(out io.Writer) error {
        w := NewPrefixWriter(out)
        w.Write(LEVEL_0, "Name:\t%s\n", node.Name)
        if roles := findNodeRoles(node); len(roles) > 0 {
            w.Write(LEVEL_0, "Roles:\t%s\n", strings.Join(roles, ","))
        } else {
            w.Write(LEVEL_0, "Roles:\t%s\n", "<none>")
        }
        printLabelsMultiline(w, "Labels", node.Labels)
        printAnnotationsMultiline(w, "Annotations", node.Annotations)
        w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", node.CreationTimestamp.Time.Format(time.RFC1123Z))
        printNodeTaintsMultiline(w, "Taints", node.Spec.Taints)
        w.Write(LEVEL_0, "Unschedulable:\t%v\n", node.Spec.Unschedulable)

        // Lease heartbeat details; each field individually falls back to
        // "<unset>" when the lease or the field is absent.
        w.Write(LEVEL_0, "Lease:\n")
        holderIdentity := "<unset>"
        if lease != nil && lease.Spec.HolderIdentity != nil {
            holderIdentity = *lease.Spec.HolderIdentity
        }
        w.Write(LEVEL_1, "HolderIdentity:\t%s\n", holderIdentity)
        acquireTime := "<unset>"
        if lease != nil && lease.Spec.AcquireTime != nil {
            acquireTime = lease.Spec.AcquireTime.Time.Format(time.RFC1123Z)
        }
        w.Write(LEVEL_1, "AcquireTime:\t%s\n", acquireTime)
        renewTime := "<unset>"
        if lease != nil && lease.Spec.RenewTime != nil {
            renewTime = lease.Spec.RenewTime.Time.Format(time.RFC1123Z)
        }
        w.Write(LEVEL_1, "RenewTime:\t%s\n", renewTime)

        if len(node.Status.Conditions) > 0 {
            w.Write(LEVEL_0, "Conditions:\n  Type\tStatus\tLastHeartbeatTime\tLastTransitionTime\tReason\tMessage\n")
            w.Write(LEVEL_1, "----\t------\t-----------------\t------------------\t------\t-------\n")
            for _, c := range node.Status.Conditions {
                w.Write(LEVEL_1, "%v \t%v \t%s \t%s \t%v \t%v\n",
                    c.Type,
                    c.Status,
                    c.LastHeartbeatTime.Time.Format(time.RFC1123Z),
                    c.LastTransitionTime.Time.Format(time.RFC1123Z),
                    c.Reason,
                    c.Message)
            }
        }

        w.Write(LEVEL_0, "Addresses:\n")
        for _, address := range node.Status.Addresses {
            w.Write(LEVEL_1, "%s:\t%s\n", address.Type, address.Address)
        }

        // Shared helper: print a resource list with deterministically sorted keys.
        printResourceList := func(resourceList corev1.ResourceList) {
            resources := make([]corev1.ResourceName, 0, len(resourceList))
            for resource := range resourceList {
                resources = append(resources, resource)
            }
            sort.Sort(SortableResourceNames(resources))
            for _, resource := range resources {
                value := resourceList[resource]
                w.Write(LEVEL_0, "  %s:\t%s\n", resource, value.String())
            }
        }

        if len(node.Status.Capacity) > 0 {
            w.Write(LEVEL_0, "Capacity:\n")
            printResourceList(node.Status.Capacity)
        }
        if len(node.Status.Allocatable) > 0 {
            w.Write(LEVEL_0, "Allocatable:\n")
            printResourceList(node.Status.Allocatable)
        }

        w.Write(LEVEL_0, "System Info:\n")
        w.Write(LEVEL_0, "  Machine ID:\t%s\n", node.Status.NodeInfo.MachineID)
        w.Write(LEVEL_0, "  System UUID:\t%s\n", node.Status.NodeInfo.SystemUUID)
        w.Write(LEVEL_0, "  Boot ID:\t%s\n", node.Status.NodeInfo.BootID)
        w.Write(LEVEL_0, "  Kernel Version:\t%s\n", node.Status.NodeInfo.KernelVersion)
        w.Write(LEVEL_0, "  OS Image:\t%s\n", node.Status.NodeInfo.OSImage)
        w.Write(LEVEL_0, "  Operating System:\t%s\n", node.Status.NodeInfo.OperatingSystem)
        w.Write(LEVEL_0, "  Architecture:\t%s\n", node.Status.NodeInfo.Architecture)
        w.Write(LEVEL_0, "  Container Runtime Version:\t%s\n", node.Status.NodeInfo.ContainerRuntimeVersion)
        w.Write(LEVEL_0, "  Kubelet Version:\t%s\n", node.Status.NodeInfo.KubeletVersion)
        w.Write(LEVEL_0, "  Kube-Proxy Version:\t%s\n", node.Status.NodeInfo.KubeProxyVersion)

        // remove when .PodCIDR is deprecated
        if len(node.Spec.PodCIDR) > 0 {
            w.Write(LEVEL_0, "PodCIDR:\t%s\n", node.Spec.PodCIDR)
        }
        if len(node.Spec.PodCIDRs) > 0 {
            w.Write(LEVEL_0, "PodCIDRs:\t%s\n", strings.Join(node.Spec.PodCIDRs, ","))
        }
        if len(node.Spec.ProviderID) > 0 {
            w.Write(LEVEL_0, "ProviderID:\t%s\n", node.Spec.ProviderID)
        }
        if canViewPods && nodeNonTerminatedPodsList != nil {
            describeNodeResource(nodeNonTerminatedPodsList, node, w)
        } else {
            // The caller lacked permission to list pods on this node.
            w.Write(LEVEL_0, "Pods:\tnot authorized\n")
        }
        if events != nil {
            DescribeEvents(events, w)
        }
        return nil
    })
}
// StatefulSetDescriber generates information about a StatefulSet and the pods
// it has created.
type StatefulSetDescriber struct {
    client clientset.Interface
}

// Describe fetches the named StatefulSet, tallies the status of the pods it
// owns, and renders the full description.
func (p *StatefulSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
    ps, err := p.client.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
    if err != nil {
        return "", err
    }
    pc := p.client.CoreV1().Pods(namespace)

    selector, err := metav1.LabelSelectorAsSelector(ps.Spec.Selector)
    if err != nil {
        return "", err
    }

    // Count only pods actually controlled by this StatefulSet (matched by UID).
    running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector, ps.UID)
    if err != nil {
        return "", err
    }

    var events *corev1.EventList
    if describerSettings.ShowEvents {
        // Best effort: a failed event search simply leaves events nil.
        events, _ = p.client.CoreV1().Events(namespace).Search(scheme.Scheme, ps)
    }

    return describeStatefulSet(ps, selector, events, running, waiting, succeeded, failed)
}
// describeStatefulSet renders a StatefulSet description: metadata, replica
// counts, update strategy, pod status tally, pod template, and volume claim
// templates, plus (when non-nil) events.
func describeStatefulSet(ps *appsv1.StatefulSet, selector labels.Selector, events *corev1.EventList, running, waiting, succeeded, failed int) (string, error) {
    return tabbedString(func(out io.Writer) error {
        w := NewPrefixWriter(out)
        w.Write(LEVEL_0, "Name:\t%s\n", ps.ObjectMeta.Name)
        w.Write(LEVEL_0, "Namespace:\t%s\n", ps.ObjectMeta.Namespace)
        w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", ps.CreationTimestamp.Time.Format(time.RFC1123Z))
        w.Write(LEVEL_0, "Selector:\t%s\n", selector)
        printLabelsMultiline(w, "Labels", ps.Labels)
        printAnnotationsMultiline(w, "Annotations", ps.Annotations)
        // NOTE(review): Spec.Replicas is dereferenced without a nil check —
        // presumably it is defaulted by the API server; confirm.
        w.Write(LEVEL_0, "Replicas:\t%d desired | %d total\n", *ps.Spec.Replicas, ps.Status.Replicas)
        w.Write(LEVEL_0, "Update Strategy:\t%s\n", ps.Spec.UpdateStrategy.Type)
        if ps.Spec.UpdateStrategy.RollingUpdate != nil {
            ru := ps.Spec.UpdateStrategy.RollingUpdate
            if ru.Partition != nil {
                w.Write(LEVEL_1, "Partition:\t%d\n", *ru.Partition)
            }
        }
        w.Write(LEVEL_0, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed)
        DescribePodTemplate(&ps.Spec.Template, w)
        describeVolumeClaimTemplates(ps.Spec.VolumeClaimTemplates, w)
        if events != nil {
            DescribeEvents(events, w)
        }
        return nil
    })
}
// CertificateSigningRequestDescriber generates information about a
// certificate signing request.
type CertificateSigningRequestDescriber struct {
    client clientset.Interface
}

// Describe fetches the named CSR, parses the embedded PKCS#10 certificate
// request, extracts the approval status, and renders the description.
func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
    csr, err := p.client.CertificatesV1beta1().CertificateSigningRequests().Get(context.TODO(), name, metav1.GetOptions{})
    if err != nil {
        return "", err
    }

    cr, err := certificate.ParseCSR(csr)
    if err != nil {
        // Error strings start lowercase per Go convention (staticcheck ST1005);
        // the original message was capitalized.
        return "", fmt.Errorf("error parsing CSR: %v", err)
    }
    status, err := extractCSRStatus(csr)
    if err != nil {
        return "", err
    }

    var events *corev1.EventList
    if describerSettings.ShowEvents {
        // Best effort: a failed event search simply leaves events nil.
        events, _ = p.client.CoreV1().Events(namespace).Search(scheme.Scheme, csr)
    }

    return describeCertificateSigningRequest(csr, cr, status, events)
}
// describeCertificateSigningRequest renders a CSR description: metadata,
// requesting user, signer, status, the parsed X.509 subject, any subject
// alternative names, and (when non-nil) events.
func describeCertificateSigningRequest(csr *certificatesv1beta1.CertificateSigningRequest, cr *x509.CertificateRequest, status string, events *corev1.EventList) (string, error) {
    // printListHelper prints "prefix name:\tv1\n prefix \tv2\n..." and nothing
    // at all for an empty list.
    printListHelper := func(w PrefixWriter, prefix, name string, values []string) {
        if len(values) == 0 {
            return
        }
        w.Write(LEVEL_0, prefix+name+":\t")
        w.Write(LEVEL_0, strings.Join(values, "\n"+prefix+"\t"))
        w.Write(LEVEL_0, "\n")
    }

    return tabbedString(func(out io.Writer) error {
        w := NewPrefixWriter(out)
        w.Write(LEVEL_0, "Name:\t%s\n", csr.Name)
        w.Write(LEVEL_0, "Labels:\t%s\n", labels.FormatLabels(csr.Labels))
        w.Write(LEVEL_0, "Annotations:\t%s\n", labels.FormatLabels(csr.Annotations))
        w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", csr.CreationTimestamp.Time.Format(time.RFC1123Z))
        w.Write(LEVEL_0, "Requesting User:\t%s\n", csr.Spec.Username)
        if csr.Spec.SignerName != nil {
            w.Write(LEVEL_0, "Signer:\t%s\n", *csr.Spec.SignerName)
        }
        w.Write(LEVEL_0, "Status:\t%s\n", status)

        w.Write(LEVEL_0, "Subject:\n")
        w.Write(LEVEL_0, "\tCommon Name:\t%s\n", cr.Subject.CommonName)
        w.Write(LEVEL_0, "\tSerial Number:\t%s\n", cr.Subject.SerialNumber)
        printListHelper(w, "\t", "Organization", cr.Subject.Organization)
        printListHelper(w, "\t", "Organizational Unit", cr.Subject.OrganizationalUnit)
        printListHelper(w, "\t", "Country", cr.Subject.Country)
        printListHelper(w, "\t", "Locality", cr.Subject.Locality)
        printListHelper(w, "\t", "Province", cr.Subject.Province)
        printListHelper(w, "\t", "StreetAddress", cr.Subject.StreetAddress)
        printListHelper(w, "\t", "PostalCode", cr.Subject.PostalCode)

        // Only print the SAN section when the request carries at least one SAN.
        if len(cr.DNSNames)+len(cr.EmailAddresses)+len(cr.IPAddresses)+len(cr.URIs) > 0 {
            w.Write(LEVEL_0, "Subject Alternative Names:\n")
            printListHelper(w, "\t", "DNS Names", cr.DNSNames)
            printListHelper(w, "\t", "Email Addresses", cr.EmailAddresses)
            var uris []string
            for _, uri := range cr.URIs {
                uris = append(uris, uri.String())
            }
            printListHelper(w, "\t", "URIs", uris)
            var ipaddrs []string
            for _, ipaddr := range cr.IPAddresses {
                ipaddrs = append(ipaddrs, ipaddr.String())
            }
            printListHelper(w, "\t", "IP Addresses", ipaddrs)
        }
        if events != nil {
            DescribeEvents(events, w)
        }
        return nil
    })
}
// HorizontalPodAutoscalerDescriber generates information about a horizontal pod autoscaler.
type HorizontalPodAutoscalerDescriber struct {
// client fetches the HPA object (and optionally its events) from the API server.
client clientset.Interface
}
// Describe fetches the named HorizontalPodAutoscaler and renders it as text.
// autoscaling/v2beta2 was introduced in v1.12 and autoscaling/v1 does not have
// full backward compatibility with it, so the v2beta2 object is tried first;
// when that lookup fails the describer falls back to the v1 object.
func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	var events *corev1.EventList

	if hpaV2, v2Err := d.client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).Get(context.TODO(), name, metav1.GetOptions{}); v2Err == nil {
		if describerSettings.ShowEvents {
			// Event lookup failures are tolerated; the description proceeds without them.
			events, _ = d.client.CoreV1().Events(namespace).Search(scheme.Scheme, hpaV2)
		}
		return describeHorizontalPodAutoscalerV2beta2(hpaV2, events, d)
	}

	hpaV1, err := d.client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	if describerSettings.ShowEvents {
		events, _ = d.client.CoreV1().Events(namespace).Search(scheme.Scheme, hpaV1)
	}
	return describeHorizontalPodAutoscalerV1(hpaV1, events, d)
}
// describeHorizontalPodAutoscalerV2beta2 renders an autoscaling/v2beta2 HPA as
// text: metadata, a "current / target" line per configured metric, replica
// bounds, optional scaling behavior, current/desired replica counts, status
// conditions, and events (when a non-nil list is supplied).
func describeHorizontalPodAutoscalerV2beta2(hpa *autoscalingv2beta2.HorizontalPodAutoscaler, events *corev1.EventList, d *HorizontalPodAutoscalerDescriber) (string, error) {
return tabbedString(func(out io.Writer) error {
w := NewPrefixWriter(out)
w.Write(LEVEL_0, "Name:\t%s\n", hpa.Name)
w.Write(LEVEL_0, "Namespace:\t%s\n", hpa.Namespace)
printLabelsMultiline(w, "Labels", hpa.Labels)
printAnnotationsMultiline(w, "Annotations", hpa.Annotations)
w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", hpa.CreationTimestamp.Time.Format(time.RFC1123Z))
w.Write(LEVEL_0, "Reference:\t%s/%s\n",
hpa.Spec.ScaleTargetRef.Kind,
hpa.Spec.ScaleTargetRef.Name)
w.Write(LEVEL_0, "Metrics:\t( current / target )\n")
// Current metric values are read positionally from Status.CurrentMetrics,
// which mirrors Spec.Metrics index-for-index; "<unknown>" is printed when
// the status entry is missing or unset.
for i, metric := range hpa.Spec.Metrics {
switch metric.Type {
case autoscalingv2beta2.ExternalMetricSourceType:
// External metrics target either an average value or a raw value.
if metric.External.Target.AverageValue != nil {
current := "<unknown>"
if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].External != nil &&
hpa.Status.CurrentMetrics[i].External.Current.AverageValue != nil {
current = hpa.Status.CurrentMetrics[i].External.Current.AverageValue.String()
}
w.Write(LEVEL_1, "%q (target average value):\t%s / %s\n", metric.External.Metric.Name, current, metric.External.Target.AverageValue.String())
} else {
current := "<unknown>"
if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].External != nil {
current = hpa.Status.CurrentMetrics[i].External.Current.Value.String()
}
w.Write(LEVEL_1, "%q (target value):\t%s / %s\n", metric.External.Metric.Name, current, metric.External.Target.Value.String())
}
case autoscalingv2beta2.PodsMetricSourceType:
current := "<unknown>"
if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Pods != nil {
current = hpa.Status.CurrentMetrics[i].Pods.Current.AverageValue.String()
}
w.Write(LEVEL_1, "%q on pods:\t%s / %s\n", metric.Pods.Metric.Name, current, metric.Pods.Target.AverageValue.String())
case autoscalingv2beta2.ObjectMetricSourceType:
w.Write(LEVEL_1, "\"%s\" on %s/%s ", metric.Object.Metric.Name, metric.Object.DescribedObject.Kind, metric.Object.DescribedObject.Name)
if metric.Object.Target.Type == autoscalingv2beta2.AverageValueMetricType {
current := "<unknown>"
if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Object != nil {
current = hpa.Status.CurrentMetrics[i].Object.Current.AverageValue.String()
}
w.Write(LEVEL_0, "(target average value):\t%s / %s\n", current, metric.Object.Target.AverageValue.String())
} else {
current := "<unknown>"
if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Object != nil {
current = hpa.Status.CurrentMetrics[i].Object.Current.Value.String()
}
w.Write(LEVEL_0, "(target value):\t%s / %s\n", current, metric.Object.Target.Value.String())
}
case autoscalingv2beta2.ResourceMetricSourceType:
w.Write(LEVEL_1, "resource %s on pods", string(metric.Resource.Name))
if metric.Resource.Target.AverageValue != nil {
current := "<unknown>"
if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Resource != nil {
current = hpa.Status.CurrentMetrics[i].Resource.Current.AverageValue.String()
}
w.Write(LEVEL_0, ":\t%s / %s\n", current, metric.Resource.Target.AverageValue.String())
} else {
// Utilization targets print "N% (absolute)"; an unset target
// utilization renders as "<auto>".
current := "<unknown>"
if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Resource != nil && hpa.Status.CurrentMetrics[i].Resource.Current.AverageUtilization != nil {
current = fmt.Sprintf("%d%% (%s)", *hpa.Status.CurrentMetrics[i].Resource.Current.AverageUtilization, hpa.Status.CurrentMetrics[i].Resource.Current.AverageValue.String())
}
target := "<auto>"
if metric.Resource.Target.AverageUtilization != nil {
target = fmt.Sprintf("%d%%", *metric.Resource.Target.AverageUtilization)
}
w.Write(LEVEL_1, "(as a percentage of request):\t%s / %s\n", current, target)
}
default:
w.Write(LEVEL_1, "<unknown metric type %q>", string(metric.Type))
}
}
minReplicas := "<unset>"
if hpa.Spec.MinReplicas != nil {
minReplicas = fmt.Sprintf("%d", *hpa.Spec.MinReplicas)
}
w.Write(LEVEL_0, "Min replicas:\t%s\n", minReplicas)
w.Write(LEVEL_0, "Max replicas:\t%d\n", hpa.Spec.MaxReplicas)
// only print the hpa behavior if present
if hpa.Spec.Behavior != nil {
w.Write(LEVEL_0, "Behavior:\n")
printDirectionBehavior(w, "Scale Up", hpa.Spec.Behavior.ScaleUp)
printDirectionBehavior(w, "Scale Down", hpa.Spec.Behavior.ScaleDown)
}
w.Write(LEVEL_0, "%s pods:\t", hpa.Spec.ScaleTargetRef.Kind)
w.Write(LEVEL_0, "%d current / %d desired\n", hpa.Status.CurrentReplicas, hpa.Status.DesiredReplicas)
if len(hpa.Status.Conditions) > 0 {
w.Write(LEVEL_0, "Conditions:\n")
w.Write(LEVEL_1, "Type\tStatus\tReason\tMessage\n")
w.Write(LEVEL_1, "----\t------\t------\t-------\n")
for _, c := range hpa.Status.Conditions {
w.Write(LEVEL_1, "%v\t%v\t%v\t%v\n", c.Type, c.Status, c.Reason, c.Message)
}
}
if events != nil {
DescribeEvents(events, w)
}
return nil
})
}
// printDirectionBehavior writes the scaling rules for one direction ("Scale Up"
// or "Scale Down"): the stabilization window, the select policy, and each
// individual policy. A nil rules value produces no output at all.
func printDirectionBehavior(w PrefixWriter, direction string, rules *autoscalingv2beta2.HPAScalingRules) {
	if rules == nil {
		return
	}
	w.Write(LEVEL_1, "%s:\n", direction)
	if rules.StabilizationWindowSeconds != nil {
		w.Write(LEVEL_2, "Stabilization Window: %d seconds\n", *rules.StabilizationWindowSeconds)
	}
	if len(rules.Policies) == 0 {
		return
	}
	// An unset select policy is shown as the API default, "Max".
	selectPolicy := autoscalingv2beta2.MaxPolicySelect
	if rules.SelectPolicy != nil {
		selectPolicy = *rules.SelectPolicy
	}
	w.Write(LEVEL_2, "Select Policy: %s\n", selectPolicy)
	w.Write(LEVEL_2, "Policies:\n")
	for _, policy := range rules.Policies {
		w.Write(LEVEL_3, "- Type: %s\tValue: %d\tPeriod: %d seconds\n", policy.Type, policy.Value, policy.PeriodSeconds)
	}
}
// describeHorizontalPodAutoscalerV1 renders an autoscaling/v1 HPA as text:
// metadata, the CPU utilization target/current pair (v1 only supports CPU
// utilization), replica bounds, current/desired replica counts, and events
// (when a non-nil list is supplied).
func describeHorizontalPodAutoscalerV1(hpa *autoscalingv1.HorizontalPodAutoscaler, events *corev1.EventList, d *HorizontalPodAutoscalerDescriber) (string, error) {
return tabbedString(func(out io.Writer) error {
w := NewPrefixWriter(out)
w.Write(LEVEL_0, "Name:\t%s\n", hpa.Name)
w.Write(LEVEL_0, "Namespace:\t%s\n", hpa.Namespace)
printLabelsMultiline(w, "Labels", hpa.Labels)
printAnnotationsMultiline(w, "Annotations", hpa.Annotations)
w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", hpa.CreationTimestamp.Time.Format(time.RFC1123Z))
w.Write(LEVEL_0, "Reference:\t%s/%s\n",
hpa.Spec.ScaleTargetRef.Kind,
hpa.Spec.ScaleTargetRef.Name)
// Both fields are optional pointers; the current value falls back to
// "<unknown>" when status has not been populated yet.
if hpa.Spec.TargetCPUUtilizationPercentage != nil {
w.Write(LEVEL_0, "Target CPU utilization:\t%d%%\n", *hpa.Spec.TargetCPUUtilizationPercentage)
current := "<unknown>"
if hpa.Status.CurrentCPUUtilizationPercentage != nil {
current = fmt.Sprintf("%d", *hpa.Status.CurrentCPUUtilizationPercentage)
}
w.Write(LEVEL_0, "Current CPU utilization:\t%s%%\n", current)
}
minReplicas := "<unset>"
if hpa.Spec.MinReplicas != nil {
minReplicas = fmt.Sprintf("%d", *hpa.Spec.MinReplicas)
}
w.Write(LEVEL_0, "Min replicas:\t%s\n", minReplicas)
w.Write(LEVEL_0, "Max replicas:\t%d\n", hpa.Spec.MaxReplicas)
w.Write(LEVEL_0, "%s pods:\t", hpa.Spec.ScaleTargetRef.Kind)
w.Write(LEVEL_0, "%d current / %d desired\n", hpa.Status.CurrentReplicas, hpa.Status.DesiredReplicas)
if events != nil {
DescribeEvents(events, w)
}
return nil
})
}
// describeNodeResource writes two sections for a node: a table of its
// non-terminated pods with per-pod request/limit percentages, and the
// aggregated requests/limits (CPU, memory, ephemeral storage, hugepages and
// extended resources) against the node's allocatable capacity.
func describeNodeResource(nodeNonTerminatedPodsList *corev1.PodList, node *corev1.Node, w PrefixWriter) {
	w.Write(LEVEL_0, "Non-terminated Pods:\t(%d in total)\n", len(nodeNonTerminatedPodsList.Items))
	w.Write(LEVEL_1, "Namespace\tName\t\tCPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\tAGE\n")
	w.Write(LEVEL_1, "---------\t----\t\t------------\t----------\t---------------\t-------------\t---\n")
	// Percentages are computed against allocatable when the node reports it,
	// falling back to raw capacity otherwise.
	allocatable := node.Status.Capacity
	if len(node.Status.Allocatable) > 0 {
		allocatable = node.Status.Allocatable
	}
	for _, pod := range nodeNonTerminatedPodsList.Items {
		req, limit := resourcehelper.PodRequestsAndLimits(&pod)
		cpuReq, cpuLimit, memoryReq, memoryLimit := req[corev1.ResourceCPU], limit[corev1.ResourceCPU], req[corev1.ResourceMemory], limit[corev1.ResourceMemory]
		// Guard against zero allocatable values so the division never yields
		// Inf/NaN (same handling as the aggregate section below).
		fractionCpuReq, fractionCpuLimit := float64(0), float64(0)
		if allocatable.Cpu().MilliValue() != 0 {
			fractionCpuReq = float64(cpuReq.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100
			fractionCpuLimit = float64(cpuLimit.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100
		}
		fractionMemoryReq, fractionMemoryLimit := float64(0), float64(0)
		if allocatable.Memory().Value() != 0 {
			fractionMemoryReq = float64(memoryReq.Value()) / float64(allocatable.Memory().Value()) * 100
			fractionMemoryLimit = float64(memoryLimit.Value()) / float64(allocatable.Memory().Value()) * 100
		}
		w.Write(LEVEL_1, "%s\t%s\t\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s\n", pod.Namespace, pod.Name,
			cpuReq.String(), int64(fractionCpuReq), cpuLimit.String(), int64(fractionCpuLimit),
			memoryReq.String(), int64(fractionMemoryReq), memoryLimit.String(), int64(fractionMemoryLimit), translateTimestampSince(pod.CreationTimestamp))
	}
	w.Write(LEVEL_0, "Allocated resources:\n (Total limits may be over 100 percent, i.e., overcommitted.)\n")
	w.Write(LEVEL_1, "Resource\tRequests\tLimits\n")
	w.Write(LEVEL_1, "--------\t--------\t------\n")
	reqs, limits := getPodsTotalRequestsAndLimits(nodeNonTerminatedPodsList)
	cpuReqs, cpuLimits, memoryReqs, memoryLimits, ephemeralstorageReqs, ephemeralstorageLimits :=
		reqs[corev1.ResourceCPU], limits[corev1.ResourceCPU], reqs[corev1.ResourceMemory], limits[corev1.ResourceMemory], reqs[corev1.ResourceEphemeralStorage], limits[corev1.ResourceEphemeralStorage]
	fractionCpuReqs := float64(0)
	fractionCpuLimits := float64(0)
	if allocatable.Cpu().MilliValue() != 0 {
		fractionCpuReqs = float64(cpuReqs.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100
		fractionCpuLimits = float64(cpuLimits.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100
	}
	fractionMemoryReqs := float64(0)
	fractionMemoryLimits := float64(0)
	if allocatable.Memory().Value() != 0 {
		fractionMemoryReqs = float64(memoryReqs.Value()) / float64(allocatable.Memory().Value()) * 100
		fractionMemoryLimits = float64(memoryLimits.Value()) / float64(allocatable.Memory().Value()) * 100
	}
	fractionEphemeralStorageReqs := float64(0)
	fractionEphemeralStorageLimits := float64(0)
	if allocatable.StorageEphemeral().Value() != 0 {
		fractionEphemeralStorageReqs = float64(ephemeralstorageReqs.Value()) / float64(allocatable.StorageEphemeral().Value()) * 100
		fractionEphemeralStorageLimits = float64(ephemeralstorageLimits.Value()) / float64(allocatable.StorageEphemeral().Value()) * 100
	}
	w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n",
		corev1.ResourceCPU, cpuReqs.String(), int64(fractionCpuReqs), cpuLimits.String(), int64(fractionCpuLimits))
	w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n",
		corev1.ResourceMemory, memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String(), int64(fractionMemoryLimits))
	w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n",
		corev1.ResourceEphemeralStorage, ephemeralstorageReqs.String(), int64(fractionEphemeralStorageReqs), ephemeralstorageLimits.String(), int64(fractionEphemeralStorageLimits))
	// Split the remaining allocatable resources into hugepages and extended
	// (non-standard, non-pod) resources, each printed in sorted order.
	extResources := make([]string, 0, len(allocatable))
	hugePageResources := make([]string, 0, len(allocatable))
	for resource := range allocatable {
		if resourcehelper.IsHugePageResourceName(resource) {
			hugePageResources = append(hugePageResources, string(resource))
		} else if !resourcehelper.IsStandardContainerResourceName(string(resource)) && resource != corev1.ResourcePods {
			extResources = append(extResources, string(resource))
		}
	}
	sort.Strings(extResources)
	sort.Strings(hugePageResources)
	for _, resource := range hugePageResources {
		hugePageSizeRequests, hugePageSizeLimits, hugePageSizeAllocable := reqs[corev1.ResourceName(resource)], limits[corev1.ResourceName(resource)], allocatable[corev1.ResourceName(resource)]
		fractionHugePageSizeRequests := float64(0)
		fractionHugePageSizeLimits := float64(0)
		if hugePageSizeAllocable.Value() != 0 {
			fractionHugePageSizeRequests = float64(hugePageSizeRequests.Value()) / float64(hugePageSizeAllocable.Value()) * 100
			fractionHugePageSizeLimits = float64(hugePageSizeLimits.Value()) / float64(hugePageSizeAllocable.Value()) * 100
		}
		w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n",
			resource, hugePageSizeRequests.String(), int64(fractionHugePageSizeRequests), hugePageSizeLimits.String(), int64(fractionHugePageSizeLimits))
	}
	// Extended resources have no meaningful percentage; print raw totals only.
	for _, ext := range extResources {
		extRequests, extLimits := reqs[corev1.ResourceName(ext)], limits[corev1.ResourceName(ext)]
		w.Write(LEVEL_1, "%s\t%s\t%s\n", ext, extRequests.String(), extLimits.String())
	}
}
// getPodsTotalRequestsAndLimits sums the resource requests and limits of every
// pod in podList into two fresh maps keyed by resource name.
func getPodsTotalRequestsAndLimits(podList *corev1.PodList) (reqs map[corev1.ResourceName]resource.Quantity, limits map[corev1.ResourceName]resource.Quantity) {
	// accumulate folds src into dst, deep-copying on first sight of a key so
	// that quantities owned by the pod list are never mutated.
	accumulate := func(dst, src map[corev1.ResourceName]resource.Quantity) {
		for name, quantity := range src {
			existing, seen := dst[name]
			if !seen {
				dst[name] = quantity.DeepCopy()
				continue
			}
			existing.Add(quantity)
			dst[name] = existing
		}
	}
	reqs = map[corev1.ResourceName]resource.Quantity{}
	limits = map[corev1.ResourceName]resource.Quantity{}
	for i := range podList.Items {
		podReqs, podLimits := resourcehelper.PodRequestsAndLimits(&podList.Items[i])
		accumulate(reqs, podReqs)
		accumulate(limits, podLimits)
	}
	return reqs, limits
}
// DescribeEvents renders an event table (Type, Reason, Age, From, Message)
// sorted via event.SortableEvents, or "Events:\t<none>" when the list is empty.
func DescribeEvents(el *corev1.EventList, w PrefixWriter) {
if len(el.Items) == 0 {
w.Write(LEVEL_0, "Events:\t<none>\n")
return
}
// Flush pending tab-aligned output so the event table starts a fresh
// alignment section.
w.Flush()
sort.Sort(event.SortableEvents(el.Items))
w.Write(LEVEL_0, "Events:\n Type\tReason\tAge\tFrom\tMessage\n")
w.Write(LEVEL_1, "----\t------\t----\t----\t-------\n")
for _, e := range el.Items {
// Repeated events show "age (xN over span)"; singletons show only the age.
var interval string
if e.Count > 1 {
interval = fmt.Sprintf("%s (x%d over %s)", translateTimestampSince(e.LastTimestamp), e.Count, translateTimestampSince(e.FirstTimestamp))
} else {
interval = translateTimestampSince(e.FirstTimestamp)
}
w.Write(LEVEL_1, "%v\t%v\t%s\t%v\t%v\n",
e.Type,
e.Reason,
interval,
formatEventSource(e.Source),
strings.TrimSpace(e.Message),
)
}
}
// DeploymentDescriber generates information about a deployment.
type DeploymentDescriber struct {
// client fetches the Deployment, its ReplicaSets and optionally its events.
client clientset.Interface
}
// Describe fetches the named Deployment, resolves its label selector and
// (optionally) its events, and renders a textual description of it.
func (dd *DeploymentDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	deployment, err := dd.client.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	selector, selErr := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
	if selErr != nil {
		return "", selErr
	}
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are tolerated; the description proceeds without them.
		events, _ = dd.client.CoreV1().Events(namespace).Search(scheme.Scheme, deployment)
	}
	return describeDeployment(deployment, selector, deployment, events, dd)
}
// describeDeployment renders a Deployment as text: metadata, replica counts,
// strategy, the pod template, status conditions, old/new ReplicaSets resolved
// via the API, and events (when a non-nil list is supplied). Note that d and
// internalDeployment are currently passed the same object by the caller.
func describeDeployment(d *appsv1.Deployment, selector labels.Selector, internalDeployment *appsv1.Deployment, events *corev1.EventList, dd *DeploymentDescriber) (string, error) {
return tabbedString(func(out io.Writer) error {
w := NewPrefixWriter(out)
w.Write(LEVEL_0, "Name:\t%s\n", d.ObjectMeta.Name)
w.Write(LEVEL_0, "Namespace:\t%s\n", d.ObjectMeta.Namespace)
w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", d.CreationTimestamp.Time.Format(time.RFC1123Z))
printLabelsMultiline(w, "Labels", d.Labels)
printAnnotationsMultiline(w, "Annotations", d.Annotations)
w.Write(LEVEL_0, "Selector:\t%s\n", selector)
w.Write(LEVEL_0, "Replicas:\t%d desired | %d updated | %d total | %d available | %d unavailable\n", *(d.Spec.Replicas), d.Status.UpdatedReplicas, d.Status.Replicas, d.Status.AvailableReplicas, d.Status.UnavailableReplicas)
w.Write(LEVEL_0, "StrategyType:\t%s\n", d.Spec.Strategy.Type)
w.Write(LEVEL_0, "MinReadySeconds:\t%d\n", d.Spec.MinReadySeconds)
// RollingUpdate parameters exist only for the RollingUpdate strategy.
if d.Spec.Strategy.RollingUpdate != nil {
ru := d.Spec.Strategy.RollingUpdate
w.Write(LEVEL_0, "RollingUpdateStrategy:\t%s max unavailable, %s max surge\n", ru.MaxUnavailable.String(), ru.MaxSurge.String())
}
DescribePodTemplate(&internalDeployment.Spec.Template, w)
if len(d.Status.Conditions) > 0 {
w.Write(LEVEL_0, "Conditions:\n Type\tStatus\tReason\n")
w.Write(LEVEL_1, "----\t------\t------\n")
for _, c := range d.Status.Conditions {
w.Write(LEVEL_1, "%v \t%v\t%v\n", c.Type, c.Status, c.Reason)
}
}
// ReplicaSet lookup is best-effort: on error the section is simply omitted.
oldRSs, _, newRS, err := deploymentutil.GetAllReplicaSets(d, dd.client.AppsV1())
if err == nil {
w.Write(LEVEL_0, "OldReplicaSets:\t%s\n", printReplicaSetsByLabels(oldRSs))
var newRSs []*appsv1.ReplicaSet
if newRS != nil {
newRSs = append(newRSs, newRS)
}
w.Write(LEVEL_0, "NewReplicaSet:\t%s\n", printReplicaSetsByLabels(newRSs))
}
if events != nil {
DescribeEvents(events, w)
}
return nil
})
}
// printReplicaSetsByLabels formats the given ReplicaSets as a comma-separated
// list of "name (current/desired replicas created)" entries, or "<none>" when
// the slice is empty.
func printReplicaSetsByLabels(matchingRSs []*appsv1.ReplicaSet) string {
	if len(matchingRSs) == 0 {
		return "<none>"
	}
	parts := make([]string, 0, len(matchingRSs))
	for _, rs := range matchingRSs {
		parts = append(parts, fmt.Sprintf("%s (%d/%d replicas created)", rs.Name, rs.Status.Replicas, *rs.Spec.Replicas))
	}
	return strings.Join(parts, ", ")
}
// getPodStatusForController counts the pods matching selector that are
// controlled by the given UID, bucketed by phase: running, waiting (pending),
// succeeded, and failed.
func getPodStatusForController(c corev1client.PodInterface, selector labels.Selector, uid types.UID) (running, waiting, succeeded, failed int, err error) {
	var pods *corev1.PodList
	pods, err = c.List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
	if err != nil {
		return
	}
	for i := range pods.Items {
		// Skip pods that are orphans or owned by other controllers.
		owner := metav1.GetControllerOf(&pods.Items[i])
		if owner == nil || owner.UID != uid {
			continue
		}
		switch pods.Items[i].Status.Phase {
		case corev1.PodRunning:
			running++
		case corev1.PodPending:
			waiting++
		case corev1.PodSucceeded:
			succeeded++
		case corev1.PodFailed:
			failed++
		}
	}
	return
}
// ConfigMapDescriber generates information about a ConfigMap.
type ConfigMapDescriber struct {
// The embedded clientset is used to fetch the ConfigMap and its events.
clientset.Interface
}
// Describe fetches the named ConfigMap and renders its metadata and data
// entries, followed by events when describerSettings.ShowEvents is set.
func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
c := d.CoreV1().ConfigMaps(namespace)
configMap, err := c.Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return "", err
}
return tabbedString(func(out io.Writer) error {
w := NewPrefixWriter(out)
w.Write(LEVEL_0, "Name:\t%s\n", configMap.Name)
w.Write(LEVEL_0, "Namespace:\t%s\n", configMap.Namespace)
printLabelsMultiline(w, "Labels", configMap.Labels)
printAnnotationsMultiline(w, "Annotations", configMap.Annotations)
w.Write(LEVEL_0, "\nData\n====\n")
// Data keys are emitted in Go's map iteration order, i.e. unsorted.
for k, v := range configMap.Data {
w.Write(LEVEL_0, "%s:\n----\n", k)
w.Write(LEVEL_0, "%s\n", string(v))
}
// Unlike most describers, an event-lookup failure here aborts the whole
// description rather than being ignored.
if describerSettings.ShowEvents {
events, err := d.CoreV1().Events(namespace).Search(scheme.Scheme, configMap)
if err != nil {
return err
}
if events != nil {
DescribeEvents(events, w)
}
}
return nil
})
}
// NetworkPolicyDescriber generates information about a networkingv1.NetworkPolicy.
type NetworkPolicyDescriber struct {
// The embedded clientset is used to fetch the NetworkPolicy.
clientset.Interface
}
// Describe fetches the named NetworkPolicy and renders it as text.
// describerSettings is accepted for interface conformance; NetworkPolicies
// are described without events here.
func (d *NetworkPolicyDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	networkPolicy, err := d.NetworkingV1().NetworkPolicies(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return describeNetworkPolicy(networkPolicy)
}
// describeNetworkPolicy renders a NetworkPolicy's metadata followed by its
// spec (selector, ingress/egress rules, policy types).
func describeNetworkPolicy(networkPolicy *networkingv1.NetworkPolicy) (string, error) {
return tabbedString(func(out io.Writer) error {
w := NewPrefixWriter(out)
w.Write(LEVEL_0, "Name:\t%s\n", networkPolicy.Name)
w.Write(LEVEL_0, "Namespace:\t%s\n", networkPolicy.Namespace)
w.Write(LEVEL_0, "Created on:\t%s\n", networkPolicy.CreationTimestamp)
printLabelsMultiline(w, "Labels", networkPolicy.Labels)
printAnnotationsMultiline(w, "Annotations", networkPolicy.Annotations)
describeNetworkPolicySpec(networkPolicy.Spec, w)
return nil
})
}
// describeNetworkPolicySpec writes the policy's pod selector, its ingress and
// egress rules (or a note when a direction is unaffected), and the policy types.
func describeNetworkPolicySpec(nps networkingv1.NetworkPolicySpec, w PrefixWriter) {
w.Write(LEVEL_0, "Spec:\n")
w.Write(LEVEL_1, "PodSelector: ")
// An empty selector matches every pod in the namespace.
if len(nps.PodSelector.MatchLabels) == 0 && len(nps.PodSelector.MatchExpressions) == 0 {
w.Write(LEVEL_2, "<none> (Allowing the specific traffic to all pods in this namespace)\n")
} else {
w.Write(LEVEL_2, "%s\n", metav1.FormatLabelSelector(&nps.PodSelector))
}
ingressEnabled, egressEnabled := getPolicyType(nps)
if ingressEnabled {
w.Write(LEVEL_1, "Allowing ingress traffic:\n")
printNetworkPolicySpecIngressFrom(nps.Ingress, "    ", w)
} else {
w.Write(LEVEL_1, "Not affecting ingress traffic\n")
}
if egressEnabled {
w.Write(LEVEL_1, "Allowing egress traffic:\n")
printNetworkPolicySpecEgressTo(nps.Egress, "    ", w)
} else {
w.Write(LEVEL_1, "Not affecting egress traffic\n")
}
w.Write(LEVEL_1, "Policy Types: %v\n", policyTypesToString(nps.PolicyTypes))
}
// getPolicyType reports whether the spec's PolicyTypes list enables ingress
// and/or egress rule evaluation, in that order.
func getPolicyType(nps networkingv1.NetworkPolicySpec) (bool, bool) {
	ingress, egress := false, false
	for _, policyType := range nps.PolicyTypes {
		ingress = ingress || policyType == networkingv1.PolicyTypeIngress
		egress = egress || policyType == networkingv1.PolicyTypeEgress
	}
	return ingress, egress
}
// printNetworkPolicySpecIngressFrom writes each ingress rule: its allowed
// ports (or an "any port" note), then each allowed peer (pod/namespace
// selectors or IP block). Rules are separated by a dashed divider; an empty
// rule list means the selected pods are fully isolated for ingress.
func printNetworkPolicySpecIngressFrom(npirs []networkingv1.NetworkPolicyIngressRule, initialIndent string, w PrefixWriter) {
if len(npirs) == 0 {
w.Write(LEVEL_0, "%s%s\n", initialIndent, "<none> (Selected pods are isolated for ingress connectivity)")
return
}
for i, npir := range npirs {
if len(npir.Ports) == 0 {
w.Write(LEVEL_0, "%s%s\n", initialIndent, "To Port: <any> (traffic allowed to all ports)")
} else {
for _, port := range npir.Ports {
// The API defaults an unset protocol to TCP.
var proto corev1.Protocol
if port.Protocol != nil {
proto = *port.Protocol
} else {
proto = corev1.ProtocolTCP
}
w.Write(LEVEL_0, "%s%s: %s/%s\n", initialIndent, "To Port", port.Port, proto)
}
}
if len(npir.From) == 0 {
w.Write(LEVEL_0, "%s%s\n", initialIndent, "From: <any> (traffic not restricted by source)")
} else {
for _, from := range npir.From {
w.Write(LEVEL_0, "%s%s\n", initialIndent, "From:")
// A peer is exactly one of: both selectors, a single selector, or an IP block.
if from.PodSelector != nil && from.NamespaceSelector != nil {
w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "NamespaceSelector", metav1.FormatLabelSelector(from.NamespaceSelector))
w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "PodSelector", metav1.FormatLabelSelector(from.PodSelector))
} else if from.PodSelector != nil {
w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "PodSelector", metav1.FormatLabelSelector(from.PodSelector))
} else if from.NamespaceSelector != nil {
w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "NamespaceSelector", metav1.FormatLabelSelector(from.NamespaceSelector))
} else if from.IPBlock != nil {
w.Write(LEVEL_1, "%sIPBlock:\n", initialIndent)
w.Write(LEVEL_2, "%sCIDR: %s\n", initialIndent, from.IPBlock.CIDR)
w.Write(LEVEL_2, "%sExcept: %v\n", initialIndent, strings.Join(from.IPBlock.Except, ", "))
}
}
}
if i != len(npirs)-1 {
w.Write(LEVEL_0, "%s%s\n", initialIndent, "----------")
}
}
}
// printNetworkPolicySpecEgressTo writes each egress rule: its allowed ports
// (or an "any port" note), then each allowed destination peer (pod/namespace
// selectors or IP block). Rules are separated by a dashed divider; an empty
// rule list means the selected pods are fully isolated for egress.
func printNetworkPolicySpecEgressTo(npers []networkingv1.NetworkPolicyEgressRule, initialIndent string, w PrefixWriter) {
	if len(npers) == 0 {
		w.Write(LEVEL_0, "%s%s\n", initialIndent, "<none> (Selected pods are isolated for egress connectivity)")
		return
	}
	for i, nper := range npers {
		if len(nper.Ports) == 0 {
			w.Write(LEVEL_0, "%s%s\n", initialIndent, "To Port: <any> (traffic allowed to all ports)")
		} else {
			for _, port := range nper.Ports {
				// The API defaults an unset protocol to TCP.
				var proto corev1.Protocol
				if port.Protocol != nil {
					proto = *port.Protocol
				} else {
					proto = corev1.ProtocolTCP
				}
				w.Write(LEVEL_0, "%s%s: %s/%s\n", initialIndent, "To Port", port.Port, proto)
			}
		}
		if len(nper.To) == 0 {
			// Egress rules restrict destinations, not sources; the previous
			// message incorrectly said "source" (copied from the ingress path).
			w.Write(LEVEL_0, "%s%s\n", initialIndent, "To: <any> (traffic not restricted by destination)")
		} else {
			for _, to := range nper.To {
				w.Write(LEVEL_0, "%s%s\n", initialIndent, "To:")
				// A peer is exactly one of: both selectors, a single selector, or an IP block.
				if to.PodSelector != nil && to.NamespaceSelector != nil {
					w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "NamespaceSelector", metav1.FormatLabelSelector(to.NamespaceSelector))
					w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "PodSelector", metav1.FormatLabelSelector(to.PodSelector))
				} else if to.PodSelector != nil {
					w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "PodSelector", metav1.FormatLabelSelector(to.PodSelector))
				} else if to.NamespaceSelector != nil {
					w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "NamespaceSelector", metav1.FormatLabelSelector(to.NamespaceSelector))
				} else if to.IPBlock != nil {
					w.Write(LEVEL_1, "%sIPBlock:\n", initialIndent)
					w.Write(LEVEL_2, "%sCIDR: %s\n", initialIndent, to.IPBlock.CIDR)
					w.Write(LEVEL_2, "%sExcept: %v\n", initialIndent, strings.Join(to.IPBlock.Except, ", "))
				}
			}
		}
		if i != len(npers)-1 {
			w.Write(LEVEL_0, "%s%s\n", initialIndent, "----------")
		}
	}
}
// StorageClassDescriber generates information about a StorageClass.
type StorageClassDescriber struct {
// The embedded clientset is used to fetch the StorageClass and its events.
clientset.Interface
}
// Describe fetches the named (cluster-scoped) StorageClass and renders it,
// optionally including events searched in the given namespace.
func (s *StorageClassDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	storageClass, err := s.StorageV1().StorageClasses().Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are tolerated; the description proceeds without them.
		events, _ = s.CoreV1().Events(namespace).Search(scheme.Scheme, storageClass)
	}
	return describeStorageClass(storageClass, events)
}
// describeStorageClass renders a StorageClass as text: provisioner,
// parameters, expansion/reclaim/binding settings, mount options, allowed
// topologies, and events (when a non-nil list is supplied).
func describeStorageClass(sc *storagev1.StorageClass, events *corev1.EventList) (string, error) {
return tabbedString(func(out io.Writer) error {
w := NewPrefixWriter(out)
w.Write(LEVEL_0, "Name:\t%s\n", sc.Name)
w.Write(LEVEL_0, "IsDefaultClass:\t%s\n", storageutil.IsDefaultAnnotationText(sc.ObjectMeta))
w.Write(LEVEL_0, "Annotations:\t%s\n", labels.FormatLabels(sc.Annotations))
w.Write(LEVEL_0, "Provisioner:\t%s\n", sc.Provisioner)
w.Write(LEVEL_0, "Parameters:\t%s\n", labels.FormatLabels(sc.Parameters))
w.Write(LEVEL_0, "AllowVolumeExpansion:\t%s\n", printBoolPtr(sc.AllowVolumeExpansion))
if len(sc.MountOptions) == 0 {
w.Write(LEVEL_0, "MountOptions:\t<none>\n")
} else {
w.Write(LEVEL_0, "MountOptions:\n")
for _, option := range sc.MountOptions {
w.Write(LEVEL_1, "%s\n", option)
}
}
// ReclaimPolicy and VolumeBindingMode are optional pointers; only print when set.
if sc.ReclaimPolicy != nil {
w.Write(LEVEL_0, "ReclaimPolicy:\t%s\n", *sc.ReclaimPolicy)
}
if sc.VolumeBindingMode != nil {
w.Write(LEVEL_0, "VolumeBindingMode:\t%s\n", *sc.VolumeBindingMode)
}
if sc.AllowedTopologies != nil {
printAllowedTopologies(w, sc.AllowedTopologies)
}
if events != nil {
DescribeEvents(events, w)
}
return nil
})
}
// CSINodeDescriber generates information about a CSINode.
type CSINodeDescriber struct {
// The embedded clientset is used to fetch the CSINode and its events.
clientset.Interface
}
// Describe fetches the named (cluster-scoped) CSINode and renders it,
// optionally including events searched in the given namespace.
func (c *CSINodeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	csiNode, err := c.StorageV1().CSINodes().Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are tolerated; the description proceeds without them.
		events, _ = c.CoreV1().Events(namespace).Search(scheme.Scheme, csiNode)
	}
	return describeCSINode(csiNode, events)
}
// describeCSINode renders a CSINode as text: metadata, each installed CSI
// driver (allocatable volume count, node ID, topology keys), and events
// (when a non-nil list is supplied).
func describeCSINode(csi *storagev1.CSINode, events *corev1.EventList) (output string, err error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", csi.GetName())
		printLabelsMultiline(w, "Labels", csi.GetLabels())
		printAnnotationsMultiline(w, "Annotations", csi.GetAnnotations())
		w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", csi.CreationTimestamp.Time.Format(time.RFC1123Z))
		w.Write(LEVEL_0, "Spec:\n")
		if csi.Spec.Drivers != nil {
			w.Write(LEVEL_1, "Drivers:\n")
			for _, driver := range csi.Spec.Drivers {
				w.Write(LEVEL_2, "%s:\n", driver.Name)
				// Allocatable and its Count field are optional in the API;
				// dereferencing them unguarded panicked for drivers that do
				// not report an allocatable volume count.
				if driver.Allocatable != nil && driver.Allocatable.Count != nil {
					w.Write(LEVEL_3, "Allocatables:\n")
					w.Write(LEVEL_4, "Count:\t%d\n", *driver.Allocatable.Count)
				}
				w.Write(LEVEL_3, "Node ID:\t%s\n", driver.NodeID)
				w.Write(LEVEL_3, "Topology Keys:\t%s\n", driver.TopologyKeys)
			}
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// printAllowedTopologies writes a storage class's allowed topology terms, one
// numbered term per line, or "<none>" when no terms are set.
func printAllowedTopologies(w PrefixWriter, topologies []corev1.TopologySelectorTerm) {
	w.Write(LEVEL_0, "AllowedTopologies:\t")
	if len(topologies) == 0 {
		w.WriteLine("<none>")
		return
	}
	w.WriteLine("")
	for index, term := range topologies {
		title := fmt.Sprintf("Term %d", index)
		printTopologySelectorTermsMultilineWithIndent(w, LEVEL_1, title, "\t", term.MatchLabelExpressions)
	}
}
// printTopologySelectorTermsMultilineWithIndent writes one "key in [values]"
// expression per requirement under the given title, with continuation lines
// aligned by innerIndent; "<none>" is printed when there are no requirements.
func printTopologySelectorTermsMultilineWithIndent(w PrefixWriter, indentLevel int, title, innerIndent string, reqs []corev1.TopologySelectorLabelRequirement) {
	w.Write(indentLevel, "%s:%s", title, innerIndent)
	if len(reqs) == 0 {
		w.WriteLine("<none>")
		return
	}
	for i, req := range reqs {
		// Continuation lines repeat the indent so values align under the title.
		if i > 0 {
			w.Write(indentLevel, "%s", innerIndent)
		}
		expr := req.Key + " in"
		if len(req.Values) > 0 {
			expr += " [" + strings.Join(req.Values, ", ") + "]"
		}
		w.Write(LEVEL_0, "%s\n", expr)
	}
}
// PodDisruptionBudgetDescriber generates information about a PodDisruptionBudget.
type PodDisruptionBudgetDescriber struct {
// The embedded clientset is used to fetch the PodDisruptionBudget and its events.
clientset.Interface
}
// Describe fetches the named PodDisruptionBudget and renders it, optionally
// including its events.
func (p *PodDisruptionBudgetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	pdb, err := p.PolicyV1beta1().PodDisruptionBudgets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are tolerated; the description proceeds without them.
		events, _ = p.CoreV1().Events(namespace).Search(scheme.Scheme, pdb)
	}
	return describePodDisruptionBudget(pdb, events)
}
// describePodDisruptionBudget renders a PodDisruptionBudget as text: its
// min-available or max-unavailable setting, selector, disruption status
// counters, and events (when a non-nil list is supplied).
func describePodDisruptionBudget(pdb *policyv1beta1.PodDisruptionBudget, events *corev1.EventList) (string, error) {
return tabbedString(func(out io.Writer) error {
w := NewPrefixWriter(out)
w.Write(LEVEL_0, "Name:\t%s\n", pdb.Name)
w.Write(LEVEL_0, "Namespace:\t%s\n", pdb.Namespace)
// MinAvailable and MaxUnavailable are mutually exclusive in the API;
// whichever is set gets printed (MinAvailable wins if both are non-nil).
if pdb.Spec.MinAvailable != nil {
w.Write(LEVEL_0, "Min available:\t%s\n", pdb.Spec.MinAvailable.String())
} else if pdb.Spec.MaxUnavailable != nil {
w.Write(LEVEL_0, "Max unavailable:\t%s\n", pdb.Spec.MaxUnavailable.String())
}
if pdb.Spec.Selector != nil {
w.Write(LEVEL_0, "Selector:\t%s\n", metav1.FormatLabelSelector(pdb.Spec.Selector))
} else {
w.Write(LEVEL_0, "Selector:\t<unset>\n")
}
w.Write(LEVEL_0, "Status:\n")
w.Write(LEVEL_2, "Allowed disruptions:\t%d\n", pdb.Status.DisruptionsAllowed)
w.Write(LEVEL_2, "Current:\t%d\n", pdb.Status.CurrentHealthy)
w.Write(LEVEL_2, "Desired:\t%d\n", pdb.Status.DesiredHealthy)
w.Write(LEVEL_2, "Total:\t%d\n", pdb.Status.ExpectedPods)
if events != nil {
DescribeEvents(events, w)
}
return nil
})
}
// PriorityClassDescriber generates information about a PriorityClass.
type PriorityClassDescriber struct {
// The embedded clientset is used to fetch the PriorityClass and its events.
clientset.Interface
}
// Describe fetches the named (cluster-scoped) PriorityClass and renders it,
// optionally including events searched in the given namespace.
func (s *PriorityClassDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
	priorityClass, err := s.SchedulingV1().PriorityClasses().Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var events *corev1.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are tolerated; the description proceeds without them.
		events, _ = s.CoreV1().Events(namespace).Search(scheme.Scheme, priorityClass)
	}
	return describePriorityClass(priorityClass, events)
}
// describePriorityClass renders a PriorityClass as text: value, whether it is
// the global default, its description, annotations, and events (when a
// non-nil list is supplied).
func describePriorityClass(pc *schedulingv1.PriorityClass, events *corev1.EventList) (string, error) {
return tabbedString(func(out io.Writer) error {
w := NewPrefixWriter(out)
w.Write(LEVEL_0, "Name:\t%s\n", pc.Name)
w.Write(LEVEL_0, "Value:\t%v\n", pc.Value)
w.Write(LEVEL_0, "GlobalDefault:\t%v\n", pc.GlobalDefault)
w.Write(LEVEL_0, "Description:\t%s\n", pc.Description)
w.Write(LEVEL_0, "Annotations:\t%s\n", labels.FormatLabels(pc.Annotations))
if events != nil {
DescribeEvents(events, w)
}
return nil
})
}
// PodSecurityPolicyDescriber generates information about a PodSecurityPolicy.
type PodSecurityPolicyDescriber struct {
// The embedded clientset is used to fetch the PodSecurityPolicy.
clientset.Interface
}
func (d *PodSecurityPolicyDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
psp, err := d.PolicyV1beta1().PodSecurityPolicies().Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return "", err
}
return describePodSecurityPolicy(psp)
}
// describePodSecurityPolicy renders a PodSecurityPolicy as a tab-aligned,
// human-readable description covering privilege settings, capabilities,
// allowed volumes/drivers/sysctls, host access, and the SELinux, user,
// FSGroup and supplemental-group strategies.
func describePodSecurityPolicy(psp *policyv1beta1.PodSecurityPolicy) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", psp.Name)
		w.Write(LEVEL_0, "\nSettings:\n")
		w.Write(LEVEL_1, "Allow Privileged:\t%t\n", psp.Spec.Privileged)
		if psp.Spec.AllowPrivilegeEscalation != nil {
			w.Write(LEVEL_1, "Allow Privilege Escalation:\t%t\n", *psp.Spec.AllowPrivilegeEscalation)
		} else {
			w.Write(LEVEL_1, "Allow Privilege Escalation:\t<unset>\n")
		}
		w.Write(LEVEL_1, "Default Add Capabilities:\t%v\n", capsToString(psp.Spec.DefaultAddCapabilities))
		w.Write(LEVEL_1, "Required Drop Capabilities:\t%s\n", capsToString(psp.Spec.RequiredDropCapabilities))
		w.Write(LEVEL_1, "Allowed Capabilities:\t%s\n", capsToString(psp.Spec.AllowedCapabilities))
		w.Write(LEVEL_1, "Allowed Volume Types:\t%s\n", fsTypeToString(psp.Spec.Volumes))
		// The optional allow/forbid lists below are only printed when non-empty.
		if len(psp.Spec.AllowedFlexVolumes) > 0 {
			w.Write(LEVEL_1, "Allowed FlexVolume Types:\t%s\n", flexVolumesToString(psp.Spec.AllowedFlexVolumes))
		}
		if len(psp.Spec.AllowedCSIDrivers) > 0 {
			w.Write(LEVEL_1, "Allowed CSI Drivers:\t%s\n", csiDriversToString(psp.Spec.AllowedCSIDrivers))
		}
		if len(psp.Spec.AllowedUnsafeSysctls) > 0 {
			w.Write(LEVEL_1, "Allowed Unsafe Sysctls:\t%s\n", sysctlsToString(psp.Spec.AllowedUnsafeSysctls))
		}
		if len(psp.Spec.ForbiddenSysctls) > 0 {
			w.Write(LEVEL_1, "Forbidden Sysctls:\t%s\n", sysctlsToString(psp.Spec.ForbiddenSysctls))
		}
		w.Write(LEVEL_1, "Allow Host Network:\t%t\n", psp.Spec.HostNetwork)
		w.Write(LEVEL_1, "Allow Host Ports:\t%s\n", hostPortRangeToString(psp.Spec.HostPorts))
		w.Write(LEVEL_1, "Allow Host PID:\t%t\n", psp.Spec.HostPID)
		w.Write(LEVEL_1, "Allow Host IPC:\t%t\n", psp.Spec.HostIPC)
		w.Write(LEVEL_1, "Read Only Root Filesystem:\t%v\n", psp.Spec.ReadOnlyRootFilesystem)
		w.Write(LEVEL_1, "SELinux Context Strategy: %s\t\n", string(psp.Spec.SELinux.Rule))
		// SELinux options may be absent; empty strings render as "<none>".
		var user, role, seLinuxType, level string
		if psp.Spec.SELinux.SELinuxOptions != nil {
			user = psp.Spec.SELinux.SELinuxOptions.User
			role = psp.Spec.SELinux.SELinuxOptions.Role
			seLinuxType = psp.Spec.SELinux.SELinuxOptions.Type
			level = psp.Spec.SELinux.SELinuxOptions.Level
		}
		w.Write(LEVEL_2, "User:\t%s\n", stringOrNone(user))
		w.Write(LEVEL_2, "Role:\t%s\n", stringOrNone(role))
		w.Write(LEVEL_2, "Type:\t%s\n", stringOrNone(seLinuxType))
		w.Write(LEVEL_2, "Level:\t%s\n", stringOrNone(level))
		w.Write(LEVEL_1, "Run As User Strategy: %s\t\n", string(psp.Spec.RunAsUser.Rule))
		w.Write(LEVEL_2, "Ranges:\t%s\n", idRangeToString(psp.Spec.RunAsUser.Ranges))
		w.Write(LEVEL_1, "FSGroup Strategy: %s\t\n", string(psp.Spec.FSGroup.Rule))
		w.Write(LEVEL_2, "Ranges:\t%s\n", idRangeToString(psp.Spec.FSGroup.Ranges))
		w.Write(LEVEL_1, "Supplemental Groups Strategy: %s\t\n", string(psp.Spec.SupplementalGroups.Rule))
		w.Write(LEVEL_2, "Ranges:\t%s\n", idRangeToString(psp.Spec.SupplementalGroups.Ranges))
		return nil
	})
}
// stringOrNone returns s, or the literal "<none>" when s is empty.
func stringOrNone(s string) string {
	return stringOrDefaultValue(s, "<none>")
}

// stringOrDefaultValue returns s unless it is empty, in which case
// defaultValue is returned instead.
func stringOrDefaultValue(s, defaultValue string) string {
	if s == "" {
		return defaultValue
	}
	return s
}
// fsTypeToString renders volume FSTypes as a comma-separated list, or "<none>".
func fsTypeToString(volumes []policyv1beta1.FSType) string {
	names := make([]string, 0, len(volumes))
	for _, v := range volumes {
		names = append(names, string(v))
	}
	return stringOrNone(strings.Join(names, ","))
}

// flexVolumesToString renders allowed FlexVolume drivers as
// "driver=<name>" entries, or "<all>" when unrestricted.
func flexVolumesToString(flexVolumes []policyv1beta1.AllowedFlexVolume) string {
	drivers := make([]string, 0, len(flexVolumes))
	for _, fv := range flexVolumes {
		drivers = append(drivers, "driver="+fv.Driver)
	}
	return stringOrDefaultValue(strings.Join(drivers, ","), "<all>")
}

// csiDriversToString renders allowed CSI drivers as "driver=<name>"
// entries, or "<all>" when unrestricted.
func csiDriversToString(csiDrivers []policyv1beta1.AllowedCSIDriver) string {
	names := make([]string, 0, len(csiDrivers))
	for _, d := range csiDrivers {
		names = append(names, "driver="+d.Name)
	}
	return stringOrDefaultValue(strings.Join(names, ","), "<all>")
}

// sysctlsToString renders a sysctl list as a comma-separated string, or "<none>".
func sysctlsToString(sysctls []string) string {
	return stringOrNone(strings.Join(sysctls, ","))
}
// hostPortRangeToString renders host port ranges as "min-max" pairs joined
// by commas, or "<none>" when there are none.
func hostPortRangeToString(ranges []policyv1beta1.HostPortRange) string {
	formatted := ""
	if ranges != nil {
		parts := make([]string, 0, len(ranges))
		for _, r := range ranges {
			parts = append(parts, fmt.Sprintf("%d-%d", r.Min, r.Max))
		}
		formatted = strings.Join(parts, ",")
	}
	return stringOrNone(formatted)
}

// idRangeToString renders UID/GID ranges as "min-max" pairs joined by
// commas, or "<none>" when there are none.
func idRangeToString(ranges []policyv1beta1.IDRange) string {
	formatted := ""
	if ranges != nil {
		parts := make([]string, 0, len(ranges))
		for _, r := range ranges {
			parts = append(parts, fmt.Sprintf("%d-%d", r.Min, r.Max))
		}
		formatted = strings.Join(parts, ",")
	}
	return stringOrNone(formatted)
}
// capsToString renders a capability list as a comma-separated string, or
// "<none>" when the list is empty.
func capsToString(caps []corev1.Capability) string {
	// Ranging over a nil slice is a no-op, so the previous explicit nil
	// check was redundant: an empty join yields "" and stringOrNone then
	// supplies "<none>", exactly as before.
	strCaps := make([]string, 0, len(caps))
	for _, c := range caps {
		strCaps = append(strCaps, string(c))
	}
	return stringOrNone(strings.Join(strCaps, ","))
}
// policyTypesToString renders network policy types as a comma+space
// separated list, or "<none>" when there are none.
func policyTypesToString(pts []networkingv1.PolicyType) string {
	formatted := ""
	if pts != nil {
		names := make([]string, 0, len(pts))
		for _, p := range pts {
			names = append(names, string(p))
		}
		formatted = strings.Join(names, ", ")
	}
	return stringOrNone(formatted)
}
// newErrNoDescriber creates a new ErrNoDescriber with the names of the provided types.
func newErrNoDescriber(types ...reflect.Type) error {
	names := make([]string, 0, len(types))
	for _, t := range types {
		names = append(names, t.String())
	}
	return describe.ErrNoDescriber{Types: names}
}

// Describers implements ObjectDescriber against functions registered via Add. Those functions can
// be strongly typed. Types are exactly matched (no conversion or assignable checks).
type Describers struct {
	// searchFns maps the type of the first ("exact") argument to the list
	// of describer functions registered for that type.
	searchFns map[reflect.Type][]typeFunc
}
// DescribeObject implements ObjectDescriber and will attempt to print the provided object to a string,
// if at least one describer function has been registered with the exact types passed, or if any
// describer can print the exact object in its first argument (the remainder will be provided empty
// values). If no function registered with Add can satisfy the passed objects, an ErrNoDescriber will
// be returned
// TODO: reorder and partial match extra.
func (d *Describers) DescribeObject(exact interface{}, extra ...interface{}) (string, error) {
	exactType := reflect.TypeOf(exact)
	fns, ok := d.searchFns[exactType]
	if !ok {
		return "", newErrNoDescriber(exactType)
	}
	if len(extra) == 0 {
		// Prefer a function registered with no extra arguments.
		for _, typeFn := range fns {
			if len(typeFn.Extra) == 0 {
				return typeFn.Describe(exact, extra...)
			}
		}
		// Otherwise fall back to the first registered function, passing a
		// zero value for each of its declared extra parameters.
		typeFn := fns[0]
		for _, t := range typeFn.Extra {
			v := reflect.New(t).Elem()
			extra = append(extra, v.Interface())
		}
		return fns[0].Describe(exact, extra...)
	}
	// Extra arguments were supplied: find a registered function whose extra
	// parameter types match the provided values.
	types := make([]reflect.Type, 0, len(extra))
	for _, obj := range extra {
		types = append(types, reflect.TypeOf(obj))
	}
	for _, typeFn := range fns {
		if typeFn.Matches(types) {
			return typeFn.Describe(exact, extra...)
		}
	}
	return "", newErrNoDescriber(append([]reflect.Type{exactType}, types...)...)
}
// Add adds one or more describer functions to the describe.Describer. The passed function must
// match the signature:
//
// func(...) (string, error)
//
// Any number of arguments may be provided.
func (d *Describers) Add(fns ...interface{}) error {
	for _, fn := range fns {
		fv := reflect.ValueOf(fn)
		ft := fv.Type()
		if ft.Kind() != reflect.Func {
			return fmt.Errorf("expected func, got: %v", ft)
		}
		numIn := ft.NumIn()
		if numIn == 0 {
			return fmt.Errorf("expected at least one 'in' params, got: %v", ft)
		}
		if ft.NumOut() != 2 {
			return fmt.Errorf("expected two 'out' params - (string, error), got: %v", ft)
		}
		types := make([]reflect.Type, 0, numIn)
		for i := 0; i < numIn; i++ {
			types = append(types, ft.In(i))
		}
		if ft.Out(0) != reflect.TypeOf(string("")) {
			return fmt.Errorf("expected string return, got: %v", ft)
		}
		var forErrorType error
		// This convolution is necessary, otherwise TypeOf picks up on the fact
		// that forErrorType is nil.
		errorType := reflect.TypeOf(&forErrorType).Elem()
		if ft.Out(1) != errorType {
			return fmt.Errorf("expected error return, got: %v", ft)
		}
		// Index the function by its first parameter type; the remaining
		// parameter types are recorded as Extra for exact matching later.
		exact := types[0]
		extra := types[1:]
		if d.searchFns == nil {
			d.searchFns = make(map[reflect.Type][]typeFunc)
		}
		fns := d.searchFns[exact]
		fn := typeFunc{Extra: extra, Fn: fv}
		fns = append(fns, fn)
		d.searchFns[exact] = fns
	}
	return nil
}
// typeFunc holds information about a describer function and the types it accepts
type typeFunc struct {
Extra []reflect.Type
Fn reflect.Value
}
// Matches returns true when the passed types exactly match the Extra list.
func (fn typeFunc) Matches(types []reflect.Type) bool {
if len(fn.Extra) != len(types) {
return false
}
// reorder the items in array types and fn.Extra
// convert the type into string and sort them, check if they are matched
varMap := make(map[reflect.Type]bool)
for i := range fn.Extra {
varMap[fn.Extra[i]] = true
}
for i := range types {
if _, found := varMap[types[i]]; !found {
return false
}
}
return true
}
// Describe invokes the nested function with the exact number of arguments.
func (fn typeFunc) Describe(exact interface{}, extra ...interface{}) (string, error) {
values := []reflect.Value{reflect.ValueOf(exact)}
for _, obj := range extra {
values = append(values, reflect.ValueOf(obj))
}
out := fn.Fn.Call(values)
s := out[0].Interface().(string)
var err error
if !out[1].IsNil() {
err = out[1].Interface().(error)
}
return s, err
}
// printLabelsMultiline prints multiple labels with a proper alignment.
func printLabelsMultiline(w PrefixWriter, title string, labels map[string]string) {
	printLabelsMultilineWithIndent(w, "", title, "\t", labels, sets.NewString())
}

// printLabelsMultilineWithIndent prints multiple labels with a user-defined
// alignment, omitting keys present in skip and printing "<none>" when
// nothing remains.
func printLabelsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, labels map[string]string, skip sets.String) {
	w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent)
	if len(labels) == 0 {
		w.WriteLine("<none>")
		return
	}
	// Collect the keys that survive the skip filter so they can be sorted.
	keys := make([]string, 0, len(labels))
	for key := range labels {
		if skip.Has(key) {
			continue
		}
		keys = append(keys, key)
	}
	if len(keys) == 0 {
		w.WriteLine("<none>")
		return
	}
	sort.Strings(keys)
	for i, key := range keys {
		if i != 0 {
			// Lines after the first need the indentation the header
			// supplied for the first line.
			w.Write(LEVEL_0, "%s", initialIndent)
			w.Write(LEVEL_0, "%s", innerIndent)
		}
		w.Write(LEVEL_0, "%s=%s\n", key, labels[key])
		// (Removed a dead `i++`: incrementing the range loop's per-iteration
		// copy had no effect.)
	}
}
// printTaintsMultiline prints multiple taints with a proper alignment.
func printNodeTaintsMultiline(w PrefixWriter, title string, taints []corev1.Taint) {
	printTaintsMultilineWithIndent(w, "", title, "\t", taints)
}

// printTaintsMultilineWithIndent prints multiple taints with a user-defined alignment.
// NOTE: the caller's taints slice is sorted in place (by effect, then key).
func printTaintsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, taints []corev1.Taint) {
	w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent)
	if len(taints) == 0 {
		w.WriteLine("<none>")
		return
	}
	// to print taints in the sorted order
	sort.Slice(taints, func(i, j int) bool {
		cmpKey := func(taint corev1.Taint) string {
			return string(taint.Effect) + "," + taint.Key
		}
		return cmpKey(taints[i]) < cmpKey(taints[j])
	})
	for i, taint := range taints {
		if i != 0 {
			// Lines after the first need the indentation that the header
			// already provided for the first line.
			w.Write(LEVEL_0, "%s", initialIndent)
			w.Write(LEVEL_0, "%s", innerIndent)
		}
		w.Write(LEVEL_0, "%s\n", taint.ToString())
	}
}
// printPodsMultiline prints multiple pods with a proper alignment.
func printPodsMultiline(w PrefixWriter, title string, pods []corev1.Pod) {
	printPodsMultilineWithIndent(w, "", title, "\t", pods)
}

// printPodsMultilineWithIndent prints multiple pods with a user-defined
// alignment. The caller's pods slice is sorted in place by name so that
// output is deterministic.
func printPodsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, pods []corev1.Pod) {
	w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent)
	if len(pods) == 0 {
		w.WriteLine("<none>")
		return
	}
	sort.Slice(pods, func(a, b int) bool {
		return pods[a].Name < pods[b].Name
	})
	for idx, pod := range pods {
		if idx != 0 {
			w.Write(LEVEL_0, "%s", initialIndent)
			w.Write(LEVEL_0, "%s", innerIndent)
		}
		w.Write(LEVEL_0, "%s\n", pod.Name)
	}
}
// printPodTolerationsMultiline prints multiple tolerations with a proper alignment.
func printPodTolerationsMultiline(w PrefixWriter, title string, tolerations []corev1.Toleration) {
	printTolerationsMultilineWithIndent(w, "", title, "\t", tolerations)
}

// printTolerationsMultilineWithIndent prints multiple tolerations with a user-defined alignment.
// Each entry is rendered as key[=value][:effect][ for <seconds>s].
// NOTE: the caller's tolerations slice is sorted in place by key.
func printTolerationsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, tolerations []corev1.Toleration) {
	w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent)
	if len(tolerations) == 0 {
		w.WriteLine("<none>")
		return
	}
	// to print tolerations in the sorted order
	sort.Slice(tolerations, func(i, j int) bool {
		return tolerations[i].Key < tolerations[j].Key
	})
	for i, toleration := range tolerations {
		if i != 0 {
			w.Write(LEVEL_0, "%s", initialIndent)
			w.Write(LEVEL_0, "%s", innerIndent)
		}
		w.Write(LEVEL_0, "%s", toleration.Key)
		if len(toleration.Value) != 0 {
			w.Write(LEVEL_0, "=%s", toleration.Value)
		}
		if len(toleration.Effect) != 0 {
			w.Write(LEVEL_0, ":%s", toleration.Effect)
		}
		if toleration.TolerationSeconds != nil {
			w.Write(LEVEL_0, " for %ds", *toleration.TolerationSeconds)
		}
		w.Write(LEVEL_0, "\n")
	}
}
type flusher interface {
Flush()
}
func tabbedString(f func(io.Writer) error) (string, error) {
out := new(tabwriter.Writer)
buf := &bytes.Buffer{}
out.Init(buf, 0, 8, 2, ' ', 0)
err := f(out)
if err != nil {
return "", err
}
out.Flush()
str := string(buf.String())
return str, nil
}
// SortableResourceNames sorts resource names lexicographically.
type SortableResourceNames []corev1.ResourceName

func (list SortableResourceNames) Len() int {
	return len(list)
}

func (list SortableResourceNames) Swap(i, j int) {
	list[i], list[j] = list[j], list[i]
}

func (list SortableResourceNames) Less(i, j int) bool {
	return list[i] < list[j]
}

// SortedResourceNames returns the sorted resource names of a resource list.
func SortedResourceNames(list corev1.ResourceList) []corev1.ResourceName {
	resources := make([]corev1.ResourceName, 0, len(list))
	for res := range list {
		resources = append(resources, res)
	}
	sort.Sort(SortableResourceNames(resources))
	return resources
}

// SortableResourceQuotas sorts resource quotas by object name.
type SortableResourceQuotas []corev1.ResourceQuota

func (list SortableResourceQuotas) Len() int {
	return len(list)
}

func (list SortableResourceQuotas) Swap(i, j int) {
	list[i], list[j] = list[j], list[i]
}

func (list SortableResourceQuotas) Less(i, j int) bool {
	return list[i].Name < list[j].Name
}

// SortableVolumeMounts sorts volume mounts by mount path.
type SortableVolumeMounts []corev1.VolumeMount

func (list SortableVolumeMounts) Len() int {
	return len(list)
}

func (list SortableVolumeMounts) Swap(i, j int) {
	list[i], list[j] = list[j], list[i]
}

func (list SortableVolumeMounts) Less(i, j int) bool {
	return list[i].MountPath < list[j].MountPath
}

// SortableVolumeDevices sorts volume devices by device path.
type SortableVolumeDevices []corev1.VolumeDevice

func (list SortableVolumeDevices) Len() int {
	return len(list)
}

func (list SortableVolumeDevices) Swap(i, j int) {
	list[i], list[j] = list[j], list[i]
}

func (list SortableVolumeDevices) Less(i, j int) bool {
	return list[i].DevicePath < list[j].DevicePath
}
// maxAnnotationLen is the rendered-line length above which annotation
// values are split across multiple, shortened lines.
var maxAnnotationLen = 140

// printAnnotationsMultilineWithFilter prints filtered multiple annotations with a proper alignment.
func printAnnotationsMultilineWithFilter(w PrefixWriter, title string, annotations map[string]string, skip sets.String) {
	printAnnotationsMultilineWithIndent(w, "", title, "\t", annotations, skip)
}

// printAnnotationsMultiline prints multiple annotations with a proper alignment.
func printAnnotationsMultiline(w PrefixWriter, title string, annotations map[string]string) {
	printAnnotationsMultilineWithIndent(w, "", title, "\t", annotations, sets.NewString())
}
// printAnnotationsMultilineWithIndent prints multiple annotations with a user-defined alignment.
// Keys present in skip are omitted. If an annotation line would exceed
// maxAnnotationLen (or the value is multi-line), the value is printed on
// its own shortened lines under the key.
func printAnnotationsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, annotations map[string]string, skip sets.String) {
	w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent)
	if len(annotations) == 0 {
		w.WriteLine("<none>")
		return
	}
	// Collect and sort the keys that survive the skip filter.
	keys := make([]string, 0, len(annotations))
	for key := range annotations {
		if skip.Has(key) {
			continue
		}
		keys = append(keys, key)
	}
	// BUGFIX: check the filtered key list, not the original map — if every
	// key was skipped there is still nothing to print, so emit "<none>".
	if len(keys) == 0 {
		w.WriteLine("<none>")
		return
	}
	sort.Strings(keys)
	indent := initialIndent + innerIndent
	for i, key := range keys {
		if i != 0 {
			w.Write(LEVEL_0, indent)
		}
		value := strings.TrimSuffix(annotations[key], "\n")
		if (len(value)+len(key)+2) > maxAnnotationLen || strings.Contains(value, "\n") {
			w.Write(LEVEL_0, "%s:\n", key)
			for _, s := range strings.Split(value, "\n") {
				w.Write(LEVEL_0, "%s %s\n", indent, shorten(s, maxAnnotationLen-2))
			}
		} else {
			w.Write(LEVEL_0, "%s: %s\n", key, value)
		}
		// (Removed a dead `i++`: incrementing the range loop's
		// per-iteration copy had no effect.)
	}
}
// shorten truncates s to at most maxLength bytes, appending "..." when
// truncation occurred.
func shorten(s string, maxLength int) string {
	if len(s) <= maxLength {
		return s
	}
	return s[:maxLength] + "..."
}
// translateTimestampSince returns the elapsed time since timestamp in
// human-readable approximation.
func translateTimestampSince(timestamp metav1.Time) string {
	if timestamp.IsZero() {
		return "<unknown>"
	}
	return duration.HumanDuration(time.Since(timestamp.Time))
}

// formatEventSource formats EventSource as a comma separated string excluding Host when empty.
func formatEventSource(es corev1.EventSource) string {
	// Idiomatic Go: locals use lowerCamelCase; the previous name
	// `EventSourceString` looked like an exported identifier.
	sources := []string{es.Component}
	if len(es.Host) > 0 {
		sources = append(sources, es.Host)
	}
	return strings.Join(sources, ", ")
}
// formatEndpoints renders up to three endpoint addresses (or host:port
// pairs when ports are defined) as a comma-separated string, appending
// " + N more..." when the total exceeds that limit.
// Pass ports=nil for all ports.
func formatEndpoints(endpoints *corev1.Endpoints, ports sets.String) string {
	if len(endpoints.Subsets) == 0 {
		return "<none>"
	}
	list := []string{}
	max := 3
	more := false
	count := 0
	for i := range endpoints.Subsets {
		ss := &endpoints.Subsets[i]
		if len(ss.Ports) == 0 {
			// It's possible to have headless services with no ports.
			for i := range ss.Addresses {
				if len(list) == max {
					more = true
				}
				if !more {
					list = append(list, ss.Addresses[i].IP)
				}
				// count keeps tracking the total even after the display
				// list is full, so the "+ N more" suffix stays accurate.
				count++
			}
		} else {
			// "Normal" services with ports defined.
			for i := range ss.Ports {
				port := &ss.Ports[i]
				if ports == nil || ports.Has(port.Name) {
					for i := range ss.Addresses {
						if len(list) == max {
							more = true
						}
						addr := &ss.Addresses[i]
						if !more {
							hostPort := net.JoinHostPort(addr.IP, strconv.Itoa(int(port.Port)))
							list = append(list, hostPort)
						}
						count++
					}
				}
			}
		}
	}
	ret := strings.Join(list, ",")
	if more {
		return fmt.Sprintf("%s + %d more...", ret, count-max)
	}
	return ret
}
// extractCSRStatus summarizes a CSR's conditions as one of
// "Denied"/"Approved"/"Pending", appending ",Issued" once a certificate is
// present. An unrecognized condition type yields an error.
func extractCSRStatus(csr *certificatesv1beta1.CertificateSigningRequest) (string, error) {
	var approved, denied bool
	for _, c := range csr.Status.Conditions {
		switch c.Type {
		case certificatesv1beta1.CertificateApproved:
			approved = true
		case certificatesv1beta1.CertificateDenied:
			denied = true
		default:
			return "", fmt.Errorf("unknown csr condition %q", c)
		}
	}
	var status string
	// must be in order of precedence
	if denied {
		status += "Denied"
	} else if approved {
		status += "Approved"
	} else {
		status += "Pending"
	}
	if len(csr.Status.Certificate) > 0 {
		status += ",Issued"
	}
	return status, nil
}
// backendStringer behaves just like a string interface and converts the given backend to a string.
// A nil backend renders as the empty string.
func backendStringer(backend *networkingv1beta1.IngressBackend) string {
	if backend == nil {
		return ""
	}
	return fmt.Sprintf("%v:%v", backend.ServiceName, backend.ServicePort.String())
}
// findNodeRoles returns the roles of a given node.
// The roles are determined by looking for:
// * a node-role.kubernetes.io/<role>="" label
// * a kubernetes.io/role="<role>" label
func findNodeRoles(node *corev1.Node) []string {
	roles := sets.NewString()
	for k, v := range node.Labels {
		switch {
		case strings.HasPrefix(k, describe.LabelNodeRolePrefix):
			if role := strings.TrimPrefix(k, describe.LabelNodeRolePrefix); len(role) > 0 {
				roles.Insert(role)
			}
		case k == describe.NodeLabelRole && v != "":
			roles.Insert(v)
		}
	}
	// The set yields a sorted, de-duplicated list.
	return roles.List()
}
// loadBalancerStatusStringer behaves mostly like a string interface and converts the given status to a string.
// `wide` indicates whether the returned value is meant for --o=wide output. If not, it's clipped to 16 bytes.
func loadBalancerStatusStringer(s corev1.LoadBalancerStatus, wide bool) string {
	ingress := s.Ingress
	result := sets.NewString()
	for i := range ingress {
		// Prefer the IP; fall back to the hostname when no IP is set.
		if ingress[i].IP != "" {
			result.Insert(ingress[i].IP)
		} else if ingress[i].Hostname != "" {
			result.Insert(ingress[i].Hostname)
		}
	}
	r := strings.Join(result.List(), ",")
	if !wide && len(r) > describe.LoadBalancerWidth {
		r = r[0:(describe.LoadBalancerWidth-3)] + "..."
	}
	return r
}
|
package executor
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
)
func getBinaryNameFromCommand(command string) (string, error) {
_, name := path.Split(command)
nameSplit := strings.Split(name, " ")
if len(nameSplit) == 0 {
return "", fmt.Errorf("Failed to extract command name from %s", command)
}
return nameSplit[0], nil
}
func createExecutorOutputFiles(command, prefix string) (stdout, stderr *os.File, err error) {
if len(command) == 0 {
return nil, nil, errors.New("Empty command string")
}
commandName, err := getBinaryNameFromCommand(command)
if err != nil {
return nil, nil, err
}
directoryPrivilages := os.FileMode(0755)
pwd, err := os.Getwd()
if err != nil {
return nil, nil, fmt.Errorf("Failed to get working directory. Error: %s\n", err.Error())
}
outputDir, err := ioutil.TempDir(pwd, prefix+"_"+commandName+"_")
if err != nil {
return nil, nil, fmt.Errorf("Failed to create output directory for %s. Error: %s\n", commandName,
err.Error())
}
if err = os.Chmod(outputDir, directoryPrivilages); err != nil {
return nil, nil, fmt.Errorf("Failed to set privileges for dir %s: %q", outputDir, err)
}
filePrivileges := os.FileMode(0644)
stdoutFileName := path.Join(outputDir, "stdout")
stdout, err = os.Create(stdoutFileName)
if err != nil {
return nil, nil, err
}
if err = stdout.Chmod(filePrivileges); err != nil {
return nil, nil, fmt.Errorf("Failed to set privileges for file %s: %q", stdout.Name(), err)
}
stderr, err = os.Create(path.Join(outputDir, "stderr"))
if err != nil {
os.Remove(stdoutFileName)
return nil, nil, err
}
if err = stderr.Chmod(filePrivileges); err != nil {
return nil, nil, fmt.Errorf("Failed to set privileges for file %s: %q", stderr.Name(), err)
}
return stdout, stderr, err
}
Spelling nit: rename the misspelled local variable `directoryPrivilages` to `directoryPrivileges`.
package executor
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
)
// getBinaryNameFromCommand returns the binary name from a full command
// line: the portion of the last path element up to the first space.
func getBinaryNameFromCommand(command string) (string, error) {
	_, name := path.Split(command)
	fields := strings.SplitN(name, " ", 2)
	if len(fields) == 0 {
		// Defensive: SplitN with a non-empty separator always yields at
		// least one element, so this should be unreachable.
		return "", fmt.Errorf("Failed to extract command name from %s", command)
	}
	return fields[0], nil
}
// createExecutorOutputFiles creates a temporary "<prefix>_<binary>_XXX"
// directory under the working directory containing world-readable "stdout"
// and "stderr" files, and returns both files open for writing.
func createExecutorOutputFiles(command, prefix string) (stdout, stderr *os.File, err error) {
	if len(command) == 0 {
		return nil, nil, errors.New("Empty command string")
	}
	commandName, err := getBinaryNameFromCommand(command)
	if err != nil {
		return nil, nil, err
	}
	directoryPrivileges := os.FileMode(0755)
	pwd, err := os.Getwd()
	if err != nil {
		return nil, nil, fmt.Errorf("Failed to get working directory. Error: %s\n", err.Error())
	}
	outputDir, err := ioutil.TempDir(pwd, prefix+"_"+commandName+"_")
	if err != nil {
		return nil, nil, fmt.Errorf("Failed to create output directory for %s. Error: %s\n", commandName,
			err.Error())
	}
	// TempDir creates the directory 0700; widen it so others can read it.
	if err = os.Chmod(outputDir, directoryPrivileges); err != nil {
		return nil, nil, fmt.Errorf("Failed to set privileges for dir %s: %q", outputDir, err)
	}
	filePrivileges := os.FileMode(0644)
	stdoutFileName := path.Join(outputDir, "stdout")
	stdout, err = os.Create(stdoutFileName)
	if err != nil {
		return nil, nil, err
	}
	if err = stdout.Chmod(filePrivileges); err != nil {
		return nil, nil, fmt.Errorf("Failed to set privileges for file %s: %q", stdout.Name(), err)
	}
	stderr, err = os.Create(path.Join(outputDir, "stderr"))
	if err != nil {
		// Best-effort cleanup of the already-created stdout file.
		os.Remove(stdoutFileName)
		return nil, nil, err
	}
	if err = stderr.Chmod(filePrivileges); err != nil {
		// NOTE(review): on this path (and the chmod failures above) the open
		// handles and the temp directory are left behind — confirm callers
		// tolerate that, or add cleanup.
		return nil, nil, fmt.Errorf("Failed to set privileges for file %s: %q", stderr.Name(), err)
	}
	return stdout, stderr, err
}
|
package builder
import (
"github.com/Aptomi/aptomi/pkg/external"
"github.com/Aptomi/aptomi/pkg/external/secrets"
"github.com/Aptomi/aptomi/pkg/external/users"
"github.com/Aptomi/aptomi/pkg/lang"
"github.com/Aptomi/aptomi/pkg/object"
"github.com/Aptomi/aptomi/pkg/util"
"math/rand"
)
// randSeed seeds the builder's RNG so generated IDs are deterministic
// across test runs.
var randSeed = int64(239)

// idLength is the length of generated random object names/IDs.
var idLength = 16

// PolicyBuilder is a utility struct to help build policy objects
// It is primarily used in unit tests
type PolicyBuilder struct {
	random          *rand.Rand                // deterministic source for RandomID
	namespace       string                    // namespace new objects are created in
	policy          *lang.Policy              // the policy being assembled
	users           *users.UserLoaderMock     // mock user store backing External()
	secrets         *secrets.SecretLoaderMock // mock secret store backing External()
	domainAdmin     *lang.User                // auto-created admin user
	domainAdminView *lang.PolicyView          // admin-scoped view used for AddObject calls
}
// NewPolicyBuilder creates a new PolicyBuilder with a default "main" namespace
func NewPolicyBuilder() *PolicyBuilder {
	return NewPolicyBuilderWithNS("main")
}

// NewPolicyBuilderWithNS creates a new PolicyBuilder
func NewPolicyBuilderWithNS(namespace string) *PolicyBuilder {
	result := &PolicyBuilder{
		random:    rand.New(rand.NewSource(randSeed)),
		namespace: namespace,
		policy:    lang.NewPolicy(),
		users:     users.NewUserLoaderMock(),
		secrets:   secrets.NewSecretLoaderMock(),
	}
	// Seed the policy with bootstrap ACL rules, then create a domain admin
	// whose policy view is used for all subsequent AddObject calls.
	for _, rule := range lang.ACLRulesBootstrap {
		result.policy.AddObject(rule)
	}
	result.domainAdmin = result.AddUserDomainAdmin()
	result.domainAdminView = result.policy.View(result.domainAdmin)
	return result
}
// SwitchNamespace switches the current namespace where objects will be generated
func (builder *PolicyBuilder) SwitchNamespace(namespace string) {
	builder.namespace = namespace
}

// AddDependency creates a new dependency and adds it to the policy
func (builder *PolicyBuilder) AddDependency(user *lang.User, contract *lang.Contract) *lang.Dependency {
	result := &lang.Dependency{
		Metadata: lang.Metadata{
			Kind:      lang.DependencyObject.Kind,
			Namespace: builder.namespace,
			Name:      util.RandomID(builder.random, idLength),
		},
		UserID: user.ID,
		// Contracts are referenced by "<namespace>/<name>".
		Contract: contract.Namespace + "/" + contract.Name,
		Labels:   make(map[string]string),
	}
	builder.domainAdminView.AddObject(result)
	return result
}
// AddUser creates a new user who can consume services from the 'main' namespace and adds it to the policy
func (builder *PolicyBuilder) AddUser() *lang.User {
	result := &lang.User{
		ID:     util.RandomID(builder.random, idLength),
		Name:   util.RandomID(builder.random, idLength),
		Labels: map[string]string{"role": "aptomi_main_ns_consumer"},
	}
	builder.users.AddUser(result)
	return result
}

// AddUserDomainAdmin creates a new user who is a domain admin and adds it to the policy
func (builder *PolicyBuilder) AddUserDomainAdmin() *lang.User {
	// Same as AddUser, but the role label is upgraded to domain admin.
	result := builder.AddUser()
	result.Labels["role"] = "aptomi_domain_admin"
	return result
}
// AddService creates a new service and adds it to the policy
func (builder *PolicyBuilder) AddService(owner *lang.User) *lang.Service {
	// A nil owner is allowed; the owner ID stays empty in that case.
	var ownerID string
	if owner != nil {
		ownerID = owner.ID
	}
	service := &lang.Service{
		Metadata: lang.Metadata{
			Kind:      lang.ServiceObject.Kind,
			Namespace: builder.namespace,
			Name:      util.RandomID(builder.random, idLength),
		},
		Owner: ownerID,
	}
	builder.domainAdminView.AddObject(service)
	return service
}
// AddContract creates a new contract for a given service and adds it to the policy
func (builder *PolicyBuilder) AddContract(service *lang.Service, criteria *lang.Criteria) *lang.Contract {
	result := &lang.Contract{
		Metadata: lang.Metadata{
			Kind:      lang.ContractObject.Kind,
			Namespace: builder.namespace,
			Name:      util.RandomID(builder.random, idLength),
		},
		// A single context allocating directly to the given service.
		Contexts: []*lang.Context{{
			Name:     util.RandomID(builder.random, idLength),
			Criteria: criteria,
			Allocation: &lang.Allocation{
				Service: service.Name,
			},
		}},
	}
	builder.domainAdminView.AddObject(result)
	return result
}
// AddContractMultipleContexts creates contract with multiple contexts for a given service and adds it to the policy
func (builder *PolicyBuilder) AddContractMultipleContexts(service *lang.Service, criteriaArray ...*lang.Criteria) *lang.Contract {
	// The contract's own random name is generated before any context names,
	// preserving the deterministic RandomID sequence.
	contract := &lang.Contract{
		Metadata: lang.Metadata{
			Kind:      lang.ContractObject.Kind,
			Namespace: builder.namespace,
			Name:      util.RandomID(builder.random, idLength),
		},
	}
	for _, criteria := range criteriaArray {
		ctx := &lang.Context{
			Name:     util.RandomID(builder.random, idLength),
			Criteria: criteria,
			Allocation: &lang.Allocation{
				Service: service.Name,
			},
		}
		contract.Contexts = append(contract.Contexts, ctx)
	}
	builder.domainAdminView.AddObject(contract)
	return contract
}
// AddRule creates a new rule and adds it to the policy
func (builder *PolicyBuilder) AddRule(criteria *lang.Criteria, actions *lang.RuleActions) *lang.Rule {
	result := &lang.Rule{
		Metadata: lang.Metadata{
			Kind:      lang.RuleObject.Kind,
			Namespace: builder.namespace,
			Name:      util.RandomID(builder.random, idLength),
		},
		// Weight equals the number of pre-existing rules, so later rules
		// get higher weights.
		Weight:   len(builder.policy.GetObjectsByKind(lang.RuleObject.Kind)),
		Criteria: criteria,
		Actions:  actions,
	}
	builder.domainAdminView.AddObject(result)
	return result
}

// AddCluster creates a new cluster and adds it to the policy
func (builder *PolicyBuilder) AddCluster() *lang.Cluster {
	result := &lang.Cluster{
		Metadata: lang.Metadata{
			Kind: lang.ClusterObject.Kind,
			// Clusters always live in the system namespace, not the
			// builder's current namespace.
			Namespace: object.SystemNS,
			Name:      util.RandomID(builder.random, idLength),
		},
	}
	builder.domainAdminView.AddObject(result)
	return result
}
// Criteria creates a criteria with one require-all, one require-any, and one require-none
func (builder *PolicyBuilder) Criteria(all string, any string, none string) *lang.Criteria {
	return &lang.Criteria{
		RequireAll:  []string{all},
		RequireAny:  []string{any},
		RequireNone: []string{none},
	}
}

// CriteriaTrue creates a criteria which always evaluates to true
func (builder *PolicyBuilder) CriteriaTrue() *lang.Criteria {
	return &lang.Criteria{
		RequireAny: []string{"true"},
	}
}

// AllocationKeys creates allocation keys
func (builder *PolicyBuilder) AllocationKeys(key string) []string {
	return []string{key}
}
// UnknownComponent creates an unknown component for a service (not code and not contract)
func (builder *PolicyBuilder) UnknownComponent() *lang.ServiceComponent {
	return &lang.ServiceComponent{
		Name: util.RandomID(builder.random, idLength),
	}
}

// CodeComponent creates a new code component for a service
func (builder *PolicyBuilder) CodeComponent(codeParams util.NestedParameterMap, discoveryParams util.NestedParameterMap) *lang.ServiceComponent {
	return &lang.ServiceComponent{
		Name: util.RandomID(builder.random, idLength),
		Code: &lang.Code{
			// Fixed plugin type used by the unit-test harness.
			Type:   "aptomi/code/unittests",
			Params: codeParams,
		},
		Discovery: discoveryParams,
	}
}

// ContractComponent creates a new contract component for a service
func (builder *PolicyBuilder) ContractComponent(contract *lang.Contract) *lang.ServiceComponent {
	return &lang.ServiceComponent{
		Name: util.RandomID(builder.random, idLength),
		// Contracts are referenced by "<namespace>/<name>".
		Contract: contract.Namespace + "/" + contract.Name,
	}
}
// AddServiceComponent adds a given service component to the service
func (builder *PolicyBuilder) AddServiceComponent(service *lang.Service, component *lang.ServiceComponent) *lang.ServiceComponent {
	service.Components = append(service.Components, component)
	return component
}

// AddComponentDependency adds a component dependency on another component
func (builder *PolicyBuilder) AddComponentDependency(component *lang.ServiceComponent, dependsOn *lang.ServiceComponent) {
	// Dependencies are tracked by component name.
	component.Dependencies = append(component.Dependencies, dependsOn.Name)
}

// RuleActions creates a new RuleActions object
func (builder *PolicyBuilder) RuleActions(labelOps lang.LabelOperations) *lang.RuleActions {
	result := &lang.RuleActions{}
	if labelOps != nil {
		result.ChangeLabels = lang.ChangeLabelsAction(labelOps)
	}
	return result
}
// Policy returns the generated policy
func (builder *PolicyBuilder) Policy() *lang.Policy {
return builder.policy
}
// External returns the generated external data, backed by the builder's mock
// user and secret loaders.
func (builder *PolicyBuilder) External() *external.Data {
	return external.NewData(builder.users, builder.secrets)
}
// Namespace returns the current namespace
// in which newly generated objects are placed.
func (builder *PolicyBuilder) Namespace() string {
	return builder.namespace
}
Fixed linter warning.
package builder
import (
"github.com/Aptomi/aptomi/pkg/external"
"github.com/Aptomi/aptomi/pkg/external/secrets"
"github.com/Aptomi/aptomi/pkg/external/users"
"github.com/Aptomi/aptomi/pkg/lang"
"github.com/Aptomi/aptomi/pkg/object"
"github.com/Aptomi/aptomi/pkg/util"
"math/rand"
)
// randSeed is a fixed seed so that generated object names/IDs are
// deterministic across test runs.
var randSeed = int64(239)
// idLength is the length of randomly generated object names and IDs.
var idLength = 16
// PolicyBuilder is a utility struct to help build policy objects
// It is primarily used in unit tests
type PolicyBuilder struct {
	random          *rand.Rand                // deterministic RNG for generated names/IDs (seeded with randSeed)
	namespace       string                    // namespace in which new objects are created
	policy          *lang.Policy              // the policy being built
	users           *users.UserLoaderMock     // mock loader holding generated users
	secrets         *secrets.SecretLoaderMock // mock loader for secrets
	domainAdmin     *lang.User                // admin user on whose behalf objects are added
	domainAdminView *lang.PolicyView          // policy view of domainAdmin, used for AddObject calls
}
// NewPolicyBuilder creates a new PolicyBuilder with a default "main" namespace
// (equivalent to NewPolicyBuilderWithNS("main")).
func NewPolicyBuilder() *PolicyBuilder {
	return NewPolicyBuilderWithNS("main")
}
// NewPolicyBuilderWithNS creates a new PolicyBuilder
// operating in the given namespace. It seeds a deterministic RNG, installs
// the bootstrap ACL rules, and creates a domain admin user whose policy view
// is used for all subsequent AddObject calls.
func NewPolicyBuilderWithNS(namespace string) *PolicyBuilder {
	result := &PolicyBuilder{
		random:    rand.New(rand.NewSource(randSeed)), // same seed -> same generated IDs every run
		namespace: namespace,
		policy:    lang.NewPolicy(),
		users:     users.NewUserLoaderMock(),
		secrets:   secrets.NewSecretLoaderMock(),
	}
	// bootstrap ACL rules are added before the admin view is created
	for _, rule := range lang.ACLRulesBootstrap {
		result.policy.AddObject(rule)
	}
	result.domainAdmin = result.AddUserDomainAdmin()
	result.domainAdminView = result.policy.View(result.domainAdmin)
	return result
}
// SwitchNamespace switches the current namespace where objects will be generated
// from now on; objects created earlier keep their original namespace.
func (builder *PolicyBuilder) SwitchNamespace(namespace string) {
	builder.namespace = namespace
}
// AddDependency creates a new dependency of the given user on the given
// contract (random name, empty label set) and adds it to the policy.
func (builder *PolicyBuilder) AddDependency(user *lang.User, contract *lang.Contract) *lang.Dependency {
	dependency := &lang.Dependency{
		Metadata: lang.Metadata{
			Kind:      lang.DependencyObject.Kind,
			Namespace: builder.namespace,
			Name:      util.RandomID(builder.random, idLength),
		},
		UserID:   user.ID,
		Contract: contract.Namespace + "/" + contract.Name,
		Labels:   map[string]string{},
	}
	_ = builder.domainAdminView.AddObject(dependency)
	return dependency
}
// AddUser creates a new user with a random ID and name who can consume
// services from the 'main' namespace, registers it with the mock user loader,
// and returns it.
func (builder *PolicyBuilder) AddUser() *lang.User {
	user := &lang.User{
		ID:     util.RandomID(builder.random, idLength),
		Name:   util.RandomID(builder.random, idLength),
		Labels: map[string]string{"role": "aptomi_main_ns_consumer"},
	}
	builder.users.AddUser(user)
	return user
}
// AddUserDomainAdmin creates a new user, promotes it to domain admin via the
// role label, and returns it.
func (builder *PolicyBuilder) AddUserDomainAdmin() *lang.User {
	admin := builder.AddUser()
	admin.Labels["role"] = "aptomi_domain_admin"
	return admin
}
// AddService creates a new service (optionally owned by the given user) and
// adds it to the policy. A nil owner yields a service with an empty owner ID.
func (builder *PolicyBuilder) AddService(owner *lang.User) *lang.Service {
	ownerID := ""
	if owner != nil {
		ownerID = owner.ID
	}
	service := &lang.Service{
		Metadata: lang.Metadata{
			Kind:      lang.ServiceObject.Kind,
			Namespace: builder.namespace,
			Name:      util.RandomID(builder.random, idLength),
		},
		Owner: ownerID,
	}
	_ = builder.domainAdminView.AddObject(service)
	return service
}
// AddContract creates a new contract for a given service with a single
// context matching the given criteria, and adds it to the policy.
func (builder *PolicyBuilder) AddContract(service *lang.Service, criteria *lang.Criteria) *lang.Contract {
	// NOTE: the contract name is generated before the context name to keep
	// the deterministic RNG sequence identical to the original ordering.
	contract := &lang.Contract{
		Metadata: lang.Metadata{
			Kind:      lang.ContractObject.Kind,
			Namespace: builder.namespace,
			Name:      util.RandomID(builder.random, idLength),
		},
	}
	contract.Contexts = []*lang.Context{{
		Name:       util.RandomID(builder.random, idLength),
		Criteria:   criteria,
		Allocation: &lang.Allocation{Service: service.Name},
	}}
	_ = builder.domainAdminView.AddObject(contract)
	return contract
}
// AddContractMultipleContexts creates a contract with one context per given
// criteria (each allocating to the given service) and adds it to the policy.
func (builder *PolicyBuilder) AddContractMultipleContexts(service *lang.Service, criteriaArray ...*lang.Criteria) *lang.Contract {
	contract := &lang.Contract{
		Metadata: lang.Metadata{
			Kind:      lang.ContractObject.Kind,
			Namespace: builder.namespace,
			Name:      util.RandomID(builder.random, idLength),
		},
	}
	for _, criteria := range criteriaArray {
		context := &lang.Context{
			Name:     util.RandomID(builder.random, idLength),
			Criteria: criteria,
			Allocation: &lang.Allocation{
				Service: service.Name,
			},
		}
		contract.Contexts = append(contract.Contexts, context)
	}
	_ = builder.domainAdminView.AddObject(contract)
	return contract
}
// AddRule creates a new rule with the given criteria and actions and adds it
// to the policy. The rule's weight is the number of rules already present,
// so rules are ordered by creation time.
func (builder *PolicyBuilder) AddRule(criteria *lang.Criteria, actions *lang.RuleActions) *lang.Rule {
	weight := len(builder.policy.GetObjectsByKind(lang.RuleObject.Kind))
	rule := &lang.Rule{
		Metadata: lang.Metadata{
			Kind:      lang.RuleObject.Kind,
			Namespace: builder.namespace,
			Name:      util.RandomID(builder.random, idLength),
		},
		Weight:   weight,
		Criteria: criteria,
		Actions:  actions,
	}
	_ = builder.domainAdminView.AddObject(rule)
	return rule
}
// AddCluster creates a new cluster in the system namespace and adds it to
// the policy.
func (builder *PolicyBuilder) AddCluster() *lang.Cluster {
	cluster := &lang.Cluster{
		Metadata: lang.Metadata{
			Kind:      lang.ClusterObject.Kind,
			Namespace: object.SystemNS,
			Name:      util.RandomID(builder.random, idLength),
		},
	}
	_ = builder.domainAdminView.AddObject(cluster)
	return cluster
}
// Criteria creates a criteria with one require-all, one require-any, and one
// require-none expression.
func (builder *PolicyBuilder) Criteria(all string, any string, none string) *lang.Criteria {
	criteria := &lang.Criteria{}
	criteria.RequireAll = []string{all}
	criteria.RequireAny = []string{any}
	criteria.RequireNone = []string{none}
	return criteria
}
// CriteriaTrue creates a criteria which always evaluates to true
// (a single require-any expression "true").
func (builder *PolicyBuilder) CriteriaTrue() *lang.Criteria {
	return &lang.Criteria{RequireAny: []string{"true"}}
}
// AllocationKeys creates allocation keys from the given key(s).
// Generalized from the previous single-key signature to a variadic one;
// existing single-argument callers remain source-compatible.
func (builder *PolicyBuilder) AllocationKeys(key ...string) []string {
	return key
}
// UnknownComponent creates a service component that carries neither code nor
// a contract reference — only a randomly generated name.
func (builder *PolicyBuilder) UnknownComponent() *lang.ServiceComponent {
	component := &lang.ServiceComponent{}
	component.Name = util.RandomID(builder.random, idLength)
	return component
}
// CodeComponent creates a new code component for a service, wired to the
// unit-test code plugin type, with the given code and discovery parameters.
func (builder *PolicyBuilder) CodeComponent(codeParams util.NestedParameterMap, discoveryParams util.NestedParameterMap) *lang.ServiceComponent {
	code := &lang.Code{
		Type:   "aptomi/code/unittests",
		Params: codeParams,
	}
	return &lang.ServiceComponent{
		Name:      util.RandomID(builder.random, idLength),
		Code:      code,
		Discovery: discoveryParams,
	}
}
// ContractComponent creates a new component that delegates to the given
// contract, referenced by its fully qualified "namespace/name" string.
func (builder *PolicyBuilder) ContractComponent(contract *lang.Contract) *lang.ServiceComponent {
	contractRef := contract.Namespace + "/" + contract.Name
	return &lang.ServiceComponent{
		Name:     util.RandomID(builder.random, idLength),
		Contract: contractRef,
	}
}
// AddServiceComponent adds a given service component to the service
// and returns the same component so calls can be chained.
func (builder *PolicyBuilder) AddServiceComponent(service *lang.Service, component *lang.ServiceComponent) *lang.ServiceComponent {
	service.Components = append(service.Components, component)
	return component
}
// AddComponentDependency adds a component dependency on another component
// by appending dependsOn's name to component.Dependencies.
func (builder *PolicyBuilder) AddComponentDependency(component *lang.ServiceComponent, dependsOn *lang.ServiceComponent) {
	component.Dependencies = append(component.Dependencies, dependsOn.Name)
}
// RuleActions creates a new RuleActions object, optionally carrying a
// change-labels action built from the given label operations (nil means no
// actions).
func (builder *PolicyBuilder) RuleActions(labelOps lang.LabelOperations) *lang.RuleActions {
	actions := &lang.RuleActions{}
	if labelOps == nil {
		return actions
	}
	actions.ChangeLabels = lang.ChangeLabelsAction(labelOps)
	return actions
}
// Policy returns the generated policy
// (the same instance that the Add* methods have been populating).
func (builder *PolicyBuilder) Policy() *lang.Policy {
	return builder.policy
}
// External returns the generated external data, backed by the builder's mock
// user and secret loaders.
func (builder *PolicyBuilder) External() *external.Data {
	return external.NewData(builder.users, builder.secrets)
}
// Namespace returns the current namespace
// in which newly generated objects are placed.
func (builder *PolicyBuilder) Namespace() string {
	return builder.namespace
}
|
/*
Copyright 2017 Mirantis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package libvirttools
import (
"bytes"
"encoding/json"
"io/ioutil"
"net"
"os"
"path/filepath"
"reflect"
"strconv"
"testing"
cnitypes "github.com/containernetworking/cni/pkg/types"
cnicurrent "github.com/containernetworking/cni/pkg/types/current"
"github.com/davecgh/go-spew/spew"
"github.com/ghodss/yaml"
"github.com/Mirantis/virtlet/pkg/utils"
testutils "github.com/Mirantis/virtlet/pkg/utils/testing"
libvirtxml "github.com/libvirt/libvirt-go-xml"
)
// fakeFlexvolume describes a fake flexvolume directory created for tests.
type fakeFlexvolume struct {
	uuid string // volume UUID, also used as the directory name
	part int    // partition number; only recorded in the JSON when >= 0
	path string // path of the created volume directory
}
// newFakeFlexvolume creates a flexvolume directory named after uuid under
// parentDir, writes a virtlet-flexvolume.json describing it (including the
// partition number when part >= 0), and returns its descriptor. Any failure
// aborts the test.
func newFakeFlexvolume(t *testing.T, parentDir string, uuid string, part int) *fakeFlexvolume {
	volInfo := map[string]string{"uuid": uuid}
	if part >= 0 {
		volInfo["part"] = strconv.Itoa(part)
	}
	dir := filepath.Join(parentDir, uuid)
	if err := os.MkdirAll(dir, 0777); err != nil {
		t.Fatalf("MkdirAll(): %q: %v", dir, err)
	}
	jsonPath := filepath.Join(dir, "virtlet-flexvolume.json")
	if err := utils.WriteJson(jsonPath, volInfo, 0777); err != nil {
		t.Fatalf("WriteJson(): %q: %v", jsonPath, err)
	}
	return &fakeFlexvolume{uuid: uuid, part: part, path: dir}
}
// buildNetworkedPodConfig returns a "nocloud" VMConfig for pod foo/default
// whose CNIConfig field carries the JSON-serialized CNI result.
func buildNetworkedPodConfig(cniResult *cnicurrent.Result) *VMConfig {
	r, err := json.Marshal(cniResult)
	if err != nil {
		// Marshaling should never fail here; include the underlying error so
		// an unexpected failure is diagnosable instead of being swallowed.
		panic("failed to marshal CNI result: " + err.Error())
	}
	return &VMConfig{
		PodName:           "foo",
		PodNamespace:      "default",
		ParsedAnnotations: &VirtletAnnotations{ImageType: "nocloud"},
		CNIConfig:         string(r),
	}
}
// TestCloudInitGenerator is a table-driven test of cloud-init data
// generation: meta-data, user-data (both cloud-config map form and raw
// script form) and network-config, covering ssh keys, meta-data overrides,
// env vars, mounted flexvolumes, and CNI-derived network configuration.
func TestCloudInitGenerator(t *testing.T) {
	// Fake flexvolumes (with/without partition numbers) backing the
	// "volumes to mount" cases below.
	tmpDir, err := ioutil.TempDir("", "fake-flexvol")
	if err != nil {
		t.Fatalf("TempDir(): %v", err)
	}
	defer os.RemoveAll(tmpDir)
	vols := []*fakeFlexvolume{
		newFakeFlexvolume(t, tmpDir, "77f29a0e-46af-4188-a6af-9ff8b8a65224", -1),
		newFakeFlexvolume(t, tmpDir, "82b7a880-dc04-48a3-8f2d-0c6249bb53fe", 0),
		newFakeFlexvolume(t, tmpDir, "94ae25c7-62e1-4854-9f9b-9e285c3a5ed9", 2),
	}
	for _, tc := range []struct {
		name                  string
		config                *VMConfig
		volumeMap             diskPathMap
		expectedMetaData      map[string]interface{}
		expectedUserData      map[string]interface{}
		expectedNetworkConfig map[string]interface{}
		expectedUserDataStr   string // when set, user-data is compared as a raw string instead of parsed YAML
	}{
		{
			name: "plain pod",
			config: &VMConfig{
				PodName:           "foo",
				PodNamespace:      "default",
				ParsedAnnotations: &VirtletAnnotations{ImageType: "nocloud"},
			},
			expectedMetaData: map[string]interface{}{
				"instance-id":    "foo.default",
				"local-hostname": "foo",
			},
			expectedUserData: nil,
			expectedNetworkConfig: map[string]interface{}{
				// that's how yaml parses the number
				"version": float64(1),
			},
		},
		{
			name: "pod with ssh keys",
			config: &VMConfig{
				PodName:      "foo",
				PodNamespace: "default",
				ParsedAnnotations: &VirtletAnnotations{
					SSHKeys:   []string{"key1", "key2"},
					ImageType: "nocloud",
				},
			},
			expectedMetaData: map[string]interface{}{
				"instance-id":    "foo.default",
				"local-hostname": "foo",
				"public-keys":    []interface{}{"key1", "key2"},
			},
			expectedUserData: nil,
		},
		{
			name: "pod with ssh keys and meta-data override",
			config: &VMConfig{
				PodName:      "foo",
				PodNamespace: "default",
				ParsedAnnotations: &VirtletAnnotations{
					SSHKeys: []string{"key1", "key2"},
					MetaData: map[string]interface{}{
						"instance-id": "foobar",
					},
					ImageType: "nocloud",
				},
			},
			expectedMetaData: map[string]interface{}{
				"instance-id":    "foobar",
				"local-hostname": "foo",
				"public-keys":    []interface{}{"key1", "key2"},
			},
			expectedUserData: nil,
		},
		{
			name: "pod with user data",
			config: &VMConfig{
				PodName:      "foo",
				PodNamespace: "default",
				ParsedAnnotations: &VirtletAnnotations{
					UserData: map[string]interface{}{
						"users": []interface{}{
							map[string]interface{}{
								"name": "cloudy",
							},
						},
					},
					SSHKeys:   []string{"key1", "key2"},
					ImageType: "nocloud",
				},
			},
			expectedMetaData: map[string]interface{}{
				"instance-id":    "foo.default",
				"local-hostname": "foo",
				"public-keys":    []interface{}{"key1", "key2"},
			},
			expectedUserData: map[string]interface{}{
				"users": []interface{}{
					map[string]interface{}{
						"name": "cloudy",
					},
				},
			},
		},
		{
			name: "pod with env variables",
			config: &VMConfig{
				PodName:           "foo",
				PodNamespace:      "default",
				ParsedAnnotations: &VirtletAnnotations{ImageType: "nocloud"},
				Environment: []*VMKeyValue{
					{"foo", "bar"},
					{"baz", "abc"},
				},
			},
			expectedMetaData: map[string]interface{}{
				"instance-id":    "foo.default",
				"local-hostname": "foo",
			},
			expectedUserData: map[string]interface{}{
				"write_files": []interface{}{
					map[string]interface{}{
						"path":        "/etc/cloud/environment",
						"content":     "foo=bar\nbaz=abc\n",
						"permissions": "0644",
					},
				},
			},
		},
		{
			name: "pod with env variables and user data",
			config: &VMConfig{
				PodName:      "foo",
				PodNamespace: "default",
				ParsedAnnotations: &VirtletAnnotations{
					UserData: map[string]interface{}{
						"users": []interface{}{
							map[string]interface{}{
								"name": "cloudy",
							},
						},
						"write_files": []interface{}{
							map[string]interface{}{
								"path":    "/etc/foobar",
								"content": "whatever",
							},
						},
					},
					ImageType: "nocloud",
				},
				Environment: []*VMKeyValue{
					{"foo", "bar"},
					{"baz", "abc"},
				},
			},
			expectedMetaData: map[string]interface{}{
				"instance-id":    "foo.default",
				"local-hostname": "foo",
			},
			// env-derived write_files entry is merged after the user-provided one
			expectedUserData: map[string]interface{}{
				"users": []interface{}{
					map[string]interface{}{
						"name": "cloudy",
					},
				},
				"write_files": []interface{}{
					map[string]interface{}{
						"path":    "/etc/foobar",
						"content": "whatever",
					},
					map[string]interface{}{
						"path":        "/etc/cloud/environment",
						"content":     "foo=bar\nbaz=abc\n",
						"permissions": "0644",
					},
				},
			},
		},
		{
			name: "pod with user data script",
			config: &VMConfig{
				PodName:      "foo",
				PodNamespace: "default",
				ParsedAnnotations: &VirtletAnnotations{
					UserDataScript: "#!/bin/sh\necho hi\n",
					SSHKeys:        []string{"key1", "key2"},
					ImageType:      "nocloud",
				},
			},
			expectedMetaData: map[string]interface{}{
				"instance-id":    "foo.default",
				"local-hostname": "foo",
				"public-keys":    []interface{}{"key1", "key2"},
			},
			expectedUserDataStr: "#!/bin/sh\necho hi\n",
		},
		{
			name: "pod with volumes to mount",
			config: &VMConfig{
				PodName:           "foo",
				PodNamespace:      "default",
				ParsedAnnotations: &VirtletAnnotations{ImageType: "nocloud"},
				Mounts: []*VMMount{
					{
						ContainerPath: "/opt",
						HostPath:      vols[0].path,
					},
					{
						ContainerPath: "/var/lib/whatever",
						HostPath:      vols[1].path,
					},
					{
						ContainerPath: "/var/lib/foobar",
						HostPath:      vols[2].path,
					},
				},
			},
			volumeMap: diskPathMap{
				vols[0].uuid: {
					devPath:   "/dev/disk/by-path/virtio-pci-0000:00:01.0-scsi-0:0:0:1",
					sysfsPath: "/sys/devices/pci0000:00/0000:00:03.0/virtio*/host*/target*:0:0/*:0:0:1/block/",
				},
				vols[1].uuid: {
					devPath:   "/dev/disk/by-path/virtio-pci-0000:00:01.0-scsi-0:0:0:2",
					sysfsPath: "/sys/devices/pci0000:00/0000:00:03.0/virtio*/host*/target*:0:0/*:0:0:2/block/",
				},
				vols[2].uuid: {
					devPath:   "/dev/disk/by-path/virtio-pci-0000:00:01.0-scsi-0:0:0:3",
					sysfsPath: "/sys/devices/pci0000:00/0000:00:03.0/virtio*/host*/target*:0:0/*:0:0:3/block/",
				},
			},
			expectedMetaData: map[string]interface{}{
				"instance-id":    "foo.default",
				"local-hostname": "foo",
			},
			expectedUserData: map[string]interface{}{
				"mounts": []interface{}{
					[]interface{}{"/dev/disk/by-path/virtio-pci-0000:00:01.0-scsi-0:0:0:1-part1", "/opt"},
					[]interface{}{"/dev/disk/by-path/virtio-pci-0000:00:01.0-scsi-0:0:0:2", "/var/lib/whatever"},
					[]interface{}{"/dev/disk/by-path/virtio-pci-0000:00:01.0-scsi-0:0:0:3-part2", "/var/lib/foobar"},
				},
				"write_files": []interface{}{
					map[string]interface{}{
						"path":        "/etc/cloud/mount-volumes.sh",
						"permissions": "0755",
						"content": "#!/bin/sh\n" +
							"if ! mountpoint '/opt'; then mkdir -p '/opt' && mount /dev/`ls /sys/devices/pci0000:00/0000:00:03.0/virtio*/host*/target*:0:0/*:0:0:1/block/`1 '/opt'; fi\n" +
							"if ! mountpoint '/var/lib/whatever'; then mkdir -p '/var/lib/whatever' && mount /dev/`ls /sys/devices/pci0000:00/0000:00:03.0/virtio*/host*/target*:0:0/*:0:0:2/block/` '/var/lib/whatever'; fi\n" +
							"if ! mountpoint '/var/lib/foobar'; then mkdir -p '/var/lib/foobar' && mount /dev/`ls /sys/devices/pci0000:00/0000:00:03.0/virtio*/host*/target*:0:0/*:0:0:3/block/`2 '/var/lib/foobar'; fi\n",
					},
				},
			},
		},
		{
			name: "injecting mount script into user data script",
			config: &VMConfig{
				PodName:      "foo",
				PodNamespace: "default",
				ParsedAnnotations: &VirtletAnnotations{
					UserDataScript: "#!/bin/sh\necho hi\n@virtlet-mount-script@",
					ImageType:      "nocloud",
				},
				Mounts: []*VMMount{
					{
						ContainerPath: "/opt",
						HostPath:      vols[0].path,
					},
				},
			},
			volumeMap: diskPathMap{
				vols[0].uuid: {
					devPath:   "/dev/disk/by-path/virtio-pci-0000:00:01.0-scsi-0:0:0:1",
					sysfsPath: "/sys/devices/pci0000:00/0000:00:03.0/virtio*/host*/target*:0:0/*:0:0:1/block/",
				},
			},
			expectedMetaData: map[string]interface{}{
				"instance-id":    "foo.default",
				"local-hostname": "foo",
			},
			expectedUserDataStr: "#!/bin/sh\necho hi\n" +
				"#!/bin/sh\n" +
				"if ! mountpoint '/opt'; then mkdir -p '/opt' && mount /dev/`ls /sys/devices/pci0000:00/0000:00:03.0/virtio*/host*/target*:0:0/*:0:0:1/block/`1 '/opt'; fi\n",
		},
		{
			name: "pod with network config",
			config: buildNetworkedPodConfig(&cnicurrent.Result{
				Interfaces: []*cnicurrent.Interface{
					{
						Name:    "cni0",
						Mac:     "00:11:22:33:44:55",
						Sandbox: "/var/run/netns/bae464f1-6ee7-4ee2-826e-33293a9de95e",
					},
					{
						Name:    "ignoreme0",
						Mac:     "00:12:34:56:78:9a",
						Sandbox: "", // host interface
					},
				},
				IPs: []*cnicurrent.IPConfig{
					{
						Version: "4",
						Address: net.IPNet{
							IP:   net.IPv4(1, 1, 1, 1),
							Mask: net.CIDRMask(8, 32),
						},
						Gateway:   net.IPv4(1, 2, 3, 4),
						Interface: 0,
					},
				},
				Routes: []*cnitypes.Route{
					{
						Dst: net.IPNet{
							IP:   net.IPv4zero,
							Mask: net.CIDRMask(0, 32),
						},
						GW: nil,
					},
				},
				DNS: cnitypes.DNS{
					Nameservers: []string{"1.2.3.4"},
					Search:      []string{"some", "search"},
				},
			}),
			expectedNetworkConfig: map[string]interface{}{
				"version": float64(1),
				"config": []interface{}{
					map[string]interface{}{
						"mac_address": "00:11:22:33:44:55",
						"name":        "cni0",
						"subnets": []interface{}{
							map[string]interface{}{
								"address": "1.1.1.1",
								"netmask": "255.0.0.0",
								"type":    "static",
							},
						},
						"type": "physical",
					},
					map[string]interface{}{
						"destination": "0.0.0.0/0",
						"gateway":     "1.2.3.4",
						"type":        "route",
					},
					map[string]interface{}{
						"address": []interface{}{"1.2.3.4"},
						"search":  []interface{}{"some", "search"},
						"type":    "nameserver",
					},
				},
			},
		},
		{
			name: "pod with multiple network interfaces",
			config: buildNetworkedPodConfig(&cnicurrent.Result{
				Interfaces: []*cnicurrent.Interface{
					{
						Name:    "cni0",
						Mac:     "00:11:22:33:44:55",
						Sandbox: "/var/run/netns/bae464f1-6ee7-4ee2-826e-33293a9de95e",
					},
					{
						Name:    "cni1",
						Mac:     "00:11:22:33:ab:cd",
						Sandbox: "/var/run/netns/d920d2e2-5849-4c70-b9a6-5e3cb4f831cb",
					},
					{
						Name:    "ignoreme0",
						Mac:     "00:12:34:56:78:9a",
						Sandbox: "", // host interface
					},
				},
				IPs: []*cnicurrent.IPConfig{
					// Note that Gateway addresses are not used because
					// there's no routes with nil gateway
					{
						Version: "4",
						Address: net.IPNet{
							IP:   net.IPv4(1, 1, 1, 1),
							Mask: net.CIDRMask(8, 32),
						},
						Gateway:   net.IPv4(1, 2, 3, 4),
						Interface: 0,
					},
					{
						Version: "4",
						Address: net.IPNet{
							IP:   net.IPv4(192, 168, 100, 42),
							Mask: net.CIDRMask(24, 32),
						},
						Gateway:   net.IPv4(192, 168, 100, 1),
						Interface: 1,
					},
				},
				Routes: []*cnitypes.Route{
					{
						Dst: net.IPNet{
							IP:   net.IPv4zero,
							Mask: net.CIDRMask(0, 32),
						},
						GW: net.IPv4(1, 2, 3, 4),
					},
				},
				DNS: cnitypes.DNS{
					Nameservers: []string{"1.2.3.4"},
					Search:      []string{"some", "search"},
				},
			}),
			expectedNetworkConfig: map[string]interface{}{
				"version": float64(1),
				"config": []interface{}{
					map[string]interface{}{
						"mac_address": "00:11:22:33:44:55",
						"name":        "cni0",
						"subnets": []interface{}{
							map[string]interface{}{
								"address": "1.1.1.1",
								"netmask": "255.0.0.0",
								"type":    "static",
							},
						},
						"type": "physical",
					},
					map[string]interface{}{
						"mac_address": "00:11:22:33:ab:cd",
						"name":        "cni1",
						"subnets": []interface{}{
							map[string]interface{}{
								"address": "192.168.100.42",
								"netmask": "255.255.255.0",
								"type":    "static",
							},
						},
						"type": "physical",
					},
					map[string]interface{}{
						"destination": "0.0.0.0/0",
						"gateway":     "1.2.3.4",
						"type":        "route",
					},
					map[string]interface{}{
						"address": []interface{}{"1.2.3.4"},
						"search":  []interface{}{"some", "search"},
						"type":    "nameserver",
					},
				},
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			// we're not invoking actual iso generation here so "/foobar"
			// as isoDir will do
			g := NewCloudInitGenerator(tc.config, "/foobar")
			if tc.expectedMetaData != nil {
				metaDataBytes, err := g.generateMetaData()
				if err != nil {
					t.Fatalf("generateMetaData(): %v", err)
				}
				var metaData map[string]interface{}
				if err := json.Unmarshal(metaDataBytes, &metaData); err != nil {
					t.Fatalf("Can't unmarshal meta-data: %v", err)
				}
				if !reflect.DeepEqual(tc.expectedMetaData, metaData) {
					t.Errorf("Bad meta-data:\n%s\nUnmarshaled:\n%s", metaDataBytes, spew.Sdump(metaData))
				}
			}
			userDataBytes, err := g.generateUserData(tc.volumeMap)
			if err != nil {
				t.Fatalf("generateUserData(): %v", err)
			}
			if tc.expectedUserDataStr != "" {
				// raw script form: compare verbatim
				if string(userDataBytes) != tc.expectedUserDataStr {
					t.Errorf("Bad user-data string:\n%s", userDataBytes)
				}
			} else {
				// cloud-config form: must have the header and parse as YAML
				if !bytes.HasPrefix(userDataBytes, []byte("#cloud-config\n")) {
					t.Errorf("No #cloud-config header")
				}
				var userData map[string]interface{}
				if err := yaml.Unmarshal(userDataBytes, &userData); err != nil {
					t.Fatalf("Can't unmarshal user-data: %v", err)
				}
				if !reflect.DeepEqual(tc.expectedUserData, userData) {
					t.Errorf("Bad user-data:\n%s\nUnmarshaled:\n%s", userDataBytes, spew.Sdump(userData))
				}
			}
			if tc.expectedNetworkConfig != nil {
				networkConfigBytes, err := g.generateNetworkConfiguration()
				if err != nil {
					t.Fatalf("generateNetworkConfiguration(): %v", err)
				}
				var networkConfig map[string]interface{}
				if err := yaml.Unmarshal(networkConfigBytes, &networkConfig); err != nil {
					t.Fatalf("Can't unmarshal user-data: %v", err)
				}
				if !reflect.DeepEqual(tc.expectedNetworkConfig, networkConfig) {
					t.Errorf("Bad network-config:\n%s\nUnmarshaled:\n%s", networkConfigBytes, spew.Sdump(networkConfig))
				}
			}
		})
	}
}
// TestCloudInitDiskDef checks that the generator produces a read-only raw
// cdrom disk definition pointing at the generated ISO path.
func TestCloudInitDiskDef(t *testing.T) {
	g := NewCloudInitGenerator(&VMConfig{
		PodName:           "foo",
		PodNamespace:      "default",
		ParsedAnnotations: &VirtletAnnotations{ImageType: "nocloud"},
	}, "")
	expected := &libvirtxml.DomainDisk{
		Type:     "file",
		Device:   "cdrom",
		Driver:   &libvirtxml.DomainDiskDriver{Name: "qemu", Type: "raw"},
		Source:   &libvirtxml.DomainDiskSource{File: g.IsoPath()},
		ReadOnly: &libvirtxml.DomainDiskReadOnly{},
	}
	diskDef := g.DiskDef()
	if !reflect.DeepEqual(diskDef, expected) {
		t.Errorf("Bad disk definition:\n%s", spew.Sdump(diskDef))
	}
}
// TestCloudInitGenerateImage generates a cloud-init ISO for a plain pod and
// verifies the ISO contains exactly the expected meta-data, network-config
// and user-data files.
func TestCloudInitGenerateImage(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "config-")
	if err != nil {
		t.Fatalf("Can't create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	g := NewCloudInitGenerator(&VMConfig{
		PodName:           "foo",
		PodNamespace:      "default",
		ParsedAnnotations: &VirtletAnnotations{ImageType: "nocloud"},
	}, tmpDir)
	if err := g.GenerateImage(nil); err != nil {
		t.Fatalf("GenerateImage(): %v", err)
	}
	m, err := testutils.IsoToMap(g.IsoPath())
	if err != nil {
		t.Fatalf("IsoToMap(): %v", err)
	}
	expected := map[string]interface{}{
		"meta-data":      "{\"instance-id\":\"foo.default\",\"local-hostname\":\"foo\"}",
		"network-config": "version: 1\n",
		"user-data":      "#cloud-config\n",
	}
	if !reflect.DeepEqual(m, expected) {
		t.Errorf("Bad iso content:\n%s", spew.Sdump(m))
	}
}
// TestEnvDataGeneration checks that a single key/value environment pair is
// rendered as "key=value\n".
func TestEnvDataGeneration(t *testing.T) {
	g := NewCloudInitGenerator(&VMConfig{
		Environment: []*VMKeyValue{{Key: "key", Value: "value"}},
	}, "")
	expected := "key=value\n"
	if output := g.generateEnvVarsContent(); output != expected {
		t.Errorf("Bad environment data generated:\n%s\nExpected:\n%s", output, expected)
	}
}
// verifyWriteFiles applies the updater to an empty user-data map and checks
// that the result consists solely of a "write_files" key holding exactly
// expectedWriteFiles.
func verifyWriteFiles(t *testing.T, u *writeFilesUpdater, expectedWriteFiles ...interface{}) {
	actual := make(map[string]interface{})
	u.updateUserData(actual)
	expected := map[string]interface{}{"write_files": expectedWriteFiles}
	if !reflect.DeepEqual(actual, expected) {
		t.Errorf("Bad user-data:\n%s\nExpected:\n%s", spew.Sdump(actual), spew.Sdump(expected))
	}
}
// withFakeVolumeDir creates a temp directory containing a single file named
// "file" with content "test content" (nested under subdir when subdir is
// non-empty, chmod'ed to perms when perms != 0), invokes toRun with the
// volume location, and removes everything afterwards. With an empty subdir,
// the location passed to toRun is the file itself rather than a directory.
func withFakeVolumeDir(t *testing.T, subdir string, perms os.FileMode, toRun func(location string)) {
	tmpDir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatalf("Can't create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	location := filepath.Join(tmpDir, "file")
	filePath := location
	if subdir != "" {
		location = filepath.Join(tmpDir, subdir)
		if err := os.MkdirAll(location, 0755); err != nil {
			t.Fatalf("Can't create secrets directory in temp dir: %v", err)
		}
		filePath = filepath.Join(location, "file")
	}

	f, err := os.Create(filePath)
	if err != nil {
		t.Fatalf("Can't create sample file in temp directory: %v", err)
	}
	if _, err := f.WriteString("test content"); err != nil {
		f.Close()
		t.Fatalf("Error writing test file: %v", err)
	}
	if perms != 0 {
		if err := f.Chmod(perms); err != nil {
			t.Fatalf("Chmod(): %v", err)
		}
	}
	if err := f.Close(); err != nil {
		t.Fatalf("Error closing test file: %v", err)
	}
	toRun(location)
}
// TestAddingSecrets checks that a secret volume mount becomes a
// base64-encoded write_files entry preserving the file's 0640 permissions.
func TestAddingSecrets(t *testing.T) {
	withFakeVolumeDir(t, "volumes/kubernetes.io~secret/test-volume", 0640, func(location string) {
		updater := newWriteFilesUpdater([]*VMMount{
			{ContainerPath: "/container", HostPath: location},
		})
		updater.addSecrets()
		// "dGVzdCBjb250ZW50" is base64 for "test content"
		verifyWriteFiles(t, updater, map[string]interface{}{
			"path":        "/container/file",
			"content":     "dGVzdCBjb250ZW50",
			"encoding":    "b64",
			"permissions": "0640",
		})
	})
}
// TestAddingConfigMap checks that a configmap volume mount becomes a
// base64-encoded write_files entry with default 0644 permissions.
func TestAddingConfigMap(t *testing.T) {
	withFakeVolumeDir(t, "volumes/kubernetes.io~configmap/test-volume", 0, func(location string) {
		updater := newWriteFilesUpdater([]*VMMount{
			{ContainerPath: "/container", HostPath: location},
		})
		updater.addConfigMapEntries()
		// "dGVzdCBjb250ZW50" is base64 for "test content"
		verifyWriteFiles(t, updater, map[string]interface{}{
			"path":        "/container/file",
			"content":     "dGVzdCBjb250ZW50",
			"encoding":    "b64",
			"permissions": "0644",
		})
	})
}
// TestAddingFileLikeMount checks that a mount whose host path is a single
// file becomes a base64-encoded write_files entry at the container path.
func TestAddingFileLikeMount(t *testing.T) {
	withFakeVolumeDir(t, "", 0, func(location string) {
		updater := newWriteFilesUpdater([]*VMMount{
			{ContainerPath: "/container", HostPath: location},
		})
		updater.addFileLikeMounts()
		// "dGVzdCBjb250ZW50" is base64 for "test content"
		verifyWriteFiles(t, updater, map[string]interface{}{
			"path":        "/container",
			"content":     "dGVzdCBjb250ZW50",
			"encoding":    "b64",
			"permissions": "0644",
		})
	})
}
Add test for configdrive metadata
/*
Copyright 2017 Mirantis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package libvirttools
import (
"bytes"
"encoding/json"
"io/ioutil"
"net"
"os"
"path/filepath"
"reflect"
"strconv"
"testing"
cnitypes "github.com/containernetworking/cni/pkg/types"
cnicurrent "github.com/containernetworking/cni/pkg/types/current"
"github.com/davecgh/go-spew/spew"
"github.com/ghodss/yaml"
"github.com/Mirantis/virtlet/pkg/utils"
testutils "github.com/Mirantis/virtlet/pkg/utils/testing"
libvirtxml "github.com/libvirt/libvirt-go-xml"
)
// fakeFlexvolume describes a fake flexvolume directory created for tests.
type fakeFlexvolume struct {
	uuid string // volume UUID, also used as the directory name
	part int    // partition number; only recorded in the JSON when >= 0
	path string // path of the created volume directory
}
// newFakeFlexvolume creates a flexvolume directory named after uuid under
// parentDir, writes a virtlet-flexvolume.json describing it (including the
// partition number when part >= 0), and returns its descriptor. Any failure
// aborts the test.
func newFakeFlexvolume(t *testing.T, parentDir string, uuid string, part int) *fakeFlexvolume {
	volInfo := map[string]string{"uuid": uuid}
	if part >= 0 {
		volInfo["part"] = strconv.Itoa(part)
	}
	dir := filepath.Join(parentDir, uuid)
	if err := os.MkdirAll(dir, 0777); err != nil {
		t.Fatalf("MkdirAll(): %q: %v", dir, err)
	}
	jsonPath := filepath.Join(dir, "virtlet-flexvolume.json")
	if err := utils.WriteJson(jsonPath, volInfo, 0777); err != nil {
		t.Fatalf("WriteJson(): %q: %v", jsonPath, err)
	}
	return &fakeFlexvolume{uuid: uuid, part: part, path: dir}
}
// buildNetworkedPodConfig returns a "nocloud" VMConfig for pod foo/default
// whose CNIConfig field carries the JSON-serialized CNI result.
func buildNetworkedPodConfig(cniResult *cnicurrent.Result) *VMConfig {
	r, err := json.Marshal(cniResult)
	if err != nil {
		// Marshaling should never fail here; include the underlying error so
		// an unexpected failure is diagnosable instead of being swallowed.
		panic("failed to marshal CNI result: " + err.Error())
	}
	return &VMConfig{
		PodName:           "foo",
		PodNamespace:      "default",
		ParsedAnnotations: &VirtletAnnotations{ImageType: "nocloud"},
		CNIConfig:         string(r),
	}
}
// TestCloudInitGenerator exercises meta-data, user-data and network-config
// generation for a variety of pod configurations using a table-driven test.
// It covers plain pods, configdrive vs nocloud image types, ssh keys,
// meta-data overrides, user-data merging, environment variables, volume
// mounts and CNI-derived network configuration.
func TestCloudInitGenerator(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "fake-flexvol")
	if err != nil {
		t.Fatalf("TempDir(): %v", err)
	}
	defer os.RemoveAll(tmpDir)
	// Three fake flexvolumes: no partition (-1), partition 0 (whole device)
	// and partition 2.
	vols := []*fakeFlexvolume{
		newFakeFlexvolume(t, tmpDir, "77f29a0e-46af-4188-a6af-9ff8b8a65224", -1),
		newFakeFlexvolume(t, tmpDir, "82b7a880-dc04-48a3-8f2d-0c6249bb53fe", 0),
		newFakeFlexvolume(t, tmpDir, "94ae25c7-62e1-4854-9f9b-9e285c3a5ed9", 2),
	}
	for _, tc := range []struct {
		name                  string                 // test case name
		config                *VMConfig              // VM configuration under test
		volumeMap             diskPathMap            // volume uuid -> device/sysfs path mapping
		expectedMetaData      map[string]interface{} // expected parsed meta-data; nil skips the check
		expectedUserData      map[string]interface{} // expected parsed cloud-config user-data
		expectedNetworkConfig map[string]interface{} // expected parsed network-config; nil skips the check
		expectedUserDataStr   string                 // expected raw user-data when it's a script, not YAML
	}{
		{
			name: "plain pod",
			config: &VMConfig{
				PodName:           "foo",
				PodNamespace:      "default",
				ParsedAnnotations: &VirtletAnnotations{ImageType: "nocloud"},
			},
			expectedMetaData: map[string]interface{}{
				"instance-id":    "foo.default",
				"local-hostname": "foo",
			},
			expectedNetworkConfig: map[string]interface{}{
				// that's how yaml parses the number
				"version": float64(1),
			},
		},
		{
			// configdrive images get additional "uuid"/"hostname" keys
			name: "metadata for configdrive",
			config: &VMConfig{
				PodName:           "foo",
				PodNamespace:      "default",
				ParsedAnnotations: &VirtletAnnotations{ImageType: "configdrive"},
			},
			expectedMetaData: map[string]interface{}{
				"instance-id":    "foo.default",
				"local-hostname": "foo",
				"uuid":           "foo.default",
				"hostname":       "foo",
			},
		},
		{
			name: "pod with ssh keys",
			config: &VMConfig{
				PodName:      "foo",
				PodNamespace: "default",
				ParsedAnnotations: &VirtletAnnotations{
					SSHKeys:   []string{"key1", "key2"},
					ImageType: "nocloud",
				},
			},
			expectedMetaData: map[string]interface{}{
				"instance-id":    "foo.default",
				"local-hostname": "foo",
				"public-keys":    []interface{}{"key1", "key2"},
			},
		},
		{
			// user-supplied MetaData entries override the generated ones
			name: "pod with ssh keys and meta-data override",
			config: &VMConfig{
				PodName:      "foo",
				PodNamespace: "default",
				ParsedAnnotations: &VirtletAnnotations{
					SSHKeys: []string{"key1", "key2"},
					MetaData: map[string]interface{}{
						"instance-id": "foobar",
					},
					ImageType: "nocloud",
				},
			},
			expectedMetaData: map[string]interface{}{
				"instance-id":    "foobar",
				"local-hostname": "foo",
				"public-keys":    []interface{}{"key1", "key2"},
			},
		},
		{
			name: "pod with user data",
			config: &VMConfig{
				PodName:      "foo",
				PodNamespace: "default",
				ParsedAnnotations: &VirtletAnnotations{
					UserData: map[string]interface{}{
						"users": []interface{}{
							map[string]interface{}{
								"name": "cloudy",
							},
						},
					},
					SSHKeys:   []string{"key1", "key2"},
					ImageType: "nocloud",
				},
			},
			expectedMetaData: map[string]interface{}{
				"instance-id":    "foo.default",
				"local-hostname": "foo",
				"public-keys":    []interface{}{"key1", "key2"},
			},
			expectedUserData: map[string]interface{}{
				"users": []interface{}{
					map[string]interface{}{
						"name": "cloudy",
					},
				},
			},
		},
		{
			// environment variables become a write_files entry
			name: "pod with env variables",
			config: &VMConfig{
				PodName:           "foo",
				PodNamespace:      "default",
				ParsedAnnotations: &VirtletAnnotations{ImageType: "nocloud"},
				Environment: []*VMKeyValue{
					{"foo", "bar"},
					{"baz", "abc"},
				},
			},
			expectedMetaData: map[string]interface{}{
				"instance-id":    "foo.default",
				"local-hostname": "foo",
			},
			expectedUserData: map[string]interface{}{
				"write_files": []interface{}{
					map[string]interface{}{
						"path":        "/etc/cloud/environment",
						"content":     "foo=bar\nbaz=abc\n",
						"permissions": "0644",
					},
				},
			},
		},
		{
			// generated write_files entries are appended after user-supplied ones
			name: "pod with env variables and user data",
			config: &VMConfig{
				PodName:      "foo",
				PodNamespace: "default",
				ParsedAnnotations: &VirtletAnnotations{
					UserData: map[string]interface{}{
						"users": []interface{}{
							map[string]interface{}{
								"name": "cloudy",
							},
						},
						"write_files": []interface{}{
							map[string]interface{}{
								"path":    "/etc/foobar",
								"content": "whatever",
							},
						},
					},
					ImageType: "nocloud",
				},
				Environment: []*VMKeyValue{
					{"foo", "bar"},
					{"baz", "abc"},
				},
			},
			expectedMetaData: map[string]interface{}{
				"instance-id":    "foo.default",
				"local-hostname": "foo",
			},
			expectedUserData: map[string]interface{}{
				"users": []interface{}{
					map[string]interface{}{
						"name": "cloudy",
					},
				},
				"write_files": []interface{}{
					map[string]interface{}{
						"path":    "/etc/foobar",
						"content": "whatever",
					},
					map[string]interface{}{
						"path":        "/etc/cloud/environment",
						"content":     "foo=bar\nbaz=abc\n",
						"permissions": "0644",
					},
				},
			},
		},
		{
			// UserDataScript is passed through verbatim instead of cloud-config YAML
			name: "pod with user data script",
			config: &VMConfig{
				PodName:      "foo",
				PodNamespace: "default",
				ParsedAnnotations: &VirtletAnnotations{
					UserDataScript: "#!/bin/sh\necho hi\n",
					SSHKeys:        []string{"key1", "key2"},
					ImageType:      "nocloud",
				},
			},
			expectedMetaData: map[string]interface{}{
				"instance-id":    "foo.default",
				"local-hostname": "foo",
				"public-keys":    []interface{}{"key1", "key2"},
			},
			expectedUserDataStr: "#!/bin/sh\necho hi\n",
		},
		{
			// mounts generate both "mounts" entries and a mount script;
			// partition numbers from the flexvolumes select -partN suffixes
			name: "pod with volumes to mount",
			config: &VMConfig{
				PodName:           "foo",
				PodNamespace:      "default",
				ParsedAnnotations: &VirtletAnnotations{ImageType: "nocloud"},
				Mounts: []*VMMount{
					{
						ContainerPath: "/opt",
						HostPath:      vols[0].path,
					},
					{
						ContainerPath: "/var/lib/whatever",
						HostPath:      vols[1].path,
					},
					{
						ContainerPath: "/var/lib/foobar",
						HostPath:      vols[2].path,
					},
				},
			},
			volumeMap: diskPathMap{
				vols[0].uuid: {
					devPath:   "/dev/disk/by-path/virtio-pci-0000:00:01.0-scsi-0:0:0:1",
					sysfsPath: "/sys/devices/pci0000:00/0000:00:03.0/virtio*/host*/target*:0:0/*:0:0:1/block/",
				},
				vols[1].uuid: {
					devPath:   "/dev/disk/by-path/virtio-pci-0000:00:01.0-scsi-0:0:0:2",
					sysfsPath: "/sys/devices/pci0000:00/0000:00:03.0/virtio*/host*/target*:0:0/*:0:0:2/block/",
				},
				vols[2].uuid: {
					devPath:   "/dev/disk/by-path/virtio-pci-0000:00:01.0-scsi-0:0:0:3",
					sysfsPath: "/sys/devices/pci0000:00/0000:00:03.0/virtio*/host*/target*:0:0/*:0:0:3/block/",
				},
			},
			expectedMetaData: map[string]interface{}{
				"instance-id":    "foo.default",
				"local-hostname": "foo",
			},
			expectedUserData: map[string]interface{}{
				"mounts": []interface{}{
					[]interface{}{"/dev/disk/by-path/virtio-pci-0000:00:01.0-scsi-0:0:0:1-part1", "/opt"},
					[]interface{}{"/dev/disk/by-path/virtio-pci-0000:00:01.0-scsi-0:0:0:2", "/var/lib/whatever"},
					[]interface{}{"/dev/disk/by-path/virtio-pci-0000:00:01.0-scsi-0:0:0:3-part2", "/var/lib/foobar"},
				},
				"write_files": []interface{}{
					map[string]interface{}{
						"path":        "/etc/cloud/mount-volumes.sh",
						"permissions": "0755",
						"content": "#!/bin/sh\n" +
							"if ! mountpoint '/opt'; then mkdir -p '/opt' && mount /dev/`ls /sys/devices/pci0000:00/0000:00:03.0/virtio*/host*/target*:0:0/*:0:0:1/block/`1 '/opt'; fi\n" +
							"if ! mountpoint '/var/lib/whatever'; then mkdir -p '/var/lib/whatever' && mount /dev/`ls /sys/devices/pci0000:00/0000:00:03.0/virtio*/host*/target*:0:0/*:0:0:2/block/` '/var/lib/whatever'; fi\n" +
							"if ! mountpoint '/var/lib/foobar'; then mkdir -p '/var/lib/foobar' && mount /dev/`ls /sys/devices/pci0000:00/0000:00:03.0/virtio*/host*/target*:0:0/*:0:0:3/block/`2 '/var/lib/foobar'; fi\n",
					},
				},
			},
		},
		{
			// the @virtlet-mount-script@ placeholder in a user data script
			// is replaced with the generated mount script
			name: "injecting mount script into user data script",
			config: &VMConfig{
				PodName:      "foo",
				PodNamespace: "default",
				ParsedAnnotations: &VirtletAnnotations{
					UserDataScript: "#!/bin/sh\necho hi\n@virtlet-mount-script@",
					ImageType:      "nocloud",
				},
				Mounts: []*VMMount{
					{
						ContainerPath: "/opt",
						HostPath:      vols[0].path,
					},
				},
			},
			volumeMap: diskPathMap{
				vols[0].uuid: {
					devPath:   "/dev/disk/by-path/virtio-pci-0000:00:01.0-scsi-0:0:0:1",
					sysfsPath: "/sys/devices/pci0000:00/0000:00:03.0/virtio*/host*/target*:0:0/*:0:0:1/block/",
				},
			},
			expectedMetaData: map[string]interface{}{
				"instance-id":    "foo.default",
				"local-hostname": "foo",
			},
			expectedUserDataStr: "#!/bin/sh\necho hi\n" +
				"#!/bin/sh\n" +
				"if ! mountpoint '/opt'; then mkdir -p '/opt' && mount /dev/`ls /sys/devices/pci0000:00/0000:00:03.0/virtio*/host*/target*:0:0/*:0:0:1/block/`1 '/opt'; fi\n",
		},
		{
			name: "pod with network config",
			config: buildNetworkedPodConfig(&cnicurrent.Result{
				Interfaces: []*cnicurrent.Interface{
					{
						Name:    "cni0",
						Mac:     "00:11:22:33:44:55",
						Sandbox: "/var/run/netns/bae464f1-6ee7-4ee2-826e-33293a9de95e",
					},
					{
						Name:    "ignoreme0",
						Mac:     "00:12:34:56:78:9a",
						Sandbox: "", // host interface
					},
				},
				IPs: []*cnicurrent.IPConfig{
					{
						Version: "4",
						Address: net.IPNet{
							IP:   net.IPv4(1, 1, 1, 1),
							Mask: net.CIDRMask(8, 32),
						},
						Gateway:   net.IPv4(1, 2, 3, 4),
						Interface: 0,
					},
				},
				Routes: []*cnitypes.Route{
					{
						Dst: net.IPNet{
							IP:   net.IPv4zero,
							Mask: net.CIDRMask(0, 32),
						},
						GW: nil,
					},
				},
				DNS: cnitypes.DNS{
					Nameservers: []string{"1.2.3.4"},
					Search:      []string{"some", "search"},
				},
			}),
			expectedNetworkConfig: map[string]interface{}{
				"version": float64(1),
				"config": []interface{}{
					map[string]interface{}{
						"mac_address": "00:11:22:33:44:55",
						"name":        "cni0",
						"subnets": []interface{}{
							map[string]interface{}{
								"address": "1.1.1.1",
								"netmask": "255.0.0.0",
								"type":    "static",
							},
						},
						"type": "physical",
					},
					map[string]interface{}{
						"destination": "0.0.0.0/0",
						"gateway":     "1.2.3.4",
						"type":        "route",
					},
					map[string]interface{}{
						"address": []interface{}{"1.2.3.4"},
						"search":  []interface{}{"some", "search"},
						"type":    "nameserver",
					},
				},
			},
		},
		{
			name: "pod with multiple network interfaces",
			config: buildNetworkedPodConfig(&cnicurrent.Result{
				Interfaces: []*cnicurrent.Interface{
					{
						Name:    "cni0",
						Mac:     "00:11:22:33:44:55",
						Sandbox: "/var/run/netns/bae464f1-6ee7-4ee2-826e-33293a9de95e",
					},
					{
						Name:    "cni1",
						Mac:     "00:11:22:33:ab:cd",
						Sandbox: "/var/run/netns/d920d2e2-5849-4c70-b9a6-5e3cb4f831cb",
					},
					{
						Name:    "ignoreme0",
						Mac:     "00:12:34:56:78:9a",
						Sandbox: "", // host interface
					},
				},
				IPs: []*cnicurrent.IPConfig{
					// Note that Gateway addresses are not used because
					// there's no routes with nil gateway
					{
						Version: "4",
						Address: net.IPNet{
							IP:   net.IPv4(1, 1, 1, 1),
							Mask: net.CIDRMask(8, 32),
						},
						Gateway:   net.IPv4(1, 2, 3, 4),
						Interface: 0,
					},
					{
						Version: "4",
						Address: net.IPNet{
							IP:   net.IPv4(192, 168, 100, 42),
							Mask: net.CIDRMask(24, 32),
						},
						Gateway:   net.IPv4(192, 168, 100, 1),
						Interface: 1,
					},
				},
				Routes: []*cnitypes.Route{
					{
						Dst: net.IPNet{
							IP:   net.IPv4zero,
							Mask: net.CIDRMask(0, 32),
						},
						GW: net.IPv4(1, 2, 3, 4),
					},
				},
				DNS: cnitypes.DNS{
					Nameservers: []string{"1.2.3.4"},
					Search:      []string{"some", "search"},
				},
			}),
			expectedNetworkConfig: map[string]interface{}{
				"version": float64(1),
				"config": []interface{}{
					map[string]interface{}{
						"mac_address": "00:11:22:33:44:55",
						"name":        "cni0",
						"subnets": []interface{}{
							map[string]interface{}{
								"address": "1.1.1.1",
								"netmask": "255.0.0.0",
								"type":    "static",
							},
						},
						"type": "physical",
					},
					map[string]interface{}{
						"mac_address": "00:11:22:33:ab:cd",
						"name":        "cni1",
						"subnets": []interface{}{
							map[string]interface{}{
								"address": "192.168.100.42",
								"netmask": "255.255.255.0",
								"type":    "static",
							},
						},
						"type": "physical",
					},
					map[string]interface{}{
						"destination": "0.0.0.0/0",
						"gateway":     "1.2.3.4",
						"type":        "route",
					},
					map[string]interface{}{
						"address": []interface{}{"1.2.3.4"},
						"search":  []interface{}{"some", "search"},
						"type":    "nameserver",
					},
				},
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			// we're not invoking actual iso generation here so "/foobar"
			// as isoDir will do
			g := NewCloudInitGenerator(tc.config, "/foobar")
			if tc.expectedMetaData != nil {
				metaDataBytes, err := g.generateMetaData()
				if err != nil {
					t.Fatalf("generateMetaData(): %v", err)
				}
				// meta-data is JSON
				var metaData map[string]interface{}
				if err := json.Unmarshal(metaDataBytes, &metaData); err != nil {
					t.Fatalf("Can't unmarshal meta-data: %v", err)
				}
				if !reflect.DeepEqual(tc.expectedMetaData, metaData) {
					t.Errorf("Bad meta-data:\n%s\nUnmarshaled:\n%s", metaDataBytes, spew.Sdump(metaData))
				}
			}
			userDataBytes, err := g.generateUserData(tc.volumeMap)
			if err != nil {
				t.Fatalf("generateUserData(): %v", err)
			}
			if tc.expectedUserDataStr != "" {
				// raw (script) user-data is compared verbatim
				if string(userDataBytes) != tc.expectedUserDataStr {
					t.Errorf("Bad user-data string:\n%s", userDataBytes)
				}
			} else {
				// otherwise user-data must be cloud-config YAML
				if !bytes.HasPrefix(userDataBytes, []byte("#cloud-config\n")) {
					t.Errorf("No #cloud-config header")
				}
				var userData map[string]interface{}
				if err := yaml.Unmarshal(userDataBytes, &userData); err != nil {
					t.Fatalf("Can't unmarshal user-data: %v", err)
				}
				if !reflect.DeepEqual(tc.expectedUserData, userData) {
					t.Errorf("Bad user-data:\n%s\nUnmarshaled:\n%s", userDataBytes, spew.Sdump(userData))
				}
			}
			if tc.expectedNetworkConfig != nil {
				networkConfigBytes, err := g.generateNetworkConfiguration()
				if err != nil {
					t.Fatalf("generateNetworkConfiguration(): %v", err)
				}
				var networkConfig map[string]interface{}
				if err := yaml.Unmarshal(networkConfigBytes, &networkConfig); err != nil {
					t.Fatalf("Can't unmarshal user-data: %v", err)
				}
				if !reflect.DeepEqual(tc.expectedNetworkConfig, networkConfig) {
					t.Errorf("Bad network-config:\n%s\nUnmarshaled:\n%s", networkConfigBytes, spew.Sdump(networkConfig))
				}
			}
		})
	}
}
// TestCloudInitDiskDef verifies that the cloud-init image is exposed to the
// domain as a read-only raw cdrom device backed by the generator's ISO path.
func TestCloudInitDiskDef(t *testing.T) {
	g := NewCloudInitGenerator(&VMConfig{
		PodName:           "foo",
		PodNamespace:      "default",
		ParsedAnnotations: &VirtletAnnotations{ImageType: "nocloud"},
	}, "")
	expected := &libvirtxml.DomainDisk{
		Type:     "file",
		Device:   "cdrom",
		Driver:   &libvirtxml.DomainDiskDriver{Name: "qemu", Type: "raw"},
		Source:   &libvirtxml.DomainDiskSource{File: g.IsoPath()},
		ReadOnly: &libvirtxml.DomainDiskReadOnly{},
	}
	if diskDef := g.DiskDef(); !reflect.DeepEqual(diskDef, expected) {
		t.Errorf("Bad disk definition:\n%s", spew.Sdump(diskDef))
	}
}
// TestCloudInitGenerateImage verifies that GenerateImage produces a nocloud
// ISO containing the expected meta-data, user-data and network-config files.
func TestCloudInitGenerateImage(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "config-")
	if err != nil {
		t.Fatalf("Can't create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	g := NewCloudInitGenerator(&VMConfig{
		PodName:           "foo",
		PodNamespace:      "default",
		ParsedAnnotations: &VirtletAnnotations{ImageType: "nocloud"},
	}, tmpDir)
	if err := g.GenerateImage(nil); err != nil {
		t.Fatalf("GenerateImage(): %v", err)
	}

	isoContents, err := testutils.IsoToMap(g.IsoPath())
	if err != nil {
		t.Fatalf("IsoToMap(): %v", err)
	}
	expected := map[string]interface{}{
		"meta-data":      "{\"instance-id\":\"foo.default\",\"local-hostname\":\"foo\"}",
		"network-config": "version: 1\n",
		"user-data":      "#cloud-config\n",
	}
	if !reflect.DeepEqual(isoContents, expected) {
		t.Errorf("Bad iso content:\n%s", spew.Sdump(isoContents))
	}
}
// TestEnvDataGeneration verifies that environment variables are rendered as
// newline-terminated key=value pairs.
func TestEnvDataGeneration(t *testing.T) {
	const expected = "key=value\n"
	g := NewCloudInitGenerator(&VMConfig{
		Environment: []*VMKeyValue{
			{Key: "key", Value: "value"},
		},
	}, "")
	if actual := g.generateEnvVarsContent(); actual != expected {
		t.Errorf("Bad environment data generated:\n%s\nExpected:\n%s", actual, expected)
	}
}
// verifyWriteFiles applies the updater to an empty user-data map and checks
// that the result consists of exactly the expected "write_files" entries.
func verifyWriteFiles(t *testing.T, u *writeFilesUpdater, expectedWriteFiles ...interface{}) {
	actual := make(map[string]interface{})
	u.updateUserData(actual)
	expected := map[string]interface{}{"write_files": expectedWriteFiles}
	if !reflect.DeepEqual(actual, expected) {
		t.Errorf("Bad user-data:\n%s\nExpected:\n%s", spew.Sdump(actual), spew.Sdump(expected))
	}
}
func withFakeVolumeDir(t *testing.T, subdir string, perms os.FileMode, toRun func(location string)) {
tmpDir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Can't create temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
var location, filePath string
if subdir != "" {
location = filepath.Join(tmpDir, subdir)
if err := os.MkdirAll(location, 0755); err != nil {
t.Fatalf("Can't create secrets directory in temp dir: %v", err)
}
filePath = filepath.Join(location, "file")
} else {
filePath = filepath.Join(tmpDir, "file")
location = filePath
}
f, err := os.Create(filePath)
if err != nil {
t.Fatalf("Can't create sample file in temp directory: %v", err)
}
if _, err := f.WriteString("test content"); err != nil {
f.Close()
t.Fatalf("Error writing test file: %v", err)
}
if perms != 0 {
if err := f.Chmod(perms); err != nil {
t.Fatalf("Chmod(): %v", err)
}
}
if err := f.Close(); err != nil {
t.Fatalf("Error closing test file: %v", err)
}
toRun(location)
}
// TestAddingSecrets checks that a file from a kubernetes secret volume is
// turned into a base64-encoded write_files entry preserving its permissions.
func TestAddingSecrets(t *testing.T) {
	withFakeVolumeDir(t, "volumes/kubernetes.io~secret/test-volume", 0640, func(location string) {
		updater := newWriteFilesUpdater([]*VMMount{
			{ContainerPath: "/container", HostPath: location},
		})
		updater.addSecrets()
		verifyWriteFiles(t, updater, map[string]interface{}{
			"path":        "/container/file",
			"content":     "dGVzdCBjb250ZW50",
			"encoding":    "b64",
			"permissions": "0640",
		})
	})
}
// TestAddingConfigMap checks that a file from a configmap volume is turned
// into a base64-encoded write_files entry with default 0644 permissions.
func TestAddingConfigMap(t *testing.T) {
	withFakeVolumeDir(t, "volumes/kubernetes.io~configmap/test-volume", 0, func(location string) {
		updater := newWriteFilesUpdater([]*VMMount{
			{ContainerPath: "/container", HostPath: location},
		})
		updater.addConfigMapEntries()
		verifyWriteFiles(t, updater, map[string]interface{}{
			"path":        "/container/file",
			"content":     "dGVzdCBjb250ZW50",
			"encoding":    "b64",
			"permissions": "0644",
		})
	})
}
// TestAddingFileLikeMount checks that a single-file mount is turned into a
// base64-encoded write_files entry targeting the container path itself.
func TestAddingFileLikeMount(t *testing.T) {
	withFakeVolumeDir(t, "", 0, func(location string) {
		updater := newWriteFilesUpdater([]*VMMount{
			{ContainerPath: "/container", HostPath: location},
		})
		updater.addFileLikeMounts()
		verifyWriteFiles(t, updater, map[string]interface{}{
			"path":        "/container",
			"content":     "dGVzdCBjb250ZW50",
			"encoding":    "b64",
			"permissions": "0644",
		})
	})
}
|
// Copyright 2017-2018 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package logfields defines common logging fields which are used across packages
package logfields
const (
	// LogSubsys is the field denoting the subsystem when logging
	LogSubsys = "subsys"
	// Signal is the field to print os signals on exit etc.
	Signal = "signal"
	// Node is a host machine in the cluster, running cilium
	Node = "node"
	// NodeName is a human readable name for the node
	NodeName = "nodeName"
	// EndpointID is the numeric endpoint identifier
	EndpointID = "endpointID"
	// EndpointState is the current endpoint state
	EndpointState = "endpointState"
	// EventUUID is an event unique identifier
	EventUUID = "eventID"
	// ContainerID is the container identifier
	ContainerID = "containerID"
	// IdentityLabels are the labels relevant for the security identity
	IdentityLabels = "identityLabels"
	// InfoLabels are the labels relevant for the security identity
	InfoLabels = "infoLabels"
	// Labels are any label, they may not be relevant to the security identity.
	Labels = "labels"
	// Controller is the name of the controller to log it.
	Controller = "controller"
	// Identity is the identifier of a security identity
	Identity = "identity"
	// OldIdentity is a previously used security identity
	OldIdentity = "oldIdentity"
	// PolicyRevision is the revision of the policy in the repository or of
	// the object in question
	PolicyRevision = "policyRevision"
	// DatapathPolicyRevision is the policy revision currently running in
	// the datapath
	DatapathPolicyRevision = "datapathPolicyRevision"
	// DesiredPolicyRevision is the latest policy revision as evaluated for
	// an endpoint. It is the desired policy revision to be implemented
	// into the datapath.
	DesiredPolicyRevision = "desiredPolicyRevision"
	// PolicyID is the identifier of a L3, L4 or L7 Policy. Ideally the .NumericIdentity
	PolicyID = "policyID"
	// AddedPolicyID is the .NumericIdentity, or a set of them
	AddedPolicyID = "policyID.Added"
	// DeletedPolicyID is the .NumericIdentity, or a set of them
	DeletedPolicyID = "policyID.Deleted"
	// L3PolicyID is the identifier of a L3 Policy
	L3PolicyID = "policyID.L3"
	// L4PolicyID is the identifier of a L4 Policy
	// NOTE(review): the tag's casing ("PolicyID.L4") is inconsistent with
	// L3PolicyID ("policyID.L3"); kept as-is since changing it would alter
	// emitted log fields.
	L4PolicyID = "PolicyID.L4"
	// DNSName is a FQDN or not fully qualified name intended for DNS lookups
	DNSName = "dnsName"
	// DNSRequestID is the DNS request id used by dns-proxy
	DNSRequestID = "DNSRequestID"
	// IPAddr is an IPv4 or IPv6 address
	IPAddr = "ipAddr"
	// IPMask is an IPv4 or IPv6 address mask
	IPMask = "ipMask"
	// IPv4 is an IPv4 address
	IPv4 = "ipv4"
	// IPv6 is an IPv6 address
	IPv6 = "ipv6"
	// BuildDuration is the time elapsed to build a BPF program
	BuildDuration = "buildDuration"
	// BPFCompilationTime is the time elapsed to build a BPF endpoint program
	BPFCompilationTime = "BPFCompilationTime"
	// EndpointRegenerationTime is the time elapsed to generate an endpoint
	EndpointRegenerationTime = "endpointRegenerationTime"
	// StartTime is the start time of an event
	StartTime = "startTime"
	// EndTime is the end time of an event
	EndTime = "endTime"
	// Duration is the duration of a measured operation
	Duration = "duration"
	// V4HealthIP is an address used to contact the cilium-health endpoint
	V4HealthIP = "v4healthIP.IPv4"
	// V6HealthIP is an address used to contact the cilium-health endpoint
	V6HealthIP = "v6healthIP.IPv6"
	// V4CiliumHostIP is an address used for the cilium_host interface.
	V4CiliumHostIP = "v4CiliumHostIP.IPv4"
	// V6CiliumHostIP is an address used for the cilium_host interface.
	V6CiliumHostIP = "v6CiliumHostIP.IPv6"
	// L3n4Addr is a L3 (IP) + L4 (port and protocol) address object.
	L3n4Addr = "l3n4Addr"
	// L3n4AddrID is the allocated ID for a L3n4Addr object
	L3n4AddrID = "l3n4AddrID"
	// Port is a L4 port
	Port = "port"
	// Family is the L3 protocol family
	Family = "family"
	// Protocol is the L4 protocol
	Protocol = "protocol"
	// V4Prefix is an IPv4 subnet/CIDR prefix
	V4Prefix = "v4Prefix"
	// V6Prefix is an IPv6 subnet/CIDR prefix
	V6Prefix = "v6Prefix"
	// Interface is an interface id/name on the system
	Interface = "interface"
	// Ipvlan is an ipvlan object or ID
	Ipvlan = "ipvlan"
	// Veth is a veth object or ID
	Veth = "veth"
	// VethPair is a tuple of Veth that are paired
	VethPair = "vethPair"
	// NetNSName is a name of a network namespace
	NetNSName = "netNSName"
	// Hash is a hash of something
	Hash = "hash"
	// ServiceName is the orchestration framework name for a service
	ServiceName = "serviceName"
	// ClusterName is the name of the cluster
	ClusterName = "clusterName"
	// ServiceID is the orchestration unique ID of a service
	ServiceID = "serviceID"
	// ServiceIP is the IP of the service
	ServiceIP = "serviceIP"
	// ServiceKey is the key of the service in a BPF map
	ServiceKey = "svcKey"
	// ServiceValue is the value of the service in a BPF map
	ServiceValue = "svcVal"
	// ServiceType is the type of the service
	ServiceType = "svcType"
	// BackendIDs is the map of backend IDs (lbmap) indexed by backend address
	BackendIDs = "backendIDs"
	// BackendID is the ID of the backend
	BackendID = "backendID"
	// Backends is the list of the service backends
	Backends = "backends"
	// BackendName is the name of the backend
	BackendName = "backendName"
	// SlaveSlot is the slot number in a service BPF map
	SlaveSlot = "slaveSlot"
	// CiliumNetworkPolicy is a cilium specific NetworkPolicy
	CiliumNetworkPolicy = "ciliumNetworkPolicy"
	// CiliumNetworkPolicyName is the name of a CiliumNetworkPolicy
	CiliumNetworkPolicyName = "ciliumNetworkPolicyName"
	// BPFMapKey is a key from a BPF map
	BPFMapKey = "bpfMapKey"
	// BPFMapValue is a value from a BPF map
	BPFMapValue = "bpfMapValue"
	// XDPDevice is the device name
	XDPDevice = "xdpDevice"
	// Device is the device name
	Device = "device"
	// IpvlanMasterDevice is the ipvlan master device name
	IpvlanMasterDevice = "ipvlanMasterDevice"
	// DatapathMode is the datapath mode name
	DatapathMode = "datapathMode"
	// Tunnel is the tunnel name
	Tunnel = "tunnel"
	// EndpointLabelSelector is a selector for Endpoints by label
	EndpointLabelSelector = "EndpointLabelSelector"
	// EndpointSelector is a selector for Endpoints
	EndpointSelector = "EndpointSelector"
	// Path is a filesystem path. It can be a file or directory.
	// Note: pkg/proxy/accesslog points to this variable so be careful when
	// changing the value
	Path = "file-path"
	// Line is a line number within a file
	Line = "line"
	// Object is used when "%+v" printing Go objects for debug or error handling.
	// It is often paired with logfields.Repr to render the object.
	Object = "obj"
	// Request is a request object received by us, reported in debug or error.
	// It is often paired with logfields.Repr to render the object.
	Request = "req"
	// Params are the parameters of a request, reported in debug or error.
	Params = "params"
	// Response is a response object received by us, reported in debug or error.
	// It is often paired with logfields.Repr to render the object.
	Response = "resp"
	// Route is a L2 or L3 Linux route
	Route = "route"
	// RetryUUID is a UUID identical for all retries of a set
	RetryUUID = "retryUUID"

	// Envoy xDS-protocol-specific

	// XDSStreamID is the ID of an xDS request stream.
	XDSStreamID = "xdsStreamID"
	// XDSAckedVersion is the version of an xDS resource acked by Envoy.
	XDSAckedVersion = "xdsAckedVersion"
	// XDSCachedVersion is the version of an xDS resource currently in cache.
	XDSCachedVersion = "xdsCachedVersion"
	// XDSTypeURL is the URL that identifies an xDS resource type.
	XDSTypeURL = "xdsTypeURL"
	// XDSNonce is a nonce sent in xDS requests and responses.
	XDSNonce = "xdsNonce"
	// XDSCanary is a boolean indicating whether a response is a dry run.
	XDSCanary = "xdsCanary"
	// XDSResourceName is the name of an xDS resource.
	XDSResourceName = "xdsResourceName"
	// XDSClientNode is the ID of an XDS client, e.g. an Envoy node.
	XDSClientNode = "xdsClientNode"
	// XDSResource is an xDS resource message.
	XDSResource = "xdsResource"

	// K8s-specific

	// K8sNodeID is the k8s ID of a K8sNode
	K8sNodeID = "k8sNodeID"
	// K8sPodName is the name of a k8s pod
	K8sPodName = "k8sPodName"
	// K8sSvcName is the name of a K8s service
	K8sSvcName = "k8sSvcName"
	// K8sSvcType is the k8s service type (e.g. NodePort, Loadbalancer etc.)
	K8sSvcType = "k8sSvcType"
	// K8sEndpointName is the k8s name for a k8s Endpoint (not a cilium Endpoint)
	K8sEndpointName = "k8sEndpointName"
	// K8sNamespace is the namespace something belongs to
	K8sNamespace = "k8sNamespace"
	// K8sIdentityAnnotation is a k8s non-identifying annotations on k8s objects
	K8sIdentityAnnotation = "k8sIdentityAnnotation"
	// K8sNetworkPolicy is a k8s NetworkPolicy object (not a CiliumNetworkObject, above).
	K8sNetworkPolicy = "k8sNetworkPolicy"
	// K8sNetworkPolicyName is the name of a K8sPolicyObject
	K8sNetworkPolicyName = "k8sNetworkPolicyName"
	// K8sIngress is a k8s Ingress service object
	K8sIngress = "k8sIngress"
	// K8sIngressName is the name of a K8sIngress
	K8sIngressName = "k8sIngressName"
	// K8sAPIVersion is the version of the k8s API an object has
	K8sAPIVersion = "k8sApiVersion"
	// Attempt is the attempt number if an operation is attempted multiple times
	Attempt = "attempt"
	// TrafficDirection represents the directionality of traffic with respect
	// to an endpoint.
	TrafficDirection = "trafficDirection"
	// Modification represents a type of state change operation (insert, delete,
	// upsert, etc.).
	Modification = "modification"
	// BPFMapName is the name of a BPF map.
	BPFMapName = "bpfMapName"
	// BPFHeaderfileHash is the hash of the BPF headerfile.
	BPFHeaderfileHash = "bpfHeaderfileHash"
	// BPFMapPath is the path of a BPF map in the filesystem.
	BPFMapPath = "bpfMapPath"
	// BPFMapFD is the file descriptor for a BPF map.
	BPFMapFD = "bpfMapFileDescriptor"
	// ThreadID is the Envoy thread ID.
	ThreadID = "threadID"
	// Reason is a human readable string describing why an operation was
	// performed
	Reason = "reason"
	// Debug is a boolean value for whether debug is set or not.
	Debug = "debug"
	// PID is an integer value for the process identifier of a process.
	PID = "pid"
	// PIDFile is a string value for the path to a file containing a PID.
	PIDFile = "pidfile"
	// Probe is the name of a status probe.
	Probe = "probe"
	// Key is the identity of the encryption key
	Key = "key"
	// SysParamName is the name of the kernel parameter (sysctl)
	SysParamName = "sysParamName"
	// SysParamValue is the value of the kernel parameter (sysctl)
	SysParamValue = "sysParamValue"
)
logfields: Add tag for service namespace
Signed-off-by: Sebastian Wicki <db043b2055cb3a47b2eb0b5aebf4e114a8c24a5a@isovalent.com>
// Copyright 2017-2018 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package logfields defines common logging fields which are used across packages
package logfields
const (
// LogSubsys is the field denoting the subsystem when logging
LogSubsys = "subsys"
// Signal is the field to print os signals on exit etc.
Signal = "signal"
// Node is a host machine in the cluster, running cilium
Node = "node"
// NodeName is a human readable name for the node
NodeName = "nodeName"
// EndpointID is the numeric endpoint identifier
EndpointID = "endpointID"
// EndpointState is the current endpoint state
EndpointState = "endpointState"
// EventUUID is an event unique identifier
EventUUID = "eventID"
// ContainerID is the container identifier
ContainerID = "containerID"
// IdentityLabels are the labels relevant for the security identity
IdentityLabels = "identityLabels"
// InfoLabels are the labels relevant for the security identity
InfoLabels = "infoLabels"
// Labels are any label, they may not be relevant to the security identity.
Labels = "labels"
// Controller is the name of the controller to log it.
Controller = "controller"
// Identity is the identifier of a security identity
Identity = "identity"
// OldIdentity is a previously used security identity
OldIdentity = "oldIdentity"
// PolicyRevision is the revision of the policy in the repository or of
// the object in question
PolicyRevision = "policyRevision"
// DatapathPolicyRevision is the policy revision currently running in
// the datapath
DatapathPolicyRevision = "datapathPolicyRevision"
// DesiredPolicyRevision is the latest policy revision as evaluated for
// an endpoint. It is the desired policy revision to be implemented
// into the datapath.
DesiredPolicyRevision = "desiredPolicyRevision"
// PolicyID is the identifier of a L3, L4 or L7 Policy. Ideally the .NumericIdentity
PolicyID = "policyID"
// AddedPolicyID is the .NumericIdentity, or set or them
AddedPolicyID = "policyID.Added"
// DeletedPolicyID is the .NumericIdentity, or set or them
DeletedPolicyID = "policyID.Deleted"
// L3PolicyID is the identifier of a L3 Policy
L3PolicyID = "policyID.L3"
// L4PolicyID is the identifier of a L4 Policy
L4PolicyID = "PolicyID.L4"
// DNSName is a FQDN or not fully qualified name intended for DNS lookups
DNSName = "dnsName"
// DNSRequestID is the DNS request id used by dns-proxy
DNSRequestID = "DNSRequestID"
// IPAddr is an IPV4 or IPv6 address
IPAddr = "ipAddr"
// IPMask is an IPV4 or IPv6 address mask
IPMask = "ipMask"
// IPv4 is an IPv4 address
IPv4 = "ipv4"
// IPv6 is an IPv6 address
IPv6 = "ipv6"
// BuildDuration is the time elapsed to build a BPF program
BuildDuration = "buildDuration"
// BPFCompilationTime is the time elapsed to build a BPF endpoint program
BPFCompilationTime = "BPFCompilationTime"
// EndpointRegenerationTime is the time elapsed to generate an endpoint
EndpointRegenerationTime = "endpointRegenerationTime"
// StartTime is the start time of an event
StartTime = "startTime"
// EndTime is the end time of an event
EndTime = "endTime"
// Duration is the duration of a measured operation
Duration = "duration"
// V4HealthIP is an address used to contact the cilium-health endpoint
V4HealthIP = "v4healthIP.IPv4"
// V6HealthIP is an address used to contact the cilium-health endpoint
V6HealthIP = "v6healthIP.IPv6"
// V4CiliumHostIP is an address used for the cilium_host interface.
V4CiliumHostIP = "v4CiliumHostIP.IPv4"
// V6CiliumHostIP is an address used for the cilium_host interface.
V6CiliumHostIP = "v6CiliumHostIP.IPv6"
// L3n4Addr is a L3 (IP) + L4 (port and protocol) address object.
L3n4Addr = "l3n4Addr"
// L3n4AddrID is the allocated ID for a L3n4Addr object
L3n4AddrID = "l3n4AddrID"
// Port is a L4 port
Port = "port"
// Family is the L3 protocol family
Family = "family"
// Protocol is the L4 protocol
Protocol = "protocol"
// V4Prefix is a IPv4 subnet/CIDR prefix
V4Prefix = "v4Prefix"
// V6Prefix is a IPv6 subnet/CIDR prefix
V6Prefix = "v6Prefix"
// Interface is an interface id/name on the system
Interface = "interface"
// Ipvlan is a ipvlan object or ID
Ipvlan = "ipvlan"
// Veth is a veth object or ID
Veth = "veth"
// VethPair is a tuple of Veth that are paired
VethPair = "vethPair"
// NetNSName is a name of a network namespace
NetNSName = "netNSName"
// Hash is a hash of something
Hash = "hash"
// ServiceName is the orchestration framework name for a service
ServiceName = "serviceName"
// ServiceNamespace is the orchestration framework namespace of a service name
ServiceNamespace = "serviceNamespace"
// ClusterName is the name of the cluster
ClusterName = "clusterName"
// ServiceID is the orchestration unique ID of a service
ServiceID = "serviceID"
// ServiceIP is the IP of the service
ServiceIP = "serviceIP"
// ServiceKey is the key of the service in a BPF map
ServiceKey = "svcKey"
// ServiceValue is the value of the service in a BPF map
ServiceValue = "svcVal"
// ServiceType is the type of the service
ServiceType = "svcType"
// BackendIDs is the map of backend IDs (lbmap) indexed by backend address
BackendIDs = "backendIDs"
// BackendID is the ID of the backend
BackendID = "backendID"
// Backends is the list of the service backends
Backends = "backends"
// BackendName is the name of the backend
BackendName = "backendName"
// SlaveSlot is the slot number in a service BPF map
SlaveSlot = "slaveSlot"
// CiliumNetworkPolicy is a cilium specific NetworkPolicy
CiliumNetworkPolicy = "ciliumNetworkPolicy"
// CiliumNetworkPolicyName is the name of a CiliumNetworkPolicy
CiliumNetworkPolicyName = "ciliumNetworkPolicyName"
// BPFMapKey is a key from a BPF map
BPFMapKey = "bpfMapKey"
// BPFMapValue is a value from a BPF map
BPFMapValue = "bpfMapValue"
// XDPDevice is the device name
XDPDevice = "xdpDevice"
// Device is the device name
Device = "device"
// IpvlanMasterDevice is the ipvlan master device name
IpvlanMasterDevice = "ipvlanMasterDevice"
// DatapathMode is the datapath mode name
DatapathMode = "datapathMode"
// Tunnel is the tunnel name
Tunnel = "tunnel"
// EndpointLabelSelector is a selector for Endpoints by label
EndpointLabelSelector = "EndpointLabelSelector"
// EndpointSelector is a selector for Endpoints
EndpointSelector = "EndpointSelector"
// Path is a filesystem path. It can be a file or directory.
// Note: pkg/proxy/accesslog points to this variable so be careful when
// changing the value
Path = "file-path"
// Line is a line number within a file
Line = "line"
// Object is used when "%+v" printing Go objects for debug or error handling.
// It is often paired with logfields.Repr to render the object.
Object = "obj"
// Request is a request object received by us, reported in debug or error.
// It is often paired with logfields.Repr to render the object.
Request = "req"
// Params are the parameters of a request, reported in debug or error.
Params = "params"
// Response is a response object received by us, reported in debug or error.
// It is often paired with logfields.Repr to render the object.
Response = "resp"
// Route is a L2 or L3 Linux route
Route = "route"
// RetryUUID is an UUID identical for all retries of a set
RetryUUID = "retryUUID"
// Envoy xDS-protocol-specific
// XDSStreamID is the ID of an xDS request stream.
XDSStreamID = "xdsStreamID"
// XDSAckedVersion is the version of an xDS resource acked by Envoy.
XDSAckedVersion = "xdsAckedVersion"
// XDSCachedVersion is the version of an xDS resource currently in cache.
XDSCachedVersion = "xdsCachedVersion"
// XDSTypeURL is the URL that identifies an xDS resource type.
XDSTypeURL = "xdsTypeURL"
// XDSNonce is a nonce sent in xDS requests and responses.
XDSNonce = "xdsNonce"
// XDSCanary is a boolean indicating whether a response is a dry run.
XDSCanary = "xdsCanary"
// XDSResourceName is the name of an xDS resource.
XDSResourceName = "xdsResourceName"
// XDSClientNode is the ID of an XDS client, e.g. an Envoy node.
XDSClientNode = "xdsClientNode"
// XDSResource is an xDS resource message.
XDSResource = "xdsResource"
// K8s-specific
// K8sNodeID is the k8s ID of a K8sNode
K8sNodeID = "k8sNodeID"
// K8sPodName is the name of a k8s pod
K8sPodName = "k8sPodName"
// K8sSvcName is the name of a K8s service
K8sSvcName = "k8sSvcName"
// K8sSvcType is the k8s service type (e.g. NodePort, Loadbalancer etc.)
K8sSvcType = "k8sSvcType"
// K8sEndpointName is the k8s name for a k8s Endpoint (not a cilium Endpoint)
K8sEndpointName = "k8sEndpointName"
// K8sNamespace is the namespace something belongs to
K8sNamespace = "k8sNamespace"
// K8sIdentityAnnotation is a k8s non-identifying annotations on k8s objects
K8sIdentityAnnotation = "k8sIdentityAnnotation"
// K8sNetworkPolicy is a k8s NetworkPolicy object (not a CiliumNetworkObject, above).
K8sNetworkPolicy = "k8sNetworkPolicy"
// K8sNetworkPolicyName is the name of a K8sPolicyObject
K8sNetworkPolicyName = "k8sNetworkPolicyName"
// K8sIngress is a k8s Ingress service object
K8sIngress = "k8sIngress"
// K8sIngressName is the name of a K8sIngress
K8sIngressName = "k8sIngressName"
// K8sAPIVersion is the version of the k8s API an object has
K8sAPIVersion = "k8sApiVersion"
// Attempt is the attempt number if an operation is attempted multiple times
Attempt = "attempt"
// TrafficDirection represents the directionality of traffic with respect
// to an endpoint.
TrafficDirection = "trafficDirection"
// Modification represents a type of state change operation (insert, delete,
// upsert, etc.).
Modification = "modification"
// BPFMapName is the name of a BPF map.
BPFMapName = "bpfMapName"
// BPFHeaderHash is the hash of the BPF header.
BPFHeaderfileHash = "bpfHeaderfileHash"
// BPFMapPath is the path of a BPF map in the filesystem.
BPFMapPath = "bpfMapPath"
// BPFMapFD is the file descriptor for a BPF map.
BPFMapFD = "bpfMapFileDescriptor"
// ThreadID is the Envoy thread ID.
ThreadID = "threadID"
// Reason is a human readable string describing why an operation was
// performed
Reason = "reason"
// Debug is a boolean value for whether debug is set or not.
Debug = "debug"
// PID is an integer value for the process identifier of a process.
PID = "pid"
// PIDFile is a string value for the path to a file containing a PID.
PIDFile = "pidfile"
// Probe is the name of a status probe.
Probe = "probe"
// Key is the identity of the encryption key
Key = "key"
// SysParamName is the name of the kernel parameter (sysctl)
SysParamName = "sysParamName"
// SysParamValue is the value of the kernel parameter (sysctl)
SysParamValue = "sysParamValue"
)
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package iptables
import (
	"bytes"
	"errors"
	"fmt"
	"net"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	v1 "k8s.io/api/core/v1"
	discovery "k8s.io/api/discovery/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/component-base/metrics/testutil"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/proxy"
	"k8s.io/kubernetes/pkg/proxy/healthcheck"
	"k8s.io/kubernetes/pkg/proxy/metrics"
	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
	proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
	utilproxytest "k8s.io/kubernetes/pkg/proxy/util/testing"
	"k8s.io/kubernetes/pkg/util/async"
	"k8s.io/kubernetes/pkg/util/conntrack"
	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
	iptablestest "k8s.io/kubernetes/pkg/util/iptables/testing"
	"k8s.io/utils/exec"
	fakeexec "k8s.io/utils/exec/testing"
	utilnet "k8s.io/utils/net"
	utilpointer "k8s.io/utils/pointer"
)
// checkAllLines parses an iptables-save dump for the given table and verifies
// that every chain declaration found matches the expected line for that chain.
// Chains that appear in the dump but not in expectedLines are reported as
// failures; chains missing from the dump are not detected here.
func checkAllLines(t *testing.T, table utiliptables.Table, save []byte, expectedLines map[utiliptables.Chain]string) {
	for chain, lineBytes := range utiliptables.GetChainLines(table, save) {
		got := string(lineBytes)
		want, ok := expectedLines[chain]
		if !ok {
			t.Errorf("getChainLines expected chain not present: %s", chain)
			continue
		}
		if want != got {
			t.Errorf("getChainLines expected chain line not present. For chain: %s Expected: %s Got: %s", chain, want, got)
		}
	}
}
// TestGetChainLines feeds a single-table iptables-save dump through
// GetChainLines and checks that each chain declaration line is recovered.
func TestGetChainLines(t *testing.T) {
	saveOutput := `# Generated by iptables-save v1.4.7 on Wed Oct 29 14:56:01 2014
*nat
:PREROUTING ACCEPT [2136997:197881818]
:POSTROUTING ACCEPT [4284525:258542680]
:OUTPUT ACCEPT [5901660:357267963]
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
COMMIT
# Completed on Wed Oct 29 14:56:01 2014`
	wantLines := map[utiliptables.Chain]string{
		utiliptables.ChainPrerouting:  ":PREROUTING ACCEPT [2136997:197881818]",
		utiliptables.ChainPostrouting: ":POSTROUTING ACCEPT [4284525:258542680]",
		utiliptables.ChainOutput:      ":OUTPUT ACCEPT [5901660:357267963]",
	}
	checkAllLines(t, utiliptables.TableNAT, []byte(saveOutput), wantLines)
}
// TestGetChainLinesMultipleTables verifies that GetChainLines extracts chain
// declarations for the requested table (nat) only, ignoring the trailing
// *filter table that follows in the same iptables-save dump.
func TestGetChainLinesMultipleTables(t *testing.T) {
	// Raw dump contains two tables; only *nat chains should be returned.
	iptablesSave := `# Generated by iptables-save v1.4.21 on Fri Aug 7 14:47:37 2015
*nat
:PREROUTING ACCEPT [2:138]
:INPUT ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
:DOCKER - [0:0]
:KUBE-NODEPORT-CONTAINER - [0:0]
:KUBE-NODEPORT-HOST - [0:0]
:KUBE-PORTALS-CONTAINER - [0:0]
:KUBE-PORTALS-HOST - [0:0]
:KUBE-SVC-1111111111111111 - [0:0]
:KUBE-SVC-2222222222222222 - [0:0]
:KUBE-SVC-3333333333333333 - [0:0]
:KUBE-SVC-4444444444444444 - [0:0]
:KUBE-SVC-5555555555555555 - [0:0]
:KUBE-SVC-6666666666666666 - [0:0]
-A PREROUTING -m comment --comment "handle ClusterIPs; NOTE: this must be before the NodePort rules" -j KUBE-PORTALS-CONTAINER
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
-A PREROUTING -m addrtype --dst-type LOCAL -m comment --comment "handle service NodePorts; NOTE: this must be the last rule in the chain" -j KUBE-NODEPORT-CONTAINER
-A OUTPUT -m comment --comment "handle ClusterIPs; NOTE: this must be before the NodePort rules" -j KUBE-PORTALS-HOST
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
-A OUTPUT -m addrtype --dst-type LOCAL -m comment --comment "handle service NodePorts; NOTE: this must be the last rule in the chain" -j KUBE-NODEPORT-HOST
-A POSTROUTING -s 10.246.1.0/24 ! -o cbr0 -j MASQUERADE
-A POSTROUTING -s 10.0.2.15/32 -d 10.0.2.15/32 -m comment --comment "handle pod connecting to self" -j MASQUERADE
-A KUBE-PORTALS-CONTAINER -d 10.247.0.1/32 -p tcp -m comment --comment "portal for default/kubernetes:" -m state --state NEW -m tcp --dport 443 -j KUBE-SVC-5555555555555555
-A KUBE-PORTALS-CONTAINER -d 10.247.0.10/32 -p udp -m comment --comment "portal for kube-system/kube-dns:dns" -m state --state NEW -m udp --dport 53 -j KUBE-SVC-6666666666666666
-A KUBE-PORTALS-CONTAINER -d 10.247.0.10/32 -p tcp -m comment --comment "portal for kube-system/kube-dns:dns-tcp" -m state --state NEW -m tcp --dport 53 -j KUBE-SVC-2222222222222222
-A KUBE-PORTALS-HOST -d 10.247.0.1/32 -p tcp -m comment --comment "portal for default/kubernetes:" -m state --state NEW -m tcp --dport 443 -j KUBE-SVC-5555555555555555
-A KUBE-PORTALS-HOST -d 10.247.0.10/32 -p udp -m comment --comment "portal for kube-system/kube-dns:dns" -m state --state NEW -m udp --dport 53 -j KUBE-SVC-6666666666666666
-A KUBE-PORTALS-HOST -d 10.247.0.10/32 -p tcp -m comment --comment "portal for kube-system/kube-dns:dns-tcp" -m state --state NEW -m tcp --dport 53 -j KUBE-SVC-2222222222222222
-A KUBE-SVC-1111111111111111 -p udp -m comment --comment "kube-system/kube-dns:dns" -m recent --set --name KUBE-SVC-1111111111111111 --mask 255.255.255.255 --rsource -j DNAT --to-destination 10.246.1.2:53
-A KUBE-SVC-2222222222222222 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-SVC-3333333333333333
-A KUBE-SVC-3333333333333333 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp" -m recent --set --name KUBE-SVC-3333333333333333 --mask 255.255.255.255 --rsource -j DNAT --to-destination 10.246.1.2:53
-A KUBE-SVC-4444444444444444 -p tcp -m comment --comment "default/kubernetes:" -m recent --set --name KUBE-SVC-4444444444444444 --mask 255.255.255.255 --rsource -j DNAT --to-destination 10.245.1.2:443
-A KUBE-SVC-5555555555555555 -m comment --comment "default/kubernetes:" -j KUBE-SVC-4444444444444444
-A KUBE-SVC-6666666666666666 -m comment --comment "kube-system/kube-dns:dns" -j KUBE-SVC-1111111111111111
COMMIT
# Completed on Fri Aug 7 14:47:37 2015
# Generated by iptables-save v1.4.21 on Fri Aug 7 14:47:37 2015
*filter
:INPUT ACCEPT [17514:83115836]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [8909:688225]
:DOCKER - [0:0]
-A FORWARD -o cbr0 -j DOCKER
-A FORWARD -o cbr0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -i cbr0 ! -o cbr0 -j ACCEPT
-A FORWARD -i cbr0 -o cbr0 -j ACCEPT
COMMIT
`
	// Expected set covers every chain declared in the *nat table above,
	// including the Docker and legacy userspace-proxy KUBE-* chains.
	expected := map[utiliptables.Chain]string{
		utiliptables.ChainPrerouting:                    ":PREROUTING ACCEPT [2:138]",
		utiliptables.Chain("INPUT"):                     ":INPUT ACCEPT [0:0]",
		utiliptables.Chain("OUTPUT"):                    ":OUTPUT ACCEPT [0:0]",
		utiliptables.ChainPostrouting:                   ":POSTROUTING ACCEPT [0:0]",
		utiliptables.Chain("DOCKER"):                    ":DOCKER - [0:0]",
		utiliptables.Chain("KUBE-NODEPORT-CONTAINER"):   ":KUBE-NODEPORT-CONTAINER - [0:0]",
		utiliptables.Chain("KUBE-NODEPORT-HOST"):        ":KUBE-NODEPORT-HOST - [0:0]",
		utiliptables.Chain("KUBE-PORTALS-CONTAINER"):    ":KUBE-PORTALS-CONTAINER - [0:0]",
		utiliptables.Chain("KUBE-PORTALS-HOST"):         ":KUBE-PORTALS-HOST - [0:0]",
		utiliptables.Chain("KUBE-SVC-1111111111111111"): ":KUBE-SVC-1111111111111111 - [0:0]",
		utiliptables.Chain("KUBE-SVC-2222222222222222"): ":KUBE-SVC-2222222222222222 - [0:0]",
		utiliptables.Chain("KUBE-SVC-3333333333333333"): ":KUBE-SVC-3333333333333333 - [0:0]",
		utiliptables.Chain("KUBE-SVC-4444444444444444"): ":KUBE-SVC-4444444444444444 - [0:0]",
		utiliptables.Chain("KUBE-SVC-5555555555555555"): ":KUBE-SVC-5555555555555555 - [0:0]",
		utiliptables.Chain("KUBE-SVC-6666666666666666"): ":KUBE-SVC-6666666666666666 - [0:0]",
	}
	checkAllLines(t, utiliptables.TableNAT, []byte(iptablesSave), expected)
}
// TestDeleteEndpointConnectionsIPv4 verifies that deleteEndpointConnections
// invokes the conntrack CLI exactly once per UDP/SCTP endpoint with the
// expected command line, skips TCP endpoints (no conntrack cleanup needed),
// stays quiet on the benign "no connections to delete" error, and logs one
// error for any other conntrack failure.
func TestDeleteEndpointConnectionsIPv4(t *testing.T) {
	const (
		UDP  = v1.ProtocolUDP
		TCP  = v1.ProtocolTCP
		SCTP = v1.ProtocolSCTP
	)

	testCases := []struct {
		description  string
		svcName      string
		svcIP        string
		svcPort      int32
		protocol     v1.Protocol
		endpoint     string                // IP:port endpoint
		epSvcPair    proxy.ServiceEndpoint // Will be generated by test
		simulatedErr string
	}{
		{
			description: "V4 UDP",
			svcName:     "v4-udp",
			svcIP:       "10.96.1.1",
			svcPort:     80,
			protocol:    UDP,
			endpoint:    "10.240.0.3:80",
		},
		{
			description: "V4 TCP",
			svcName:     "v4-tcp",
			svcIP:       "10.96.2.2",
			svcPort:     80,
			protocol:    TCP,
			endpoint:    "10.240.0.4:80",
		},
		{
			description: "V4 SCTP",
			svcName:     "v4-sctp",
			svcIP:       "10.96.3.3",
			svcPort:     80,
			protocol:    SCTP,
			endpoint:    "10.240.0.5:80",
		},
		{
			description:  "V4 UDP, nothing to delete, benign error",
			svcName:      "v4-udp-nothing-to-delete",
			svcIP:        "10.96.1.1",
			svcPort:      80,
			protocol:     UDP,
			endpoint:     "10.240.0.3:80",
			simulatedErr: conntrack.NoConnectionToDelete,
		},
		{
			description:  "V4 UDP, unexpected error, should be glogged",
			svcName:      "v4-udp-simulated-error",
			svcIP:        "10.96.1.1",
			svcPort:      80,
			protocol:     UDP,
			endpoint:     "10.240.0.3:80",
			simulatedErr: "simulated error",
		},
	}

	// Create a fake executor for the conntrack utility. This should only be
	// invoked for UDP and SCTP connections, since no conntrack cleanup is needed for TCP
	fcmd := fakeexec.FakeCmd{}
	fexec := fakeexec.FakeExec{
		LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
	}
	execFunc := func(cmd string, args ...string) exec.Cmd {
		return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
	}
	for _, tc := range testCases {
		if conntrack.IsClearConntrackNeeded(tc.protocol) {
			var cmdOutput string
			var simErr error
			if tc.simulatedErr == "" {
				cmdOutput = "1 flow entries have been deleted"
			} else {
				// errors.New, not fmt.Errorf: simulatedErr is plain data, not a
				// format string, and may legally contain '%'.
				simErr = errors.New(tc.simulatedErr)
			}
			cmdFunc := func() ([]byte, []byte, error) { return []byte(cmdOutput), nil, simErr }
			fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
			fexec.CommandScript = append(fexec.CommandScript, execFunc)
		}
	}

	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	fp.exec = &fexec

	// Register one service per test case so the proxier can resolve svcIP.
	for _, tc := range testCases {
		makeServiceMap(fp,
			makeTestService("ns1", tc.svcName, func(svc *v1.Service) {
				svc.Spec.ClusterIP = tc.svcIP
				svc.Spec.Ports = []v1.ServicePort{{
					Name:     "p80",
					Port:     tc.svcPort,
					Protocol: tc.protocol,
				}}
				svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			}),
		)
		fp.serviceMap.Update(fp.serviceChanges)
	}

	// Run the test cases
	for _, tc := range testCases {
		priorExecs := fexec.CommandCalls
		priorGlogErrs := klog.Stats.Error.Lines()

		svc := proxy.ServicePortName{
			NamespacedName: types.NamespacedName{Namespace: "ns1", Name: tc.svcName},
			Port:           "p80",
			Protocol:       tc.protocol,
		}
		input := []proxy.ServiceEndpoint{
			{
				Endpoint:        tc.endpoint,
				ServicePortName: svc,
			},
		}

		fp.deleteEndpointConnections(input)

		// For UDP and SCTP connections, check the executed conntrack command
		var expExecs int
		if conntrack.IsClearConntrackNeeded(tc.protocol) {
			isIPv6 := func(ip string) bool {
				netIP := net.ParseIP(ip)
				return netIP.To4() == nil
			}
			endpointIP := utilproxy.IPPart(tc.endpoint)
			expectCommand := fmt.Sprintf("conntrack -D --orig-dst %s --dst-nat %s -p %s", tc.svcIP, endpointIP, strings.ToLower(string(tc.protocol)))
			if isIPv6(endpointIP) {
				expectCommand += " -f ipv6"
			}
			actualCommand := strings.Join(fcmd.CombinedOutputLog[fexec.CommandCalls-1], " ")
			if actualCommand != expectCommand {
				t.Errorf("%s: Expected command: %s, but executed %s", tc.description, expectCommand, actualCommand)
			}
			expExecs = 1
		}

		// Check the number of times conntrack was executed
		execs := fexec.CommandCalls - priorExecs
		if execs != expExecs {
			t.Errorf("%s: Expected conntrack to be executed %d times, but got %d", tc.description, expExecs, execs)
		}

		// Check the number of new glog errors
		var expGlogErrs int64
		if tc.simulatedErr != "" && tc.simulatedErr != conntrack.NoConnectionToDelete {
			expGlogErrs = 1
		}
		glogErrs := klog.Stats.Error.Lines() - priorGlogErrs
		if glogErrs != expGlogErrs {
			t.Errorf("%s: Expected %d glogged errors, but got %d", tc.description, expGlogErrs, glogErrs)
		}
	}
}
// TestDeleteEndpointConnectionsIPv6 mirrors TestDeleteEndpointConnectionsIPv4
// for IPv6 services/endpoints: conntrack must be invoked once per UDP/SCTP
// endpoint with "-f ipv6" appended, and not at all for TCP.
func TestDeleteEndpointConnectionsIPv6(t *testing.T) {
	const (
		UDP  = v1.ProtocolUDP
		TCP  = v1.ProtocolTCP
		SCTP = v1.ProtocolSCTP
	)

	testCases := []struct {
		description  string
		svcName      string
		svcIP        string
		svcPort      int32
		protocol     v1.Protocol
		endpoint     string                // IP:port endpoint
		epSvcPair    proxy.ServiceEndpoint // Will be generated by test
		simulatedErr string
	}{
		{
			description: "V6 UDP",
			svcName:     "v6-udp",
			svcIP:       "fd00:1234::20",
			svcPort:     80,
			protocol:    UDP,
			endpoint:    "[2001:db8::2]:80",
		},
		{
			description: "V6 TCP",
			svcName:     "v6-tcp",
			svcIP:       "fd00:1234::30",
			svcPort:     80,
			protocol:    TCP,
			endpoint:    "[2001:db8::3]:80",
		},
		{
			description: "V6 SCTP",
			svcName:     "v6-sctp",
			svcIP:       "fd00:1234::40",
			svcPort:     80,
			protocol:    SCTP,
			endpoint:    "[2001:db8::4]:80",
		},
	}

	// Create a fake executor for the conntrack utility. This should only be
	// invoked for UDP and SCTP connections, since no conntrack cleanup is needed for TCP
	fcmd := fakeexec.FakeCmd{}
	fexec := fakeexec.FakeExec{
		LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
	}
	execFunc := func(cmd string, args ...string) exec.Cmd {
		return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
	}
	for _, tc := range testCases {
		if conntrack.IsClearConntrackNeeded(tc.protocol) {
			var cmdOutput string
			var simErr error
			if tc.simulatedErr == "" {
				cmdOutput = "1 flow entries have been deleted"
			} else {
				// errors.New, not fmt.Errorf: simulatedErr is plain data, not a
				// format string, and may legally contain '%'.
				simErr = errors.New(tc.simulatedErr)
			}
			cmdFunc := func() ([]byte, []byte, error) { return []byte(cmdOutput), nil, simErr }
			fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
			fexec.CommandScript = append(fexec.CommandScript, execFunc)
		}
	}

	ipt := iptablestest.NewIPv6Fake()
	fp := NewFakeProxier(ipt, false)
	fp.exec = &fexec

	// Register one service per test case so the proxier can resolve svcIP.
	for _, tc := range testCases {
		makeServiceMap(fp,
			makeTestService("ns1", tc.svcName, func(svc *v1.Service) {
				svc.Spec.ClusterIP = tc.svcIP
				svc.Spec.Ports = []v1.ServicePort{{
					Name:     "p80",
					Port:     tc.svcPort,
					Protocol: tc.protocol,
				}}
				svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			}),
		)
		fp.serviceMap.Update(fp.serviceChanges)
	}

	// Run the test cases
	for _, tc := range testCases {
		priorExecs := fexec.CommandCalls
		priorGlogErrs := klog.Stats.Error.Lines()

		svc := proxy.ServicePortName{
			NamespacedName: types.NamespacedName{Namespace: "ns1", Name: tc.svcName},
			Port:           "p80",
			Protocol:       tc.protocol,
		}
		input := []proxy.ServiceEndpoint{
			{
				Endpoint:        tc.endpoint,
				ServicePortName: svc,
			},
		}

		fp.deleteEndpointConnections(input)

		// For UDP and SCTP connections, check the executed conntrack command
		var expExecs int
		if conntrack.IsClearConntrackNeeded(tc.protocol) {
			isIPv6 := func(ip string) bool {
				netIP := net.ParseIP(ip)
				return netIP.To4() == nil
			}
			endpointIP := utilproxy.IPPart(tc.endpoint)
			expectCommand := fmt.Sprintf("conntrack -D --orig-dst %s --dst-nat %s -p %s", tc.svcIP, endpointIP, strings.ToLower(string(tc.protocol)))
			if isIPv6(endpointIP) {
				expectCommand += " -f ipv6"
			}
			actualCommand := strings.Join(fcmd.CombinedOutputLog[fexec.CommandCalls-1], " ")
			if actualCommand != expectCommand {
				t.Errorf("%s: Expected command: %s, but executed %s", tc.description, expectCommand, actualCommand)
			}
			expExecs = 1
		}

		// Check the number of times conntrack was executed
		execs := fexec.CommandCalls - priorExecs
		if execs != expExecs {
			t.Errorf("%s: Expected conntrack to be executed %d times, but got %d", tc.description, expExecs, execs)
		}

		// Check the number of new glog errors
		var expGlogErrs int64
		if tc.simulatedErr != "" && tc.simulatedErr != conntrack.NoConnectionToDelete {
			expGlogErrs = 1
		}
		glogErrs := klog.Stats.Error.Lines() - priorGlogErrs
		if glogErrs != expGlogErrs {
			t.Errorf("%s: Expected %d glogged errors, but got %d", tc.description, expGlogErrs, glogErrs)
		}
	}
}
// fakeCloseable is a no-op stand-in for utilproxy.Closeable.
type fakeCloseable struct{}

// Close fakes out the close() used by syncProxyRules to release a local
// port; it always succeeds.
func (fc *fakeCloseable) Close() error {
	return nil
}
// fakePortOpener implements portOpener, recording every port it is asked to
// open so tests can inspect them.
type fakePortOpener struct {
	openPorts []*utilnet.LocalPort
}

// OpenLocalPort fakes out the listen() and bind() used by syncProxyRules to
// lock a local port: it records lp and hands back a no-op closer.
func (fpo *fakePortOpener) OpenLocalPort(lp *utilnet.LocalPort) (utilnet.Closeable, error) {
	fpo.openPorts = append(fpo.openPorts, lp)
	return &fakeCloseable{}, nil
}
// testHostname is the node name used by the fake proxier and endpoint
// trackers throughout these tests.
const testHostname = "test-hostname"
// NewFakeProxier returns a Proxier backed entirely by fakes: the supplied
// (fake) iptables implementation, a fake exec, a fake port opener and a fake
// service health server. The IP family is derived from ipt. The proxier is
// marked initialized so tests can drive syncProxyRules directly.
func NewFakeProxier(ipt utiliptables.Interface, endpointSlicesEnabled bool) *Proxier {
	// TODO: Call NewProxier after refactoring out the goroutine
	// invocation into a Run() method.
	ipfamily := v1.IPv4Protocol
	if ipt.IsIPv6() {
		ipfamily = v1.IPv6Protocol
	}
	// NOTE(review): error deliberately dropped — the CIDR literal is fixed;
	// presumably NewDetectLocalByCIDR cannot fail for it. Confirm.
	detectLocal, _ := proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/24", ipt)
	p := &Proxier{
		exec:                     &fakeexec.FakeExec{},
		serviceMap:               make(proxy.ServiceMap),
		serviceChanges:           proxy.NewServiceChangeTracker(newServiceInfo, ipfamily, nil, nil),
		endpointsMap:             make(proxy.EndpointsMap),
		endpointsChanges:         proxy.NewEndpointChangeTracker(testHostname, newEndpointInfo, ipfamily, nil, endpointSlicesEnabled, nil),
		iptables:                 ipt,
		masqueradeMark:           "0x4000",
		localDetector:            detectLocal,
		hostname:                 testHostname,
		portsMap:                 make(map[utilnet.LocalPort]utilnet.Closeable),
		portMapper:               &fakePortOpener{[]*utilnet.LocalPort{}},
		serviceHealthServer:      healthcheck.NewFakeServiceHealthServer(),
		precomputedProbabilities: make([]string, 0, 1001),
		iptablesData:             bytes.NewBuffer(nil),
		existingFilterChainsData: bytes.NewBuffer(nil),
		filterChains:             bytes.NewBuffer(nil),
		filterRules:              bytes.NewBuffer(nil),
		natChains:                bytes.NewBuffer(nil),
		natRules:                 bytes.NewBuffer(nil),
		nodePortAddresses:        make([]string, 0),
		networkInterfacer:        utilproxytest.NewFakeNetwork(),
	}
	// Pretend the initial sync already happened so syncProxyRules runs
	// immediately when invoked by a test.
	p.setInitialized(true)
	// minSyncPeriod=0, syncPeriod=1 minute, burst=1 — tests trigger syncs
	// manually, so the runner's schedule is irrelevant.
	p.syncRunner = async.NewBoundedFrequencyRunner("test-sync-runner", p.syncProxyRules, 0, time.Minute, 1)
	return p
}
// hasSessionAffinityRule reports whether any of the rules carries the
// "recent" match module key that the proxier uses for session affinity.
func hasSessionAffinityRule(rules []iptablestest.Rule) bool {
	for _, rule := range rules {
		if _, found := rule[iptablestest.Recent]; found {
			return true
		}
	}
	return false
}
// hasJump reports whether some rule jumps to destChain and, when destIP
// and/or destPort are set (non-empty / non-zero), also matches that
// destination address and port. A rule missing the corresponding field
// entirely still counts as a match for that criterion.
func hasJump(rules []iptablestest.Rule, destChain, destIP string, destPort int) bool {
	portStr := strconv.Itoa(destPort)
	sawChain := false
	for _, rule := range rules {
		if rule[iptablestest.Jump] != destChain {
			continue
		}
		sawChain = true
		if destIP != "" {
			ipOK := strings.Contains(rule[iptablestest.Destination], destIP)
			portOK := rule[iptablestest.DPort] == "" || strings.Contains(rule[iptablestest.DPort], portStr)
			if ipOK && portOK {
				return true
			}
			sawChain = false
		}
		if destPort != 0 {
			portOK := strings.Contains(rule[iptablestest.DPort], portStr)
			ipOK := rule[iptablestest.Destination] == "" || strings.Contains(rule[iptablestest.Destination], destIP)
			if portOK && ipOK {
				return true
			}
			sawChain = false
		}
	}
	// True only if the last chain-matching rule had no IP/port filter to fail.
	return sawChain
}
// hasSrcType reports whether any rule filters on the given addrtype
// source type (e.g. "LOCAL").
func hasSrcType(rules []iptablestest.Rule, srcType string) bool {
	for _, rule := range rules {
		if rule[iptablestest.SrcType] == srcType {
			return true
		}
	}
	return false
}
// hasMasqRandomFully reports whether any MASQUERADE rule uses the
// "--random-fully" flag.
func hasMasqRandomFully(rules []iptablestest.Rule) bool {
	for _, rule := range rules {
		if rule[iptablestest.Masquerade] == "--random-fully" {
			return true
		}
	}
	return false
}
// TestHasJump exercises the hasJump helper against rule sets that match on
// chain only, on destination IP, on destination port, or on both, including
// cases where one criterion matches and the other does not.
func TestHasJump(t *testing.T) {
	testCases := map[string]struct {
		rules     []iptablestest.Rule
		destChain string
		destIP    string
		destPort  int
		expected  bool
	}{
		"case 1": {
			// Match the 1st rule(both dest IP and dest Port)
			rules: []iptablestest.Rule{
				{"-d ": "10.20.30.41/32", "--dport ": "80", "-p ": "tcp", "-j ": "REJECT"},
				{"--dport ": "3001", "-p ": "tcp", "-j ": "KUBE-MARK-MASQ"},
			},
			destChain: "REJECT",
			destIP:    "10.20.30.41",
			destPort:  80,
			expected:  true,
		},
		"case 2": {
			// Match the 2nd rule(dest Port)
			rules: []iptablestest.Rule{
				{"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"},
				{"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"},
			},
			destChain: "REJECT",
			destIP:    "",
			destPort:  3001,
			expected:  true,
		},
		"case 3": {
			// Match both dest IP and dest Port
			rules: []iptablestest.Rule{
				{"-d ": "1.2.3.4/32", "--dport ": "80", "-p ": "tcp", "-j ": "KUBE-XLB-GF53O3C2HZEXL2XN"},
			},
			destChain: "KUBE-XLB-GF53O3C2HZEXL2XN",
			destIP:    "1.2.3.4",
			destPort:  80,
			expected:  true,
		},
		"case 4": {
			// Match dest IP but doesn't match dest Port
			rules: []iptablestest.Rule{
				{"-d ": "1.2.3.4/32", "--dport ": "80", "-p ": "tcp", "-j ": "KUBE-XLB-GF53O3C2HZEXL2XN"},
			},
			destChain: "KUBE-XLB-GF53O3C2HZEXL2XN",
			destIP:    "1.2.3.4",
			destPort:  8080,
			expected:  false,
		},
		"case 5": {
			// Match dest Port but doesn't match dest IP
			rules: []iptablestest.Rule{
				{"-d ": "1.2.3.4/32", "--dport ": "80", "-p ": "tcp", "-j ": "KUBE-XLB-GF53O3C2HZEXL2XN"},
			},
			destChain: "KUBE-XLB-GF53O3C2HZEXL2XN",
			destIP:    "10.20.30.40",
			destPort:  80,
			expected:  false,
		},
		"case 6": {
			// Match the 2nd rule(dest IP)
			rules: []iptablestest.Rule{
				{"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"},
				{"-d ": "1.2.3.4/32", "-p ": "tcp", "-j ": "REJECT"},
				{"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"},
			},
			destChain: "REJECT",
			destIP:    "1.2.3.4",
			destPort:  8080,
			expected:  true,
		},
		"case 7": {
			// Match the 2nd rule(dest Port)
			rules: []iptablestest.Rule{
				{"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"},
				{"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"},
			},
			destChain: "REJECT",
			destIP:    "1.2.3.4",
			destPort:  3001,
			expected:  true,
		},
		"case 8": {
			// Match the 1st rule(dest IP)
			rules: []iptablestest.Rule{
				{"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"},
				{"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"},
			},
			destChain: "REJECT",
			destIP:    "10.20.30.41",
			destPort:  8080,
			expected:  true,
		},
		"case 9": {
			// Chain-only match: no IP/port criteria requested.
			rules: []iptablestest.Rule{
				{"-j ": "KUBE-SEP-LWSOSDSHMKPJHHJV"},
			},
			destChain: "KUBE-SEP-LWSOSDSHMKPJHHJV",
			destIP:    "",
			destPort:  0,
			expected:  true,
		},
		"case 10": {
			// No rule jumps to the requested chain at all.
			rules: []iptablestest.Rule{
				{"-j ": "KUBE-SEP-FOO"},
			},
			destChain: "KUBE-SEP-BAR",
			destIP:    "",
			destPort:  0,
			expected:  false,
		},
	}
	for k, tc := range testCases {
		if got := hasJump(tc.rules, tc.destChain, tc.destIP, tc.destPort); got != tc.expected {
			t.Errorf("%v: expected %v, got %v", k, tc.expected, got)
		}
	}
}
// hasDNAT reports whether any rule DNATs ("--to-destination") to the given
// endpoint.
func hasDNAT(rules []iptablestest.Rule, endpoint string) bool {
	for _, rule := range rules {
		if rule[iptablestest.ToDest] == endpoint {
			return true
		}
	}
	return false
}
// errorf logs every rule for debugging context, then records msg as a test
// failure.
func errorf(msg string, rules []iptablestest.Rule, t *testing.T) {
	for _, rule := range rules {
		t.Logf("%q", rule)
	}
	t.Errorf("%v", msg)
}
// TestOverallIPTablesRulesWithMultipleServices creates 4 types of services: ClusterIP,
// LoadBalancer, ExternalIP and NodePort and verifies if the NAT table rules created
// are exactly the same as what is expected. This test provides an overall view of how
// the NAT table rules look like with the different jumps.
func TestOverallIPTablesRulesWithMultipleServices(t *testing.T) {
ipt := iptablestest.NewFake()
fp := NewFakeProxier(ipt, false)
metrics.RegisterMetrics()
makeServiceMap(fp,
// create ClusterIP service
makeTestService(makeNSN("ns1", "svc1").Namespace, "svc1", func(svc *v1.Service) {
svc.Spec.ClusterIP = "10.20.30.41"
svc.Spec.Ports = []v1.ServicePort{{
Name: "p80",
Port: 80,
Protocol: v1.ProtocolTCP,
}}
}),
// create LoadBalancer service
makeTestService(makeNSN("ns2", "svc2").Namespace, "svc2", func(svc *v1.Service) {
svc.Spec.Type = "LoadBalancer"
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
svc.Spec.ClusterIP = "10.20.30.42"
svc.Spec.Ports = []v1.ServicePort{{
Name: "p80",
Port: 80,
Protocol: v1.ProtocolTCP,
NodePort: 3001,
}}
svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
IP: "1.2.3.4",
}}
// Also ensure that invalid LoadBalancerSourceRanges will not result
// in a crash.
svc.Spec.ExternalIPs = []string{"1.2.3.4"}
svc.Spec.LoadBalancerSourceRanges = []string{" 1.2.3.4/28"}
svc.Spec.HealthCheckNodePort = 30000
}),
// create NodePort service
makeTestService(makeNSN("ns3", "svc3").Namespace, "svc3", func(svc *v1.Service) {
svc.Spec.Type = "NodePort"
svc.Spec.ClusterIP = "10.20.30.43"
svc.Spec.Ports = []v1.ServicePort{{
Name: "p80",
Port: 80,
Protocol: v1.ProtocolTCP,
NodePort: 3001,
}}
}),
// create ExternalIP service
makeTestService(makeNSN("ns4", "svc4").Namespace, "svc4", func(svc *v1.Service) {
svc.Spec.Type = "NodePort"
svc.Spec.ClusterIP = "10.20.30.44"
svc.Spec.ExternalIPs = []string{"50.60.70.81"}
svc.Spec.Ports = []v1.ServicePort{{
Name: "p80",
Port: 80,
Protocol: v1.ProtocolTCP,
TargetPort: intstr.FromInt(80),
}}
}),
)
makeEndpointsMap(fp,
// create ClusterIP service endpoints
makeTestEndpoints(makeNSN("ns1", "svc1").Namespace, "svc1", func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "10.180.0.1",
}},
Ports: []v1.EndpointPort{{
Name: "p80",
Port: 80,
Protocol: v1.ProtocolTCP,
}},
}}
}),
// create LoadBalancer endpoints
makeTestEndpoints(makeNSN("ns2", "svc2").Namespace, "svc2", func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "10.180.0.2",
}},
Ports: []v1.EndpointPort{{
Name: "p80",
Port: 80,
Protocol: v1.ProtocolTCP,
}},
}}
}),
// create NodePort service endpoints
makeTestEndpoints(makeNSN("ns3", "svc3").Namespace, "svc3", func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "10.180.0.3",
}},
Ports: []v1.EndpointPort{{
Name: "p80",
Port: 80,
Protocol: v1.ProtocolTCP,
}},
}}
}),
// create ExternalIP service endpoints
makeTestEndpoints(makeNSN("ns4", "svc4").Namespace, "svc4", func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "10.180.0.4",
NodeName: nil,
}, {
IP: "10.180.0.5",
NodeName: utilpointer.StringPtr(testHostname),
}},
Ports: []v1.EndpointPort{{
Name: "p80",
Port: 80,
Protocol: v1.ProtocolTCP,
}},
}}
}),
)
fp.syncProxyRules()
expected := `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
:KUBE-XLB-GNZBNJ2PO5MGZ6GT - [0:0]
:KUBE-FW-GNZBNJ2PO5MGZ6GT - [0:0]
:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41/32 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 10.20.30.42/32 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 1.2.3.4/32 --dport 80 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4/32 --dport 80 -j KUBE-FW-GNZBNJ2PO5MGZ6GT
-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 10.20.30.42/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment ns2/svc2:p80 -j KUBE-SEP-RS4RBKLTHTF2IUXJ
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -s 1.2.3.4/28 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -j KUBE-MARK-DROP
-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -s 127.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/24 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 has no local endpoints" -j KUBE-MARK-DROP
-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 10.20.30.43/32 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 10.20.30.43/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3001 -j KUBE-SVC-X27LE4BHSL4DOUIK
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3001 -j KUBE-MARK-MASQ
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -j KUBE-SEP-OYPFS5VJICHGATKP
-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 10.20.30.44/32 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 50.60.70.81/32 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 10.20.30.44/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 50.60.70.81/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -j KUBE-SEP-C6EBXVWJJZMIWKLZ
-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`
// Adding logic to split and sort the strings because
// depending on the creation order of services, the user-chains get jumbled.
expectedSlice := strings.Split(strings.TrimSuffix(expected, "\n"), "\n")
sort.Strings(expectedSlice)
originalSlice := strings.Split(strings.TrimSuffix(fp.iptablesData.String(), "\n"), "\n")
sort.Strings(originalSlice)
assert.Equal(t, expectedSlice, originalSlice)
nNatRules, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableNAT)))
if err != nil {
t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
}
// 43 here is test specific and corresponds to one more than the number of -A lines after `*nat` in `expected`.
if nNatRules != 43.0 {
t.Fatalf("Wrong number of nat rules: expected 43 received %f", nNatRules)
}
}
// TestClusterIPReject verifies that a ClusterIP service with no endpoints
// gets a REJECT rule in KUBE-SERVICES and no rules in its per-service chain.
func TestClusterIPReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)

	svcIP := "10.20.30.41"
	svcPort := 80
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
			}}
		}),
	)
	// Deliberately no endpoints for this service.
	makeEndpointsMap(fp)

	fp.syncProxyRules()

	proto := strings.ToLower(string(v1.ProtocolTCP))
	svcChain := string(servicePortChainName(svcPortName.String(), proto))
	if rules := ipt.GetRules(svcChain); len(rules) != 0 {
		errorf(fmt.Sprintf("Unexpected rule for chain %v service %v without endpoints", svcChain, svcPortName), rules, t)
	}
	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	if !hasJump(kubeSvcRules, iptablestest.Reject, svcIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find a %v rule for service %v with no endpoints", iptablestest.Reject, svcPortName), kubeSvcRules, t)
	}
}
// TestClusterIPEndpointsJump verifies the full jump path for a ClusterIP
// service with one endpoint: KUBE-SERVICES -> service chain (with a
// KUBE-MARK-MASQ rule) -> endpoint chain -> DNAT to the endpoint.
func TestClusterIPEndpointsJump(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)

	svcIP := "10.20.30.41"
	svcPort := 80
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
			}}
		}),
	)

	epIP := "10.180.0.1"
	makeEndpointsMap(fp,
		makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP: epIP,
				}},
				Ports: []v1.EndpointPort{{
					Name:     svcPortName.Port,
					Port:     int32(svcPort),
					Protocol: v1.ProtocolTCP,
				}},
			}}
		}),
	)

	fp.syncProxyRules()

	proto := strings.ToLower(string(v1.ProtocolTCP))
	epStr := fmt.Sprintf("%s:%d", epIP, svcPort)
	svcChain := string(servicePortChainName(svcPortName.String(), proto))
	epChain := string(servicePortEndpointChainName(svcPortName.String(), proto, epStr))

	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	if !hasJump(kubeSvcRules, svcChain, svcIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump from KUBE-SERVICES to %v chain", svcChain), kubeSvcRules, t)
	}

	svcRules := ipt.GetRules(svcChain)
	if !hasJump(svcRules, string(KubeMarkMasqChain), svcIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump from %v to KUBE-MARK-MASQ chain", svcChain), svcRules, t)
	}
	if !hasJump(svcRules, epChain, "", 0) {
		errorf(fmt.Sprintf("Failed to jump to ep chain %v", epChain), svcRules, t)
	}

	epRules := ipt.GetRules(epChain)
	if !hasDNAT(epRules, epStr) {
		errorf(fmt.Sprintf("Endpoint chain %v lacks DNAT to %v", epChain, epStr), epRules, t)
	}
}
// TestLoadBalancer verifies that traffic to a LoadBalancer ingress IP is
// routed through the per-service firewall chain, which in turn jumps to the
// service chain and KUBE-MARK-MASQ.
func TestLoadBalancer(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)

	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
	svcLBIP := "1.2.3.4"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: svcLBIP,
			}}
			// Also ensure that invalid LoadBalancerSourceRanges will not result
			// in a crash.
			svc.Spec.ExternalIPs = []string{svcLBIP}
			svc.Spec.LoadBalancerSourceRanges = []string{" 1.2.3.4/28"}
		}),
	)

	epIP := "10.180.0.1"
	makeEndpointsMap(fp,
		makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP: epIP,
				}},
				Ports: []v1.EndpointPort{{
					Name:     svcPortName.Port,
					Port:     int32(svcPort),
					Protocol: v1.ProtocolTCP,
				}},
			}}
		}),
	)

	fp.syncProxyRules()

	proto := strings.ToLower(string(v1.ProtocolTCP))
	fwChain := string(serviceFirewallChainName(svcPortName.String(), proto))
	svcChain := string(servicePortChainName(svcPortName.String(), proto))

	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	if !hasJump(kubeSvcRules, fwChain, svcLBIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump to firewall chain %v", fwChain), kubeSvcRules, t)
	}
	fwRules := ipt.GetRules(fwChain)
	if !hasJump(fwRules, svcChain, "", 0) || !hasJump(fwRules, string(KubeMarkMasqChain), "", 0) {
		errorf(fmt.Sprintf("Failed to find jump from firewall chain %v to svc chain %v", fwChain, svcChain), fwRules, t)
	}
}
// TestNodePort verifies that a NodePort service gets a jump from
// KUBE-NODEPORTS to its service chain, plus a masquerade rule for
// non-local node-port traffic.
func TestNodePort(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)

	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
		}),
	)

	epIP := "10.180.0.1"
	makeEndpointsMap(fp,
		makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP: epIP,
				}},
				Ports: []v1.EndpointPort{{
					Name:     svcPortName.Port,
					Port:     int32(svcPort),
					Protocol: v1.ProtocolTCP,
				}},
			}}
		}),
	)

	// Register fake node interfaces; an empty nodePortAddresses means
	// node ports are accepted on all addresses.
	loIf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}
	ethIf := net.Interface{Index: 1, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0}
	fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&loIf, []net.Addr{utilproxytest.AddrStruct{Val: "127.0.0.1/16"}})
	fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&ethIf, []net.Addr{utilproxytest.AddrStruct{Val: "::1/128"}})
	fp.nodePortAddresses = []string{}

	fp.syncProxyRules()

	proto := strings.ToLower(string(v1.ProtocolTCP))
	svcChain := string(servicePortChainName(svcPortName.String(), proto))
	kubeNodePortRules := ipt.GetRules(string(kubeNodePortsChain))
	if !hasJump(kubeNodePortRules, svcChain, "", svcNodePort) {
		errorf(fmt.Sprintf("Failed to find jump to svc chain %v", svcChain), kubeNodePortRules, t)
	}

	expectedNodePortNonLocalTrafficMasqueradeRule := `-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-MARK-MASQ`
	svcRules := ipt.GetRules(svcChain)
	if !strings.Contains(fp.iptablesData.String(), expectedNodePortNonLocalTrafficMasqueradeRule) {
		errorf(fmt.Sprintf("Didn't find the masquerade rule for node port non-local traffic in svc chain %v", svcChain), svcRules, t)
	}
}
// TestHealthCheckNodePort verifies that a LoadBalancer service with
// externalTrafficPolicy=Local gets an ACCEPT rule for its health check
// node port in KUBE-NODEPORTS.
func TestHealthCheckNodePort(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)

	svcIP := "10.20.30.42"
	svcPort := 80
	svcNodePort := 3001
	svcHealthCheckNodePort := 30000
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Spec.HealthCheckNodePort = int32(svcHealthCheckNodePort)
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
		}),
	)

	// Fake node interfaces so the proxier can resolve node port addresses.
	itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}
	addrs := []net.Addr{utilproxytest.AddrStruct{Val: "127.0.0.1/16"}}
	itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0}
	addrs1 := []net.Addr{utilproxytest.AddrStruct{Val: "::1/128"}}
	fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
	fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
	fp.nodePortAddresses = []string{"127.0.0.1/16"}

	fp.syncProxyRules()

	kubeNodePortsRules := ipt.GetRules(string(kubeNodePortsChain))
	if !hasJump(kubeNodePortsRules, iptablestest.Accept, "", svcHealthCheckNodePort) {
		// Pass the message directly: fmt.Sprintf with a constant string and
		// no format arguments was redundant (go vet / staticcheck S1039).
		errorf("Failed to find Accept rule", kubeNodePortsRules, t)
	}
}
// TestMasqueradeRule verifies that KUBE-POSTROUTING always contains a
// MASQUERADE rule, and that --random-fully is present exactly when the
// underlying iptables reports support for it.
func TestMasqueradeRule(t *testing.T) {
	for _, hasRandomFully := range []bool{false, true} {
		ipt := iptablestest.NewFake().SetHasRandomFully(hasRandomFully)
		fp := NewFakeProxier(ipt, false)
		makeServiceMap(fp)
		makeEndpointsMap(fp)
		fp.syncProxyRules()

		postRoutingRules := ipt.GetRules(string(kubePostroutingChain))
		if !hasJump(postRoutingRules, "MASQUERADE", "", 0) {
			errorf(fmt.Sprintf("Failed to find -j MASQUERADE in %s chain", kubePostroutingChain), postRoutingRules, t)
		}
		if hasMasqRandomFully(postRoutingRules) != hasRandomFully {
			probs := map[bool]string{false: "found", true: "did not find"}
			errorf(fmt.Sprintf("%s --random-fully in -j MASQUERADE rule in %s chain when HasRandomFully()==%v", probs[hasRandomFully], kubePostroutingChain, hasRandomFully), postRoutingRules, t)
		}
	}
}
// TestExternalIPsReject verifies that a service external IP with no
// endpoints gets a REJECT rule in KUBE-EXTERNAL-SERVICES.
func TestExternalIPsReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)

	svcIP := "10.20.30.41"
	svcPort := 80
	svcExternalIPs := "50.60.70.81"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "ClusterIP"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.ExternalIPs = []string{svcExternalIPs}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       svcPortName.Port,
				Port:       int32(svcPort),
				Protocol:   v1.ProtocolTCP,
				TargetPort: intstr.FromInt(svcPort),
			}}
		}),
	)
	// Deliberately no endpoints for this service.
	makeEndpointsMap(fp)

	fp.syncProxyRules()

	extRules := ipt.GetRules(string(kubeExternalServicesChain))
	if !hasJump(extRules, iptablestest.Reject, svcExternalIPs, svcPort) {
		errorf(fmt.Sprintf("Failed to find a %v rule for externalIP %v with no endpoints", iptablestest.Reject, svcPortName), extRules, t)
	}
}
// TestOnlyLocalExternalIPs verifies that, for a service with
// externalTrafficPolicy=Local, traffic to an external IP jumps to the XLB
// chain, and that chain only forwards to endpoints local to this node.
func TestOnlyLocalExternalIPs(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcExternalIPs := "50.60.70.81"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.ClusterIP = svcIP
			svc.Spec.ExternalIPs = []string{svcExternalIPs}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       svcPortName.Port,
				Port:       int32(svcPort),
				Protocol:   v1.ProtocolTCP,
				TargetPort: intstr.FromInt(svcPort),
			}}
		}),
	)
	// epIP1 has no NodeName, so it is NOT local to this node; epIP2 is pinned
	// to testHostname, so it IS local. (The earlier epStrLocal/epStrNonLocal
	// names were swapped relative to these semantics; fixed here. The
	// redundant no-endpoint makeEndpointsMap(fp) call, immediately overwritten
	// below, was also removed.)
	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	epStrNonLocal := fmt.Sprintf("%s:%d", epIP1, svcPort)
	epStrLocal := fmt.Sprintf("%s:%d", epIP2, svcPort)
	makeEndpointsMap(fp,
		makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP:       epIP1,
					NodeName: nil,
				}, {
					IP:       epIP2,
					NodeName: utilpointer.StringPtr(testHostname),
				}},
				Ports: []v1.EndpointPort{{
					Name:     svcPortName.Port,
					Port:     int32(svcPort),
					Protocol: v1.ProtocolTCP,
				}},
			}}
		}),
	)
	fp.syncProxyRules()
	proto := strings.ToLower(string(v1.ProtocolTCP))
	lbChain := string(serviceLBChainName(svcPortName.String(), proto))
	nonLocalEpChain := string(servicePortEndpointChainName(svcPortName.String(), proto, epStrNonLocal))
	localEpChain := string(servicePortEndpointChainName(svcPortName.String(), proto, epStrLocal))
	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	if !hasJump(kubeSvcRules, lbChain, svcExternalIPs, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump to xlb chain %v", lbChain), kubeSvcRules, t)
	}
	lbRules := ipt.GetRules(lbChain)
	if hasJump(lbRules, nonLocalEpChain, "", 0) {
		errorf(fmt.Sprintf("Found jump from lb chain %v to non-local ep %v", lbChain, epStrNonLocal), lbRules, t)
	}
	if !hasJump(lbRules, localEpChain, "", 0) {
		errorf(fmt.Sprintf("Didn't find jump from lb chain %v to local ep %v", lbChain, epStrLocal), lbRules, t)
	}
}
// TestNonLocalExternalIPs tests if we add the masquerade rule into svcChain in order to
// SNAT packets to external IPs if externalTrafficPolicy is cluster and the traffic is NOT Local.
func TestNonLocalExternalIPs(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcExternalIPs := "50.60.70.81"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.ExternalIPs = []string{svcExternalIPs}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       svcPortName.Port,
				Port:       int32(svcPort),
				Protocol:   v1.ProtocolTCP,
				TargetPort: intstr.FromInt(svcPort),
			}}
		}),
	)
	// One off-node endpoint (nil NodeName) and one local endpoint; with the
	// default (Cluster) traffic policy both are eligible targets.
	// (A redundant no-endpoint makeEndpointsMap(fp) call that was immediately
	// overwritten by this one has been removed.)
	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	makeEndpointsMap(fp,
		makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP:       epIP1,
					NodeName: nil,
				}, {
					IP:       epIP2,
					NodeName: utilpointer.StringPtr(testHostname),
				}},
				Ports: []v1.EndpointPort{{
					Name:     svcPortName.Port,
					Port:     int32(svcPort),
					Protocol: v1.ProtocolTCP,
				}},
			}}
		}),
	)
	fp.syncProxyRules()
	proto := strings.ToLower(string(v1.ProtocolTCP))
	lbChain := string(serviceLBChainName(svcPortName.String(), proto))
	svcChain := string(servicePortChainName(svcPortName.String(), proto))
	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	if !hasJump(kubeSvcRules, svcChain, svcExternalIPs, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump to svc chain %v", svcChain), kubeSvcRules, t)
	}
	svcRules := ipt.GetRules(svcChain)
	// cluster-IP masq + external-IP masq + two endpoint jumps.
	if len(svcRules) != 4 {
		t.Errorf("expected svcChain %v to have 4 rules, got %v", svcChain, len(svcRules))
	}
	if !hasJump(svcRules, string(KubeMarkMasqChain), svcIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump from %v to KUBE-MARK-MASQ chain", svcChain), svcRules, t)
	}
	expectedExternalIPNonLocalTrafficMasqueradeRule := `-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 external IP" -m tcp -p tcp -d 50.60.70.81/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ`
	if !strings.Contains(fp.iptablesData.String(), expectedExternalIPNonLocalTrafficMasqueradeRule) {
		errorf(fmt.Sprintf("Didn't find the masquerade rule for external-ip non-local traffic in svc chain %v", svcChain), svcRules, t)
	}
	// Traffic policy is Cluster, so the XLB chain must stay empty.
	lbRules := ipt.GetRules(lbChain)
	if len(lbRules) != 0 {
		t.Errorf("expected svclbChain %v to have 0 rules, got %v", lbChain, len(lbRules))
	}
}
// TestNodePortReject verifies that a NodePort service with no endpoints
// gets a REJECT rule in KUBE-EXTERNAL-SERVICES.
func TestNodePortReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)

	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
		}),
	)
	// Deliberately no endpoints for this service.
	makeEndpointsMap(fp)

	fp.syncProxyRules()

	extRules := ipt.GetRules(string(kubeExternalServicesChain))
	if !hasJump(extRules, iptablestest.Reject, svcIP, svcNodePort) {
		errorf(fmt.Sprintf("Failed to find a %v rule for service %v with no endpoints", iptablestest.Reject, svcPortName), extRules, t)
	}
}
// TestLoadBalancerReject verifies that a LoadBalancer service with no
// endpoints is rejected in KUBE-EXTERNAL-SERVICES, and that no such REJECT
// rule appears in KUBE-SERVICES.
func TestLoadBalancerReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)

	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
	svcLBIP := "1.2.3.4"
	svcSessionAffinityTimeout := int32(10800)
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: svcLBIP,
			}}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
			svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
				ClientIP: &v1.ClientIPConfig{TimeoutSeconds: &svcSessionAffinityTimeout},
			}
		}),
	)
	// Deliberately no endpoints for this service.
	makeEndpointsMap(fp)

	fp.syncProxyRules()

	extRules := ipt.GetRules(string(kubeExternalServicesChain))
	if !hasJump(extRules, iptablestest.Reject, svcLBIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find a %v rule for LoadBalancer %v with no endpoints", iptablestest.Reject, svcPortName), extRules, t)
	}
	svcRules := ipt.GetRules(string(kubeServicesChain))
	if hasJump(svcRules, iptablestest.Reject, svcLBIP, svcPort) {
		errorf(fmt.Sprintf("Found a %v rule for LoadBalancer %v with no endpoints in kubeServicesChain", iptablestest.Reject, svcPortName), svcRules, t)
	}
}
// TestOnlyLocalLoadBalancing verifies rule generation for a LoadBalancer
// service with externalTrafficPolicy=Local and ClientIP session affinity:
// LB traffic passes through the firewall chain into the XLB chain, which
// only forwards to node-local endpoints and carries an affinity rule.
func TestOnlyLocalLoadBalancing(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
	svcLBIP := "1.2.3.4"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}
	svcSessionAffinityTimeout := int32(10800)
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: svcLBIP,
			}}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
			svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
				ClientIP: &v1.ClientIPConfig{TimeoutSeconds: &svcSessionAffinityTimeout},
			}
		}),
	)
	// epIP1 has no NodeName, so it is NOT local to this node; epIP2 is pinned
	// to testHostname, so it IS local. (The earlier epStrLocal/epStrNonLocal
	// names were swapped relative to these semantics; fixed here.)
	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	epStrNonLocal := fmt.Sprintf("%s:%d", epIP1, svcPort)
	epStrLocal := fmt.Sprintf("%s:%d", epIP2, svcPort)
	makeEndpointsMap(fp,
		makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP:       epIP1,
					NodeName: nil,
				}, {
					IP:       epIP2,
					NodeName: utilpointer.StringPtr(testHostname),
				}},
				Ports: []v1.EndpointPort{{
					Name:     svcPortName.Port,
					Port:     int32(svcPort),
					Protocol: v1.ProtocolTCP,
				}},
			}}
		}),
	)
	fp.syncProxyRules()
	proto := strings.ToLower(string(v1.ProtocolTCP))
	fwChain := string(serviceFirewallChainName(svcPortName.String(), proto))
	lbChain := string(serviceLBChainName(svcPortName.String(), proto))
	nonLocalEpChain := string(servicePortEndpointChainName(svcPortName.String(), proto, epStrNonLocal))
	localEpChain := string(servicePortEndpointChainName(svcPortName.String(), proto, epStrLocal))
	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	if !hasJump(kubeSvcRules, fwChain, svcLBIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump to firewall chain %v", fwChain), kubeSvcRules, t)
	}
	fwRules := ipt.GetRules(fwChain)
	if !hasJump(fwRules, lbChain, "", 0) {
		errorf(fmt.Sprintf("Failed to find jump from firewall chain %v to svc chain %v", fwChain, lbChain), fwRules, t)
	}
	// With Local traffic policy the firewall chain must NOT masquerade
	// (masquerading would lose the client source IP).
	if hasJump(fwRules, string(KubeMarkMasqChain), "", 0) {
		errorf(fmt.Sprintf("Found jump from fw chain %v to MASQUERADE", fwChain), fwRules, t)
	}
	lbRules := ipt.GetRules(lbChain)
	if hasJump(lbRules, nonLocalEpChain, "", 0) {
		errorf(fmt.Sprintf("Found jump from lb chain %v to non-local ep %v", lbChain, epStrNonLocal), lbRules, t)
	}
	if !hasJump(lbRules, localEpChain, "", 0) {
		errorf(fmt.Sprintf("Didn't find jump from lb chain %v to local ep %v", lbChain, epStrLocal), lbRules, t)
	}
	if !hasSessionAffinityRule(lbRules) {
		errorf(fmt.Sprintf("Didn't find session affinity rule from lb chain %v", lbChain), lbRules, t)
	}
}
// TestOnlyLocalNodePortsNoClusterCIDR runs the shared onlyLocalNodePorts
// checks against a fresh fake proxier.
//
// NOTE(review): despite the name, this setup is byte-identical to
// TestOnlyLocalNodePorts below — presumably NewFakeProxier was meant to be
// configured WITHOUT a cluster CIDR here; confirm against NewFakeProxier's
// parameters and fix whichever test is mis-configured.
func TestOnlyLocalNodePortsNoClusterCIDR(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	onlyLocalNodePorts(t, fp, ipt)
}
// TestOnlyLocalNodePorts runs the shared onlyLocalNodePorts checks against a
// fresh fake proxier.
func TestOnlyLocalNodePorts(t *testing.T) {
	fakeIPT := iptablestest.NewFake()
	onlyLocalNodePorts(t, NewFakeProxier(fakeIPT, false), fakeIPT)
}
// onlyLocalNodePorts exercises NodePort rule generation for a service with
// externalTrafficPolicy=Local: node-port traffic must only be forwarded to
// endpoints local to this node, with LOCAL-source traffic routed through the
// cluster-wide service chain instead.
func onlyLocalNodePorts(t *testing.T, fp *Proxier, ipt *iptablestest.FakeIPTables) {
	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
		}),
	)
	// epIP1 has no NodeName, so it is NOT local to this node; epIP2 is pinned
	// to testHostname, so it IS local. (The earlier epStrLocal/epStrNonLocal
	// names were swapped relative to these semantics; fixed here.)
	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	epStrNonLocal := fmt.Sprintf("%s:%d", epIP1, svcPort)
	epStrLocal := fmt.Sprintf("%s:%d", epIP2, svcPort)
	makeEndpointsMap(fp,
		makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP:       epIP1,
					NodeName: nil,
				}, {
					IP:       epIP2,
					NodeName: utilpointer.StringPtr(testHostname),
				}},
				Ports: []v1.EndpointPort{{
					Name:     svcPortName.Port,
					Port:     int32(svcPort),
					Protocol: v1.ProtocolTCP,
				}},
			}}
		}),
	)
	// Register a fake node interface and restrict node ports to its subnet.
	itf := net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}
	addrs := []net.Addr{utilproxytest.AddrStruct{Val: "10.20.30.51/24"}}
	fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
	fp.nodePortAddresses = []string{"10.20.30.0/24"}
	fp.syncProxyRules()
	proto := strings.ToLower(string(v1.ProtocolTCP))
	lbChain := string(serviceLBChainName(svcPortName.String(), proto))
	nonLocalEpChain := string(servicePortEndpointChainName(svcPortName.String(), proto, epStrNonLocal))
	localEpChain := string(servicePortEndpointChainName(svcPortName.String(), proto, epStrLocal))
	kubeNodePortRules := ipt.GetRules(string(kubeNodePortsChain))
	if !hasJump(kubeNodePortRules, lbChain, "", svcNodePort) {
		errorf(fmt.Sprintf("Failed to find jump to lb chain %v", lbChain), kubeNodePortRules, t)
	}
	if !hasJump(kubeNodePortRules, string(KubeMarkMasqChain), "", svcNodePort) {
		// Message fix: %d is the node port, not a destination IP.
		errorf(fmt.Sprintf("Failed to find jump to %s chain for node port %d", KubeMarkMasqChain, svcNodePort), kubeNodePortRules, t)
	}
	kubeServiceRules := ipt.GetRules(string(kubeServicesChain))
	if !hasJump(kubeServiceRules, string(kubeNodePortsChain), "10.20.30.51", 0) {
		errorf(fmt.Sprintf("Failed to find jump to KUBE-NODEPORTS chain %v", string(kubeNodePortsChain)), kubeServiceRules, t)
	}
	svcChain := string(servicePortChainName(svcPortName.String(), proto))
	lbRules := ipt.GetRules(lbChain)
	if hasJump(lbRules, nonLocalEpChain, "", 0) {
		errorf(fmt.Sprintf("Found jump from lb chain %v to non-local ep %v", lbChain, epStrNonLocal), lbRules, t)
	}
	if !hasJump(lbRules, svcChain, "", 0) || !hasSrcType(lbRules, "LOCAL") {
		errorf(fmt.Sprintf("Did not find jump from lb chain %v to svc %v with src-type LOCAL", lbChain, svcChain), lbRules, t)
	}
	if !hasJump(lbRules, localEpChain, "", 0) {
		// Message fix: previously printed the non-local endpoint here.
		errorf(fmt.Sprintf("Didn't find jump from lb chain %v to local ep %v", lbChain, epStrLocal), lbRules, t)
	}
}
// TestComputeProbability checks computeProbability against known values and
// verifies the result is strictly increasing as the endpoint count shrinks.
func TestComputeProbability(t *testing.T) {
	expectedProbabilities := map[int]string{
		1:      "1.0000000000",
		2:      "0.5000000000",
		10:     "0.1000000000",
		100:    "0.0100000000",
		1000:   "0.0010000000",
		10000:  "0.0001000000",
		100000: "0.0000100000",
		100001: "0.0000099999",
	}
	for num, expected := range expectedProbabilities {
		if actual := computeProbability(num); actual != expected {
			t.Errorf("Expected computeProbability(%d) to be %s, got: %s", num, expected, actual)
		}
	}

	// Monotonicity: probability must strictly grow as num decreases.
	prev := float64(0)
	for i := 100000; i > 1; i-- {
		curr, err := strconv.ParseFloat(computeProbability(i), 64)
		if err != nil {
			t.Fatalf("Error parsing float probability for %d: %v", i, err)
		}
		if curr <= prev {
			t.Fatalf("Probability unexpectedly <= to previous probability for %d: (%0.10f <= %0.10f)", i, curr, prev)
		}
		prev = curr
	}
}
// makeTestService builds a v1.Service with the given namespace/name and
// empty annotations, then lets svcFunc customize its spec and status.
func makeTestService(namespace, name string, svcFunc func(*v1.Service)) *v1.Service {
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:        name,
			Namespace:   namespace,
			Annotations: map[string]string{},
		},
	}
	// Spec and Status start as their zero values; svcFunc fills them in.
	svcFunc(svc)
	return svc
}
// addTestPort appends a v1.ServicePort built from the given fields to array
// and returns the extended slice.
func addTestPort(array []v1.ServicePort, name string, protocol v1.Protocol, port, nodeport int32, targetPort int) []v1.ServicePort {
	return append(array, v1.ServicePort{
		Name:       name,
		Protocol:   protocol,
		Port:       port,
		NodePort:   nodeport,
		TargetPort: intstr.FromInt(targetPort),
	})
}
// TestBuildServiceMapAddRemove verifies serviceMap.Update over a mix of
// ClusterIP, NodePort, and LoadBalancer services: adding them yields one map
// entry per service port, only the externalTrafficPolicy=Local load balancer
// contributes a healthcheck node port, and deleting services (or dropping a
// UDP port from one) reports the affected UDP cluster IPs as stale.
//
// Fixes a duplicated word in the stale-UDP error message ("service service").
func TestBuildServiceMapAddRemove(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	services := []*v1.Service{
		// 3 ports -> 3 service map entries.
		makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeClusterIP
			svc.Spec.ClusterIP = "172.16.55.4"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "sctpport", "SCTP", 1236, 6321, 0)
		}),
		// 3 ports -> 3 service map entries.
		makeTestService("somewhere-else", "node-port", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeNodePort
			svc.Spec.ClusterIP = "172.16.55.10"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blahblah", "UDP", 345, 678, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "moreblahblah", "TCP", 344, 677, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "muchmoreblah", "SCTP", 343, 676, 0)
		}),
		// 2 ports -> 2 service map entries.
		makeTestService("somewhere", "load-balancer", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeLoadBalancer
			svc.Spec.ClusterIP = "172.16.55.11"
			svc.Spec.LoadBalancerIP = "5.6.7.8"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar", "UDP", 8675, 30061, 7000)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8676, 30062, 7001)
			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
				Ingress: []v1.LoadBalancerIngress{
					{IP: "10.1.2.4"},
				},
			}
		}),
		// 2 ports -> 2 service map entries; externalTrafficPolicy=Local, so
		// its HealthCheckNodePort must appear in HCServiceNodePorts.
		makeTestService("somewhere", "only-local-load-balancer", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeLoadBalancer
			svc.Spec.ClusterIP = "172.16.55.12"
			svc.Spec.LoadBalancerIP = "5.6.7.8"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar2", "UDP", 8677, 30063, 7002)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8678, 30064, 7003)
			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
				Ingress: []v1.LoadBalancerIngress{
					{IP: "10.1.2.3"},
				},
			}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.HealthCheckNodePort = 345
		}),
	}
	for i := range services {
		fp.OnServiceAdd(services[i])
	}
	result := fp.serviceMap.Update(fp.serviceChanges)
	// 3 + 3 + 2 + 2 service ports total.
	if len(fp.serviceMap) != 10 {
		t.Errorf("expected service map length 10, got %v", fp.serviceMap)
	}
	// The only-local-loadbalancer ones get added
	if len(result.HCServiceNodePorts) != 1 {
		t.Errorf("expected 1 healthcheck port, got %v", result.HCServiceNodePorts)
	} else {
		nsn := makeNSN("somewhere", "only-local-load-balancer")
		if port, found := result.HCServiceNodePorts[nsn]; !found || port != 345 {
			t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, result.HCServiceNodePorts)
		}
	}
	if len(result.UDPStaleClusterIP) != 0 {
		// Services only added, so nothing stale yet
		t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
	}
	// Remove some stuff
	// oneService is a modification of services[0] with removed first port.
	oneService := makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeClusterIP
		svc.Spec.ClusterIP = "172.16.55.4"
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
	})
	fp.OnServiceUpdate(services[0], oneService)
	fp.OnServiceDelete(services[1])
	fp.OnServiceDelete(services[2])
	fp.OnServiceDelete(services[3])
	result = fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 1 {
		t.Errorf("expected service map length 1, got %v", fp.serviceMap)
	}
	if len(result.HCServiceNodePorts) != 0 {
		t.Errorf("expected 0 healthcheck ports, got %v", result.HCServiceNodePorts)
	}
	// All services but one were deleted. While you'd expect only the ClusterIPs
	// from the three deleted services here, we still have the ClusterIP for
	// the not-deleted service, because one of its ServicePorts was deleted.
	expectedStaleUDPServices := []string{"172.16.55.10", "172.16.55.4", "172.16.55.11", "172.16.55.12"}
	if len(result.UDPStaleClusterIP) != len(expectedStaleUDPServices) {
		t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), result.UDPStaleClusterIP.UnsortedList())
	}
	for _, ip := range expectedStaleUDPServices {
		if !result.UDPStaleClusterIP.Has(ip) {
			t.Errorf("expected stale UDP service %s", ip)
		}
	}
}
// TestBuildServiceMapServiceHeadless verifies that headless services
// (ClusterIP == v1.ClusterIPNone), with or without ports, are skipped
// entirely by serviceMap.Update: no map entries, no healthcheck node ports,
// and no stale UDP cluster IPs.
func TestBuildServiceMapServiceHeadless(t *testing.T) {
ipt := iptablestest.NewFake()
fp := NewFakeProxier(ipt, false)
makeServiceMap(fp,
makeTestService("somewhere-else", "headless", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeClusterIP
svc.Spec.ClusterIP = v1.ClusterIPNone
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "rpc", "UDP", 1234, 0, 0)
}),
// Headless even without any ports defined.
makeTestService("somewhere-else", "headless-without-port", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeClusterIP
svc.Spec.ClusterIP = v1.ClusterIPNone
}),
)
// Headless service should be ignored
result := fp.serviceMap.Update(fp.serviceChanges)
if len(fp.serviceMap) != 0 {
t.Errorf("expected service map length 0, got %d", len(fp.serviceMap))
}
// No proxied services, so no healthchecks
if len(result.HCServiceNodePorts) != 0 {
t.Errorf("expected healthcheck ports length 0, got %d", len(result.HCServiceNodePorts))
}
if len(result.UDPStaleClusterIP) != 0 {
t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
}
}
// TestBuildServiceMapServiceTypeExternalName verifies that ExternalName
// services are not proxied: serviceMap.Update produces no entries, no
// healthcheck node ports, and no stale UDP cluster IPs, even though the
// service declares a ClusterIP and a port.
func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
ipt := iptablestest.NewFake()
fp := NewFakeProxier(ipt, false)
makeServiceMap(fp,
makeTestService("somewhere-else", "external-name", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeExternalName
svc.Spec.ClusterIP = "172.16.55.4" // Should be ignored
svc.Spec.ExternalName = "foo2.bar.com"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blah", "UDP", 1235, 5321, 0)
}),
)
result := fp.serviceMap.Update(fp.serviceChanges)
if len(fp.serviceMap) != 0 {
t.Errorf("expected service map length 0, got %v", fp.serviceMap)
}
// No proxied services, so no healthchecks
if len(result.HCServiceNodePorts) != 0 {
t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts)
}
if len(result.UDPStaleClusterIP) != 0 {
t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP)
}
}
// TestBuildServiceMapServiceUpdate verifies serviceMap.Update across service
// type transitions: a 2-port ClusterIP service is converted to an
// externalTrafficPolicy=Local LoadBalancer (gaining a healthcheck node port),
// updated with no change (no churn), and converted back to ClusterIP
// (losing the healthcheck port). The map keeps 2 entries (one per port)
// throughout, and no UDP cluster IPs go stale since the ClusterIP and UDP
// port survive every transition.
func TestBuildServiceMapServiceUpdate(t *testing.T) {
ipt := iptablestest.NewFake()
fp := NewFakeProxier(ipt, false)
servicev1 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeClusterIP
svc.Spec.ClusterIP = "172.16.55.4"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 0)
})
// servicev2 is the same service converted to a LoadBalancer with
// externalTrafficPolicy=Local and a healthcheck node port.
servicev2 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeLoadBalancer
svc.Spec.ClusterIP = "172.16.55.4"
svc.Spec.LoadBalancerIP = "5.6.7.8"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 7002)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 7003)
svc.Status.LoadBalancer = v1.LoadBalancerStatus{
Ingress: []v1.LoadBalancerIngress{
{IP: "10.1.2.3"},
},
}
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
svc.Spec.HealthCheckNodePort = 345
})
fp.OnServiceAdd(servicev1)
result := fp.serviceMap.Update(fp.serviceChanges)
if len(fp.serviceMap) != 2 {
t.Errorf("expected service map length 2, got %v", fp.serviceMap)
}
if len(result.HCServiceNodePorts) != 0 {
t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts)
}
if len(result.UDPStaleClusterIP) != 0 {
// Services only added, so nothing stale yet
t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
}
// Change service to load-balancer
fp.OnServiceUpdate(servicev1, servicev2)
result = fp.serviceMap.Update(fp.serviceChanges)
if len(fp.serviceMap) != 2 {
t.Errorf("expected service map length 2, got %v", fp.serviceMap)
}
if len(result.HCServiceNodePorts) != 1 {
t.Errorf("expected healthcheck ports length 1, got %v", result.HCServiceNodePorts)
}
if len(result.UDPStaleClusterIP) != 0 {
t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP.UnsortedList())
}
// No change; make sure the service map stays the same and there are
// no health-check changes
fp.OnServiceUpdate(servicev2, servicev2)
result = fp.serviceMap.Update(fp.serviceChanges)
if len(fp.serviceMap) != 2 {
t.Errorf("expected service map length 2, got %v", fp.serviceMap)
}
if len(result.HCServiceNodePorts) != 1 {
t.Errorf("expected healthcheck ports length 1, got %v", result.HCServiceNodePorts)
}
if len(result.UDPStaleClusterIP) != 0 {
t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP.UnsortedList())
}
// And back to ClusterIP
fp.OnServiceUpdate(servicev2, servicev1)
result = fp.serviceMap.Update(fp.serviceChanges)
if len(fp.serviceMap) != 2 {
t.Errorf("expected service map length 2, got %v", fp.serviceMap)
}
if len(result.HCServiceNodePorts) != 0 {
t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts)
}
if len(result.UDPStaleClusterIP) != 0 {
// The ClusterIP and its UDP port survive the conversion, so nothing is stale
t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
}
}
// makeTestEndpoints constructs a v1.Endpoints object with the given
// namespace and name, lets eptFunc fill in its subsets, and returns it.
func makeTestEndpoints(namespace, name string, eptFunc func(*v1.Endpoints)) *v1.Endpoints {
	ept := &v1.Endpoints{}
	ept.ObjectMeta = metav1.ObjectMeta{
		Name:      name,
		Namespace: namespace,
	}
	eptFunc(ept)
	return ept
}
// makeEndpointsMap feeds allEndpoints through the proxier's add handler and
// then marks the proxier's endpoints state as synced (under its lock).
func makeEndpointsMap(proxier *Proxier, allEndpoints ...*v1.Endpoints) {
	for _, ep := range allEndpoints {
		proxier.OnEndpointsAdd(ep)
	}
	proxier.mu.Lock()
	defer proxier.mu.Unlock()
	proxier.endpointsSynced = true
}
// makeNSN is a shorthand for constructing a types.NamespacedName.
func makeNSN(namespace, name string) types.NamespacedName {
	nsn := types.NamespacedName{
		Namespace: namespace,
		Name:      name,
	}
	return nsn
}
// makeServicePortName builds the proxy.ServicePortName identifying the given
// namespace/name/port/protocol tuple.
func makeServicePortName(ns, name, port string, protocol v1.Protocol) proxy.ServicePortName {
	spn := proxy.ServicePortName{NamespacedName: makeNSN(ns, name)}
	spn.Port = port
	spn.Protocol = protocol
	return spn
}
// makeServiceMap feeds allServices through the proxier's add handler and
// then marks the proxier's services state as synced (under its lock).
func makeServiceMap(proxier *Proxier, allServices ...*v1.Service) {
	for _, svc := range allServices {
		proxier.OnServiceAdd(svc)
	}
	proxier.mu.Lock()
	defer proxier.mu.Unlock()
	proxier.servicesSynced = true
}
// compareEndpointsMaps asserts that newMap matches expected for test case
// index tci: the same set of service-port keys, and for each key the same
// endpoints in the same order, comparing endpoint address, locality,
// protocol, and chain name.
func compareEndpointsMaps(t *testing.T, tci int, newMap proxy.EndpointsMap, expected map[proxy.ServicePortName][]*endpointsInfo) {
	if len(newMap) != len(expected) {
		t.Errorf("[%d] expected %d results, got %d: %v", tci, len(expected), len(newMap), newMap)
	}
	for svcPort, wantEps := range expected {
		gotEps := newMap[svcPort]
		if len(gotEps) != len(wantEps) {
			t.Errorf("[%d] expected %d endpoints for %v, got %d", tci, len(wantEps), svcPort, len(gotEps))
			continue
		}
		for i, want := range wantEps {
			// Entries are stored as the proxy.Endpoint interface; this
			// package always puts *endpointsInfo values in.
			got, ok := gotEps[i].(*endpointsInfo)
			if !ok {
				t.Errorf("Failed to cast endpointsInfo")
				continue
			}
			if got.Endpoint != want.Endpoint ||
				got.IsLocal != want.IsLocal ||
				got.protocol != want.protocol ||
				got.chainName != want.chainName {
				t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, svcPort, i, want, got)
			}
		}
	}
}
func Test_updateEndpointsMap(t *testing.T) {
var nodeName = testHostname
emptyEndpoint := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{}
}
unnamedPort := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}}
}
unnamedPortLocal := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}}
}
namedPortLocal := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}}
}
namedPort := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}}
}
namedPortRenamed := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Name: "p11-2",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}}
}
namedPortRenumbered := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 22,
Protocol: v1.ProtocolUDP,
}},
}}
}
namedPortsLocalNoLocal := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}, {
IP: "1.1.1.2",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}, {
Name: "p12",
Port: 12,
Protocol: v1.ProtocolUDP,
}},
}}
}
multipleSubsets := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.2",
}},
Ports: []v1.EndpointPort{{
Name: "p12",
Port: 12,
Protocol: v1.ProtocolUDP,
}},
}}
}
multipleSubsetsWithLocal := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.2",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p12",
Port: 12,
Protocol: v1.ProtocolUDP,
}},
}}
}
multipleSubsetsMultiplePortsLocal := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}, {
Name: "p12",
Port: 12,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.3",
}},
Ports: []v1.EndpointPort{{
Name: "p13",
Port: 13,
Protocol: v1.ProtocolUDP,
}},
}}
}
multipleSubsetsIPsPorts1 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}, {
IP: "1.1.1.2",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}, {
Name: "p12",
Port: 12,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.3",
}, {
IP: "1.1.1.4",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p13",
Port: 13,
Protocol: v1.ProtocolUDP,
}, {
Name: "p14",
Port: 14,
Protocol: v1.ProtocolUDP,
}},
}}
}
multipleSubsetsIPsPorts2 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "2.2.2.1",
}, {
IP: "2.2.2.2",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p21",
Port: 21,
Protocol: v1.ProtocolUDP,
}, {
Name: "p22",
Port: 22,
Protocol: v1.ProtocolUDP,
}},
}}
}
complexBefore1 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}}
}
complexBefore2 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "2.2.2.2",
NodeName: &nodeName,
}, {
IP: "2.2.2.22",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p22",
Port: 22,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "2.2.2.3",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p23",
Port: 23,
Protocol: v1.ProtocolUDP,
}},
}}
}
complexBefore4 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "4.4.4.4",
NodeName: &nodeName,
}, {
IP: "4.4.4.5",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p44",
Port: 44,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "4.4.4.6",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p45",
Port: 45,
Protocol: v1.ProtocolUDP,
}},
}}
}
complexAfter1 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}, {
IP: "1.1.1.11",
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.2",
}},
Ports: []v1.EndpointPort{{
Name: "p12",
Port: 12,
Protocol: v1.ProtocolUDP,
}, {
Name: "p122",
Port: 122,
Protocol: v1.ProtocolUDP,
}},
}}
}
complexAfter3 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "3.3.3.3",
}},
Ports: []v1.EndpointPort{{
Name: "p33",
Port: 33,
Protocol: v1.ProtocolUDP,
}},
}}
}
complexAfter4 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "4.4.4.4",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p44",
Port: 44,
Protocol: v1.ProtocolUDP,
}},
}}
}
testCases := []struct {
// previousEndpoints and currentEndpoints are used to call appropriate
// handlers OnEndpoints* (based on whether corresponding values are nil
// or non-nil) and must be of equal length.
previousEndpoints []*v1.Endpoints
currentEndpoints []*v1.Endpoints
oldEndpoints map[proxy.ServicePortName][]*endpointsInfo
expectedResult map[proxy.ServicePortName][]*endpointsInfo
expectedStaleEndpoints []proxy.ServiceEndpoint
expectedStaleServiceNames map[proxy.ServicePortName]bool
expectedHealthchecks map[types.NamespacedName]int
}{{
// Case[0]: nothing
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{},
expectedStaleEndpoints: []proxy.ServiceEndpoint{},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[1]: no change, unnamed port
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", unnamedPort),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", unnamedPort),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[2]: no change, named port, local
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortLocal),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortLocal),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 1,
},
}, {
// Case[3]: no change, multiple subsets
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsets),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsets),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[4]: no change, multiple subsets, multiple ports, local
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsetsMultiplePortsLocal),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsetsMultiplePortsLocal),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 1,
},
}, {
// Case[5]: no change, multiple endpoints, subsets, IPs, and ports
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsetsIPsPorts1),
makeTestEndpoints("ns2", "ep2", multipleSubsetsIPsPorts2),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsetsIPsPorts1),
makeTestEndpoints("ns2", "ep2", multipleSubsetsIPsPorts2),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.4:13", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.3:14", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.4:14", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.1:21", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.2:21", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.4:13", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.3:14", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.4:14", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.1:21", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.2:21", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 2,
makeNSN("ns2", "ep2"): 1,
},
}, {
// Case[6]: add an Endpoints
previousEndpoints: []*v1.Endpoints{
nil,
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", unnamedPortLocal),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): true,
},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 1,
},
}, {
// Case[7]: remove an Endpoints
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", unnamedPortLocal),
},
currentEndpoints: []*v1.Endpoints{
nil,
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{},
expectedStaleEndpoints: []proxy.ServiceEndpoint{{
Endpoint: "1.1.1.1:11",
ServicePortName: makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP),
}},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[8]: add an IP and port
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortsLocalNoLocal),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 1,
},
}, {
// Case[9]: remove an IP and port
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortsLocalNoLocal),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{{
Endpoint: "1.1.1.2:11",
ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
}, {
Endpoint: "1.1.1.1:12",
ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
}, {
Endpoint: "1.1.1.2:12",
ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
}},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[10]: add a subset
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsetsWithLocal),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 1,
},
}, {
// Case[11]: remove a subset
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsets),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{{
Endpoint: "1.1.1.2:12",
ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
}},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[12]: rename a port
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortRenamed),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{{
Endpoint: "1.1.1.1:11",
ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
}},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{
makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): true,
},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[13]: renumber a port
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortRenumbered),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{{
Endpoint: "1.1.1.1:11",
ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
}},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[14]: complex add and remove
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", complexBefore1),
makeTestEndpoints("ns2", "ep2", complexBefore2),
nil,
makeTestEndpoints("ns4", "ep4", complexBefore4),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", complexAfter1),
nil,
makeTestEndpoints("ns3", "ep3", complexAfter3),
makeTestEndpoints("ns4", "ep4", complexAfter4),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.22:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.3:23", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "4.4.4.4:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "4.4.4.5:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "4.4.4.6:45", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.11:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:122", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "3.3.3.3:33", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "4.4.4.4:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{{
Endpoint: "2.2.2.2:22",
ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
}, {
Endpoint: "2.2.2.22:22",
ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
}, {
Endpoint: "2.2.2.3:23",
ServicePortName: makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP),
}, {
Endpoint: "4.4.4.5:44",
ServicePortName: makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP),
}, {
Endpoint: "4.4.4.6:45",
ServicePortName: makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP),
}},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): true,
makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP): true,
},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns4", "ep4"): 1,
},
}, {
// Case[15]: change from 0 endpoint address to 1 unnamed port
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", emptyEndpoint),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", unnamedPort),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): true,
},
expectedHealthchecks: map[types.NamespacedName]int{},
},
}
for tci, tc := range testCases {
ipt := iptablestest.NewFake()
fp := NewFakeProxier(ipt, false)
fp.hostname = nodeName
// First check that after adding all previous versions of endpoints,
// the fp.oldEndpoints is as we expect.
for i := range tc.previousEndpoints {
if tc.previousEndpoints[i] != nil {
fp.OnEndpointsAdd(tc.previousEndpoints[i])
}
}
fp.endpointsMap.Update(fp.endpointsChanges)
compareEndpointsMaps(t, tci, fp.endpointsMap, tc.oldEndpoints)
// Now let's call appropriate handlers to get to state we want to be.
if len(tc.previousEndpoints) != len(tc.currentEndpoints) {
t.Fatalf("[%d] different lengths of previous and current endpoints", tci)
continue
}
for i := range tc.previousEndpoints {
prev, curr := tc.previousEndpoints[i], tc.currentEndpoints[i]
switch {
case prev == nil:
fp.OnEndpointsAdd(curr)
case curr == nil:
fp.OnEndpointsDelete(prev)
default:
fp.OnEndpointsUpdate(prev, curr)
}
}
result := fp.endpointsMap.Update(fp.endpointsChanges)
newMap := fp.endpointsMap
compareEndpointsMaps(t, tci, newMap, tc.expectedResult)
if len(result.StaleEndpoints) != len(tc.expectedStaleEndpoints) {
t.Errorf("[%d] expected %d staleEndpoints, got %d: %v", tci, len(tc.expectedStaleEndpoints), len(result.StaleEndpoints), result.StaleEndpoints)
}
for _, x := range tc.expectedStaleEndpoints {
found := false
for _, stale := range result.StaleEndpoints {
if stale == x {
found = true
break
}
}
if !found {
t.Errorf("[%d] expected staleEndpoints[%v], but didn't find it: %v", tci, x, result.StaleEndpoints)
}
}
if len(result.StaleServiceNames) != len(tc.expectedStaleServiceNames) {
t.Errorf("[%d] expected %d staleServiceNames, got %d: %v", tci, len(tc.expectedStaleServiceNames), len(result.StaleServiceNames), result.StaleServiceNames)
}
for svcName := range tc.expectedStaleServiceNames {
found := false
for _, stale := range result.StaleServiceNames {
if stale == svcName {
found = true
}
}
if !found {
t.Errorf("[%d] expected staleServiceNames[%v], but didn't find it: %v", tci, svcName, result.StaleServiceNames)
}
}
if !reflect.DeepEqual(result.HCEndpointsLocalIPSize, tc.expectedHealthchecks) {
t.Errorf("[%d] expected healthchecks %v, got %v", tci, tc.expectedHealthchecks, result.HCEndpointsLocalIPSize)
}
}
}
// The majority of EndpointSlice specific tests are not iptables specific and focus on
// the shared EndpointChangeTracker and EndpointSliceCache. This test ensures that the
// iptables proxier supports translating EndpointSlices to iptables output.
func TestEndpointSliceE2E(t *testing.T) {
	// Expected iptables-save output once the service and its EndpointSlice are
	// programmed: one KUBE-SEP chain per *ready* endpoint (10.0.1.1-3); the
	// not-ready endpoint (10.0.1.4) must not produce a chain or a rule.
	expectedIPTablesWithSlice := `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 0 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 0 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-XGJFVO3L2O5SRFNT
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`
	ipt := iptablestest.NewFake()
	// Second argument true: presumably enables EndpointSlice handling in the
	// fake proxier (the test drives OnEndpointSlice* below) — confirm against
	// NewFakeProxier's signature.
	fp := NewFakeProxier(ipt, true)
	// Mark all informers as synced so syncProxyRules will actually program rules.
	fp.OnServiceSynced()
	fp.OnEndpointsSynced()
	fp.OnEndpointSlicesSynced()
	serviceName := "svc1"
	namespaceName := "ns1"
	fp.OnServiceAdd(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
		Spec: v1.ServiceSpec{
			ClusterIP: "172.20.1.1",
			Selector:  map[string]string{"foo": "bar"},
			Ports:     []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt(80), Protocol: v1.ProtocolTCP}},
		},
	})
	tcpProtocol := v1.ProtocolTCP
	// Slice with three ready endpoints and one not-ready endpoint; the label
	// ties it back to svc1 so the proxier associates it with the service.
	endpointSlice := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-1", serviceName),
			Namespace: namespaceName,
			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
		},
		Ports: []discovery.EndpointPort{{
			Name:     utilpointer.StringPtr(""),
			Port:     utilpointer.Int32Ptr(80),
			Protocol: &tcpProtocol,
		}},
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{{
			Addresses:  []string{"10.0.1.1"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			Topology:   map[string]string{"kubernetes.io/hostname": testHostname},
		}, {
			Addresses:  []string{"10.0.1.2"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			Topology:   map[string]string{"kubernetes.io/hostname": "node2"},
		}, {
			Addresses:  []string{"10.0.1.3"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			Topology:   map[string]string{"kubernetes.io/hostname": "node3"},
		}, { // not ready endpoints should be ignored
			Addresses:  []string{"10.0.1.4"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(false)},
			Topology:   map[string]string{"kubernetes.io/hostname": "node4"},
		}},
	}
	// Adding the slice must yield exactly the expected ruleset...
	fp.OnEndpointSliceAdd(endpointSlice)
	fp.syncProxyRules()
	assert.Equal(t, expectedIPTablesWithSlice, fp.iptablesData.String())
	// ...and deleting it must change the output (endpoint chains removed).
	fp.OnEndpointSliceDelete(endpointSlice)
	fp.syncProxyRules()
	assert.NotEqual(t, expectedIPTablesWithSlice, fp.iptablesData.String())
}
// TestHealthCheckNodePortE2E verifies that a LoadBalancer service with
// ExternalTrafficPolicy=Local produces an ACCEPT rule for its health check
// node port (30000) in the filter table and a KUBE-XLB chain in the nat
// table, and that deleting the service changes the rendered output.
func TestHealthCheckNodePortE2E(t *testing.T) {
	// Expected iptables-save output once the service and its slice endpoints
	// are programmed. Note the --dport 30000 ACCEPT (health check port) and
	// the KUBE-XLB-* chain that keeps node-port traffic on local endpoints.
	expectedIPTables := `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns1/svc1 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-XLB-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 0 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 0 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-NODEPORTS -m comment --comment ns1/svc1 -m tcp -p tcp --dport 30010 -s 127.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns1/svc1 -m tcp -p tcp --dport 30010 -j KUBE-XLB-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-XGJFVO3L2O5SRFNT
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/24 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Balancing rule 0 for ns1/svc1" -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, true)
	// Mark all informers as synced so syncProxyRules will actually program rules.
	fp.OnServiceSynced()
	fp.OnEndpointsSynced()
	fp.OnEndpointSlicesSynced()
	serviceName := "svc1"
	namespaceName := "ns1"
	// LoadBalancer service with Local external traffic policy; this is what
	// triggers both the health check node port and the XLB chain.
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
		Spec: v1.ServiceSpec{
			ClusterIP:             "172.20.1.1",
			Selector:              map[string]string{"foo": "bar"},
			Ports:                 []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt(80), NodePort: 30010, Protocol: v1.ProtocolTCP}},
			Type:                  "LoadBalancer",
			HealthCheckNodePort:   30000,
			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
		},
	}
	fp.OnServiceAdd(svc)
	tcpProtocol := v1.ProtocolTCP
	// Slice with one local endpoint (testHostname), two remote ready
	// endpoints, and one not-ready endpoint that must be ignored.
	endpointSlice := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-1", serviceName),
			Namespace: namespaceName,
			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
		},
		Ports: []discovery.EndpointPort{{
			Name:     utilpointer.StringPtr(""),
			Port:     utilpointer.Int32Ptr(80),
			Protocol: &tcpProtocol,
		}},
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{{
			Addresses:  []string{"10.0.1.1"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			Topology:   map[string]string{"kubernetes.io/hostname": testHostname},
		}, {
			Addresses:  []string{"10.0.1.2"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			Topology:   map[string]string{"kubernetes.io/hostname": "node2"},
		}, {
			Addresses:  []string{"10.0.1.3"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			Topology:   map[string]string{"kubernetes.io/hostname": "node3"},
		}, { // not ready endpoints should be ignored
			Addresses:  []string{"10.0.1.4"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(false)},
			Topology:   map[string]string{"kubernetes.io/hostname": "node4"},
		}},
	}
	// Adding the slice must yield exactly the expected ruleset...
	fp.OnEndpointSliceAdd(endpointSlice)
	fp.syncProxyRules()
	assert.Equal(t, expectedIPTables, fp.iptablesData.String())
	// ...and deleting the service must change the output.
	fp.OnServiceDelete(svc)
	fp.syncProxyRules()
	assert.NotEqual(t, expectedIPTables, fp.iptablesData.String())
}
// TestProxierDeleteNodePortStaleUDP verifies that when a UDP service goes
// from zero endpoints to at least one endpoint, syncProxyRules issues exactly
// two conntrack deletions — one for the ClusterIP and one for the NodePort —
// and that no conntrack command runs while the service has no endpoints.
func TestProxierDeleteNodePortStaleUDP(t *testing.T) {
	fcmd := fakeexec.FakeCmd{}
	fexec := fakeexec.FakeExec{
		LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
	}
	execFunc := func(cmd string, args ...string) exec.Cmd {
		return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
	}
	cmdOutput := "1 flow entries have been deleted"
	cmdFunc := func() ([]byte, []byte, error) { return []byte(cmdOutput), nil, nil }
	// Queue two fake conntrack invocations: the first for the ClusterIP
	// entries, the second for the NodePort entries.
	fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
	fexec.CommandScript = append(fexec.CommandScript, execFunc)
	fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
	fexec.CommandScript = append(fexec.CommandScript, execFunc)

	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	fp.exec = &fexec

	svcIP := "10.20.30.41"
	svcPort := 80
	nodePort := 31201
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolUDP,
	}

	// Program a UDP NodePort service with no endpoints yet.
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolUDP,
				NodePort: int32(nodePort),
			}}
		}),
	)
	makeEndpointsMap(fp)

	fp.syncProxyRules()
	if fexec.CommandCalls != 0 {
		t.Fatalf("Created service without endpoints must not clear conntrack entries")
	}

	// Add an endpoint; the next sync must flush the stale UDP conntrack state.
	epIP := "10.180.0.1"
	makeEndpointsMap(fp,
		makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP: epIP,
				}},
				Ports: []v1.EndpointPort{{
					Name:     svcPortName.Port,
					Port:     int32(svcPort),
					Protocol: v1.ProtocolUDP,
				}},
			}}
		}),
	)

	fp.syncProxyRules()
	if fexec.CommandCalls != 2 {
		t.Fatalf("Updated UDP service with new endpoints must clear UDP entries")
	}

	// conntrack is invoked with the protocol name in lowercase.
	udpProto := strings.ToLower(string(v1.ProtocolUDP))

	// Delete ClusterIP Conntrack entries
	expectCommand := fmt.Sprintf("conntrack -D --orig-dst %s -p %s", svcIP, udpProto)
	actualCommand := strings.Join(fcmd.CombinedOutputLog[0], " ")
	if actualCommand != expectCommand {
		t.Errorf("Expected command: %s, but executed %s", expectCommand, actualCommand)
	}
	// Delete NodePort Conntrack entries
	expectCommand = fmt.Sprintf("conntrack -D -p %s --dport %d", udpProto, nodePort)
	actualCommand = strings.Join(fcmd.CombinedOutputLog[1], " ")
	if actualCommand != expectCommand {
		t.Errorf("Expected command: %s, but executed %s", expectCommand, actualCommand)
	}
}
// TestProxierMetricsIptablesTotalRules checks that the IptablesRulesTotal
// gauge tracks the number of rules the proxier writes to the filter and nat
// tables: REJECT rules in filter while the service has no endpoints, and
// SVC/SEP chains in nat once endpoints are added.
func TestProxierMetricsIptablesTotalRules(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	metrics.RegisterMetrics()

	svcIP := "10.20.30.41"
	svcPort := 80
	nodePort := 31201
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	// Program a TCP NodePort service with no endpoints.
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(nodePort),
			}}
		}),
	)
	makeEndpointsMap(fp)
	fp.syncProxyRules()

	nFilterRules, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableFilter)))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
	}
	// With no endpoints, the filter table carries (protocol is tcp here, and
	// chain names are illustrative):
	// -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 10.20.30.41/32 --dport 80 -j REJECT
	// -A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 31201 -j REJECT
	// -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
	// -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
	// -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
	// -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
	// COMMIT
	if nFilterRules != 7.0 {
		t.Fatalf("Wrong number of filter rules: expected 7 received %f", nFilterRules)
	}

	nNatRules, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableNAT)))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
	}
	// The nat table only carries the boilerplate chains at this point:
	// -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
	// -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
	// -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
	// -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
	// -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
	// COMMIT
	if nNatRules != 6.0 {
		t.Fatalf("Wrong number of nat rules: expected 6 received %f", nNatRules)
	}

	// Add two endpoints; the REJECT rules disappear from filter and the
	// SVC/SEP load-balancing chains appear in nat.
	makeEndpointsMap(fp,
		makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{
					{
						IP: "10.0.0.2",
					},
					{
						IP: "10.0.0.5",
					},
				},
				Ports: []v1.EndpointPort{{
					Name:     svcPortName.Port,
					Port:     int32(svcPort),
					Protocol: v1.ProtocolTCP,
				}},
			}}
		}),
	)
	fp.syncProxyRules()

	nFilterRules, err = testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableFilter)))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
	}
	// Only the standard KUBE-FORWARD rules remain in filter:
	// -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
	// -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
	// -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
	// -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
	// COMMIT
	if nFilterRules != 5.0 {
		t.Fatalf("Wrong number of filter rules: expected 5 received %f", nFilterRules)
	}

	nNatRules, err = testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableNAT)))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
	}
	// The nat table now additionally carries the ClusterIP/NodePort jump
	// rules plus one SVC chain and two SEP chains (protocol is tcp here;
	// chain hashes are illustrative):
	// -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
	// -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
	// -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
	// -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
	// -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
	// -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41/32 --dport 80 -j KUBE-SVC-...
	// -A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 31201 -j KUBE-MARK-MASQ
	// -A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 31201 -j KUBE-SVC-...
	// -A KUBE-SVC-... -m comment --comment ns1/svc1:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-...
	// -A KUBE-SEP-... -m comment --comment ns1/svc1:p80 -s 10.0.0.2/32 -j KUBE-MARK-MASQ
	// -A KUBE-SEP-... -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.0.2:80
	// -A KUBE-SVC-... -m comment --comment ns1/svc1:p80 -j KUBE-SEP-...
	// -A KUBE-SEP-... -m comment --comment ns1/svc1:p80 -s 10.0.0.5/32 -j KUBE-MARK-MASQ
	// -A KUBE-SEP-... -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.0.5:80
	// -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
	// COMMIT
	if nNatRules != 16.0 {
		t.Fatalf("Wrong number of nat rules: expected 16 received %f", nNatRules)
	}
}
// TODO(thockin): add *more* tests for syncProxyRules() or break it down further and test the pieces.
// This test ensures that the iptables proxier supports translating Endpoints to
// iptables output when internalTrafficPolicy is specified
func TestInternalTrafficPolicyE2E(t *testing.T) {
	// endpoint pairs an endpoint IP with the node hostname it runs on; a
	// hostname equal to testHostname marks the endpoint as local to the
	// fake proxier's node.
	type endpoint struct {
		ip       string
		hostname string
	}
	cluster := v1.ServiceInternalTrafficPolicyCluster
	local := v1.ServiceInternalTrafficPolicyLocal
	// clusterExpectedIPTables is the complete iptables-save payload expected
	// when all three endpoints are programmed (policy Cluster, or the
	// feature gate disabled). The string must match byte-for-byte.
	clusterExpectedIPTables := `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-XGJFVO3L2O5SRFNT
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`
	testCases := []struct {
		name                      string
		internalTrafficPolicy     *v1.ServiceInternalTrafficPolicyType
		featureGateOn             bool
		endpoints                 []endpoint
		expectEndpointRule        bool // when true, also verify that deleting the slice changes the rules
		expectedIPTablesWithSlice string
	}{
		{
			name:                  "internalTrafficPolicy is cluster",
			internalTrafficPolicy: &cluster,
			featureGateOn:         true,
			endpoints: []endpoint{
				{"10.0.1.1", testHostname},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectEndpointRule:        true,
			expectedIPTablesWithSlice: clusterExpectedIPTables,
		},
		{
			name:                  "internalTrafficPolicy is local and there is non-zero local endpoints",
			internalTrafficPolicy: &local,
			featureGateOn:         true,
			endpoints: []endpoint{
				{"10.0.1.1", testHostname},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectEndpointRule: true,
			// Only the single local endpoint (10.0.1.1) gets a SEP chain.
			expectedIPTablesWithSlice: `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`,
		},
		{
			name:                  "internalTrafficPolicy is local and there is zero local endpoint",
			internalTrafficPolicy: &local,
			featureGateOn:         true,
			endpoints: []endpoint{
				{"10.0.1.1", "host0"},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectEndpointRule: false,
			// No local endpoints at all: the service gets a REJECT rule in
			// the filter table and no SVC/SEP chains in nat.
			expectedIPTablesWithSlice: `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-SERVICES -m comment --comment "ns1/svc1 has no endpoints" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 -j REJECT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`,
		},
		{
			name:                  "internalTrafficPolicy is local and there is non-zero local endpoint with feature gate off",
			internalTrafficPolicy: &local,
			featureGateOn:         false,
			endpoints: []endpoint{
				{"10.0.1.1", testHostname},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectEndpointRule: false,
			// Gate off: policy is ignored and all endpoints are programmed.
			expectedIPTablesWithSlice: clusterExpectedIPTables,
		},
	}
	for _, tc := range testCases {
		// NOTE(review): defer inside a loop — each iteration's feature-gate
		// restore is stacked and only unwound when the whole test returns.
		// This works here because every iteration sets the gate explicitly
		// before using it, but the restores do not run per-iteration.
		defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ServiceInternalTrafficPolicy, tc.featureGateOn)()
		ipt := iptablestest.NewFake()
		fp := NewFakeProxier(ipt, true)
		fp.OnServiceSynced()
		fp.OnEndpointsSynced()
		fp.OnEndpointSlicesSynced()
		serviceName := "svc1"
		namespaceName := "ns1"
		svc := &v1.Service{
			ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
			Spec: v1.ServiceSpec{
				ClusterIP: "172.20.1.1",
				Selector:  map[string]string{"foo": "bar"},
				Ports:     []v1.ServicePort{{Name: "", Port: 80, Protocol: v1.ProtocolTCP}},
			},
		}
		if tc.internalTrafficPolicy != nil {
			svc.Spec.InternalTrafficPolicy = tc.internalTrafficPolicy
		}
		fp.OnServiceAdd(svc)
		tcpProtocol := v1.ProtocolTCP
		endpointSlice := &discovery.EndpointSlice{
			ObjectMeta: metav1.ObjectMeta{
				Name:      fmt.Sprintf("%s-1", serviceName),
				Namespace: namespaceName,
				Labels:    map[string]string{discovery.LabelServiceName: serviceName},
			},
			Ports: []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(""),
				Port:     utilpointer.Int32Ptr(80),
				Protocol: &tcpProtocol,
			}},
			AddressType: discovery.AddressTypeIPv4,
		}
		// The hostname topology label is what marks an endpoint as local.
		for _, ep := range tc.endpoints {
			endpointSlice.Endpoints = append(endpointSlice.Endpoints, discovery.Endpoint{
				Addresses:  []string{ep.ip},
				Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
				Topology:   map[string]string{"kubernetes.io/hostname": ep.hostname},
			})
		}
		fp.OnEndpointSliceAdd(endpointSlice)
		fp.syncProxyRules()
		// The full generated rule dump must match byte-for-byte.
		assert.Equal(t, tc.expectedIPTablesWithSlice, fp.iptablesData.String())
		if tc.expectEndpointRule {
			// Deleting the slice must change the generated rules.
			fp.OnEndpointSliceDelete(endpointSlice)
			fp.syncProxyRules()
			assert.NotEqual(t, tc.expectedIPTablesWithSlice, fp.iptablesData.String())
		}
	}
}
proxier/iptables: add unit tests for falling back to terminating endpoints
Signed-off-by: Andrew Sy Kim <216147b618cb827f522f086e90f58ea14ddeaac5@gmail.com>
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package iptables
import (
	"bytes"
	"errors"
	"fmt"
	"net"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	v1 "k8s.io/api/core/v1"
	discovery "k8s.io/api/discovery/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/component-base/metrics/testutil"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/proxy"
	"k8s.io/kubernetes/pkg/proxy/healthcheck"
	"k8s.io/kubernetes/pkg/proxy/metrics"
	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
	proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
	utilproxytest "k8s.io/kubernetes/pkg/proxy/util/testing"
	"k8s.io/kubernetes/pkg/util/async"
	"k8s.io/kubernetes/pkg/util/conntrack"
	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
	iptablestest "k8s.io/kubernetes/pkg/util/iptables/testing"
	"k8s.io/utils/exec"
	fakeexec "k8s.io/utils/exec/testing"
	utilnet "k8s.io/utils/net"
	utilpointer "k8s.io/utils/pointer"
)
// checkAllLines parses the chain declaration lines of the given table out
// of an iptables-save dump and verifies that every parsed chain has a
// matching entry in expectedLines with exactly the expected text. A chain
// that appears in the dump but not in expectedLines is a failure.
//
// NOTE(review): the converse is not checked — an entry in expectedLines
// with no corresponding chain in the dump goes undetected here.
func checkAllLines(t *testing.T, table utiliptables.Table, save []byte, expectedLines map[utiliptables.Chain]string) {
	t.Helper()
	chainLines := utiliptables.GetChainLines(table, save)
	for chain, lineBytes := range chainLines {
		line := string(lineBytes)
		expected, exists := expectedLines[chain]
		if !exists {
			// The original message claimed an "expected chain not present",
			// but this branch fires for the opposite case: the dump produced
			// a chain the caller did not anticipate.
			t.Errorf("getChainLines returned unexpected chain: %s", chain)
			continue
		}
		if expected != line {
			t.Errorf("getChainLines chain line mismatch. For chain: %s Expected: %s Got: %s", chain, expected, line)
		}
	}
}
// TestGetChainLines verifies that chain declaration lines are extracted
// correctly from a minimal single-table iptables-save dump.
func TestGetChainLines(t *testing.T) {
	const dump = `# Generated by iptables-save v1.4.7 on Wed Oct 29 14:56:01 2014
*nat
:PREROUTING ACCEPT [2136997:197881818]
:POSTROUTING ACCEPT [4284525:258542680]
:OUTPUT ACCEPT [5901660:357267963]
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
COMMIT
# Completed on Wed Oct 29 14:56:01 2014`
	want := map[utiliptables.Chain]string{
		utiliptables.ChainPrerouting:  ":PREROUTING ACCEPT [2136997:197881818]",
		utiliptables.ChainPostrouting: ":POSTROUTING ACCEPT [4284525:258542680]",
		utiliptables.ChainOutput:      ":OUTPUT ACCEPT [5901660:357267963]",
	}
	checkAllLines(t, utiliptables.TableNAT, []byte(dump), want)
}
// TestGetChainLinesMultipleTables verifies that chain extraction is scoped
// to the requested table: the dump contains both *nat and *filter sections,
// and only the *nat chains must be returned.
func TestGetChainLinesMultipleTables(t *testing.T) {
	iptablesSave := `# Generated by iptables-save v1.4.21 on Fri Aug  7 14:47:37 2015
*nat
:PREROUTING ACCEPT [2:138]
:INPUT ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
:DOCKER - [0:0]
:KUBE-NODEPORT-CONTAINER - [0:0]
:KUBE-NODEPORT-HOST - [0:0]
:KUBE-PORTALS-CONTAINER - [0:0]
:KUBE-PORTALS-HOST - [0:0]
:KUBE-SVC-1111111111111111 - [0:0]
:KUBE-SVC-2222222222222222 - [0:0]
:KUBE-SVC-3333333333333333 - [0:0]
:KUBE-SVC-4444444444444444 - [0:0]
:KUBE-SVC-5555555555555555 - [0:0]
:KUBE-SVC-6666666666666666 - [0:0]
-A PREROUTING -m comment --comment "handle ClusterIPs; NOTE: this must be before the NodePort rules" -j KUBE-PORTALS-CONTAINER
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
-A PREROUTING -m addrtype --dst-type LOCAL -m comment --comment "handle service NodePorts; NOTE: this must be the last rule in the chain" -j KUBE-NODEPORT-CONTAINER
-A OUTPUT -m comment --comment "handle ClusterIPs; NOTE: this must be before the NodePort rules" -j KUBE-PORTALS-HOST
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
-A OUTPUT -m addrtype --dst-type LOCAL -m comment --comment "handle service NodePorts; NOTE: this must be the last rule in the chain" -j KUBE-NODEPORT-HOST
-A POSTROUTING -s 10.246.1.0/24 ! -o cbr0 -j MASQUERADE
-A POSTROUTING -s 10.0.2.15/32 -d 10.0.2.15/32 -m comment --comment "handle pod connecting to self" -j MASQUERADE
-A KUBE-PORTALS-CONTAINER -d 10.247.0.1/32 -p tcp -m comment --comment "portal for default/kubernetes:" -m state --state NEW -m tcp --dport 443 -j KUBE-SVC-5555555555555555
-A KUBE-PORTALS-CONTAINER -d 10.247.0.10/32 -p udp -m comment --comment "portal for kube-system/kube-dns:dns" -m state --state NEW -m udp --dport 53 -j KUBE-SVC-6666666666666666
-A KUBE-PORTALS-CONTAINER -d 10.247.0.10/32 -p tcp -m comment --comment "portal for kube-system/kube-dns:dns-tcp" -m state --state NEW -m tcp --dport 53 -j KUBE-SVC-2222222222222222
-A KUBE-PORTALS-HOST -d 10.247.0.1/32 -p tcp -m comment --comment "portal for default/kubernetes:" -m state --state NEW -m tcp --dport 443 -j KUBE-SVC-5555555555555555
-A KUBE-PORTALS-HOST -d 10.247.0.10/32 -p udp -m comment --comment "portal for kube-system/kube-dns:dns" -m state --state NEW -m udp --dport 53 -j KUBE-SVC-6666666666666666
-A KUBE-PORTALS-HOST -d 10.247.0.10/32 -p tcp -m comment --comment "portal for kube-system/kube-dns:dns-tcp" -m state --state NEW -m tcp --dport 53 -j KUBE-SVC-2222222222222222
-A KUBE-SVC-1111111111111111 -p udp -m comment --comment "kube-system/kube-dns:dns" -m recent --set --name KUBE-SVC-1111111111111111 --mask 255.255.255.255 --rsource -j DNAT --to-destination 10.246.1.2:53
-A KUBE-SVC-2222222222222222 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-SVC-3333333333333333
-A KUBE-SVC-3333333333333333 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp" -m recent --set --name KUBE-SVC-3333333333333333 --mask 255.255.255.255 --rsource -j DNAT --to-destination 10.246.1.2:53
-A KUBE-SVC-4444444444444444 -p tcp -m comment --comment "default/kubernetes:" -m recent --set --name KUBE-SVC-4444444444444444 --mask 255.255.255.255 --rsource -j DNAT --to-destination 10.245.1.2:443
-A KUBE-SVC-5555555555555555 -m comment --comment "default/kubernetes:" -j KUBE-SVC-4444444444444444
-A KUBE-SVC-6666666666666666 -m comment --comment "kube-system/kube-dns:dns" -j KUBE-SVC-1111111111111111
COMMIT
# Completed on Fri Aug  7 14:47:37 2015
# Generated by iptables-save v1.4.21 on Fri Aug  7 14:47:37 2015
*filter
:INPUT ACCEPT [17514:83115836]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [8909:688225]
:DOCKER - [0:0]
-A FORWARD -o cbr0 -j DOCKER
-A FORWARD -o cbr0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -i cbr0 ! -o cbr0 -j ACCEPT
-A FORWARD -i cbr0 -o cbr0 -j ACCEPT
COMMIT
`
	// Only *nat chains are expected; the *filter chains (INPUT, FORWARD,
	// the filter-table DOCKER, ...) must not leak into the result.
	expected := map[utiliptables.Chain]string{
		utiliptables.ChainPrerouting:                    ":PREROUTING ACCEPT [2:138]",
		utiliptables.Chain("INPUT"):                     ":INPUT ACCEPT [0:0]",
		utiliptables.Chain("OUTPUT"):                    ":OUTPUT ACCEPT [0:0]",
		utiliptables.ChainPostrouting:                   ":POSTROUTING ACCEPT [0:0]",
		utiliptables.Chain("DOCKER"):                    ":DOCKER - [0:0]",
		utiliptables.Chain("KUBE-NODEPORT-CONTAINER"):   ":KUBE-NODEPORT-CONTAINER - [0:0]",
		utiliptables.Chain("KUBE-NODEPORT-HOST"):        ":KUBE-NODEPORT-HOST - [0:0]",
		utiliptables.Chain("KUBE-PORTALS-CONTAINER"):    ":KUBE-PORTALS-CONTAINER - [0:0]",
		utiliptables.Chain("KUBE-PORTALS-HOST"):         ":KUBE-PORTALS-HOST - [0:0]",
		utiliptables.Chain("KUBE-SVC-1111111111111111"): ":KUBE-SVC-1111111111111111 - [0:0]",
		utiliptables.Chain("KUBE-SVC-2222222222222222"): ":KUBE-SVC-2222222222222222 - [0:0]",
		utiliptables.Chain("KUBE-SVC-3333333333333333"): ":KUBE-SVC-3333333333333333 - [0:0]",
		utiliptables.Chain("KUBE-SVC-4444444444444444"): ":KUBE-SVC-4444444444444444 - [0:0]",
		utiliptables.Chain("KUBE-SVC-5555555555555555"): ":KUBE-SVC-5555555555555555 - [0:0]",
		utiliptables.Chain("KUBE-SVC-6666666666666666"): ":KUBE-SVC-6666666666666666 - [0:0]",
	}
	checkAllLines(t, utiliptables.TableNAT, []byte(iptablesSave), expected)
}
// TestDeleteEndpointConnectionsIPv4 verifies that deleteEndpointConnections
// invokes the conntrack CLI to flush stale entries for UDP and SCTP service
// endpoints (TCP connection state needs no cleanup), that the command line
// passed to conntrack is exactly what we expect, and that only unexpected
// conntrack failures are logged as errors — the benign "no connections
// found" error must be swallowed.
func TestDeleteEndpointConnectionsIPv4(t *testing.T) {
	const (
		UDP  = v1.ProtocolUDP
		TCP  = v1.ProtocolTCP
		SCTP = v1.ProtocolSCTP
	)
	testCases := []struct {
		description  string
		svcName      string
		svcIP        string
		svcPort      int32
		protocol     v1.Protocol
		endpoint     string                // IP:port endpoint
		epSvcPair    proxy.ServiceEndpoint // Will be generated by test
		simulatedErr string                // error the fake conntrack run should return
	}{
		{
			description: "V4 UDP",
			svcName:     "v4-udp",
			svcIP:       "10.96.1.1",
			svcPort:     80,
			protocol:    UDP,
			endpoint:    "10.240.0.3:80",
		},
		{
			description: "V4 TCP",
			svcName:     "v4-tcp",
			svcIP:       "10.96.2.2",
			svcPort:     80,
			protocol:    TCP,
			endpoint:    "10.240.0.4:80",
		},
		{
			description: "V4 SCTP",
			svcName:     "v4-sctp",
			svcIP:       "10.96.3.3",
			svcPort:     80,
			protocol:    SCTP,
			endpoint:    "10.240.0.5:80",
		},
		{
			description:  "V4 UDP, nothing to delete, benign error",
			svcName:      "v4-udp-nothing-to-delete",
			svcIP:        "10.96.1.1",
			svcPort:      80,
			protocol:     UDP,
			endpoint:     "10.240.0.3:80",
			simulatedErr: conntrack.NoConnectionToDelete,
		},
		{
			description:  "V4 UDP, unexpected error, should be glogged",
			svcName:      "v4-udp-simulated-error",
			svcIP:        "10.96.1.1",
			svcPort:      80,
			protocol:     UDP,
			endpoint:     "10.240.0.3:80",
			simulatedErr: "simulated error",
		},
	}
	// Create a fake executor for the conntrack utility. This should only be
	// invoked for UDP and SCTP connections, since no conntrack cleanup is
	// needed for TCP.
	fcmd := fakeexec.FakeCmd{}
	fexec := fakeexec.FakeExec{
		LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
	}
	execFunc := func(cmd string, args ...string) exec.Cmd {
		return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
	}
	// Queue one scripted conntrack invocation per UDP/SCTP case, in test
	// order, so each deleteEndpointConnections call below consumes the
	// matching scripted result.
	for _, tc := range testCases {
		if conntrack.IsClearConntrackNeeded(tc.protocol) {
			var cmdOutput string
			var simErr error
			if tc.simulatedErr == "" {
				cmdOutput = "1 flow entries have been deleted"
			} else {
				// errors.New, not fmt.Errorf: the simulated message is data,
				// not a format string, so a '%' in it must stay literal
				// (go vet's printf check flags non-constant format strings).
				simErr = errors.New(tc.simulatedErr)
			}
			cmdFunc := func() ([]byte, []byte, error) { return []byte(cmdOutput), nil, simErr }
			fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
			fexec.CommandScript = append(fexec.CommandScript, execFunc)
		}
	}
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	fp.exec = &fexec
	// Register every test service with the proxier's service map up front.
	for _, tc := range testCases {
		makeServiceMap(fp,
			makeTestService("ns1", tc.svcName, func(svc *v1.Service) {
				svc.Spec.ClusterIP = tc.svcIP
				svc.Spec.Ports = []v1.ServicePort{{
					Name:     "p80",
					Port:     tc.svcPort,
					Protocol: tc.protocol,
				}}
				svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			}),
		)
		fp.serviceMap.Update(fp.serviceChanges)
	}
	// Run the test cases
	for _, tc := range testCases {
		priorExecs := fexec.CommandCalls
		priorGlogErrs := klog.Stats.Error.Lines()
		svc := proxy.ServicePortName{
			NamespacedName: types.NamespacedName{Namespace: "ns1", Name: tc.svcName},
			Port:           "p80",
			Protocol:       tc.protocol,
		}
		input := []proxy.ServiceEndpoint{
			{
				Endpoint:        tc.endpoint,
				ServicePortName: svc,
			},
		}
		fp.deleteEndpointConnections(input)
		// For UDP and SCTP connections, check the executed conntrack command
		var expExecs int
		if conntrack.IsClearConntrackNeeded(tc.protocol) {
			isIPv6 := func(ip string) bool {
				netIP := net.ParseIP(ip)
				return netIP.To4() == nil
			}
			endpointIP := utilproxy.IPPart(tc.endpoint)
			expectCommand := fmt.Sprintf("conntrack -D --orig-dst %s --dst-nat %s -p %s", tc.svcIP, endpointIP, strings.ToLower(string(tc.protocol)))
			if isIPv6(endpointIP) {
				expectCommand += " -f ipv6"
			}
			actualCommand := strings.Join(fcmd.CombinedOutputLog[fexec.CommandCalls-1], " ")
			if actualCommand != expectCommand {
				t.Errorf("%s: Expected command: %s, but executed %s", tc.description, expectCommand, actualCommand)
			}
			expExecs = 1
		}
		// Check the number of times conntrack was executed
		execs := fexec.CommandCalls - priorExecs
		if execs != expExecs {
			t.Errorf("%s: Expected conntrack to be executed %d times, but got %d", tc.description, expExecs, execs)
		}
		// Check the number of new glog errors: only a non-benign simulated
		// error should produce one.
		var expGlogErrs int64
		if tc.simulatedErr != "" && tc.simulatedErr != conntrack.NoConnectionToDelete {
			expGlogErrs = 1
		}
		glogErrs := klog.Stats.Error.Lines() - priorGlogErrs
		if glogErrs != expGlogErrs {
			t.Errorf("%s: Expected %d glogged errors, but got %d", tc.description, expGlogErrs, glogErrs)
		}
	}
}
// TestDeleteEndpointConnectionsIPv6 mirrors TestDeleteEndpointConnectionsIPv4
// for IPv6 services: deleteEndpointConnections must flush conntrack entries
// for UDP and SCTP endpoints (not TCP), and the generated conntrack command
// must carry the "-f ipv6" flag for IPv6 endpoint addresses.
func TestDeleteEndpointConnectionsIPv6(t *testing.T) {
	const (
		UDP  = v1.ProtocolUDP
		TCP  = v1.ProtocolTCP
		SCTP = v1.ProtocolSCTP
	)
	testCases := []struct {
		description  string
		svcName      string
		svcIP        string
		svcPort      int32
		protocol     v1.Protocol
		endpoint     string                // IP:port endpoint
		epSvcPair    proxy.ServiceEndpoint // Will be generated by test
		simulatedErr string                // error the fake conntrack run should return
	}{
		{
			description: "V6 UDP",
			svcName:     "v6-udp",
			svcIP:       "fd00:1234::20",
			svcPort:     80,
			protocol:    UDP,
			endpoint:    "[2001:db8::2]:80",
		},
		{
			description: "V6 TCP",
			svcName:     "v6-tcp",
			svcIP:       "fd00:1234::30",
			svcPort:     80,
			protocol:    TCP,
			endpoint:    "[2001:db8::3]:80",
		},
		{
			description: "V6 SCTP",
			svcName:     "v6-sctp",
			svcIP:       "fd00:1234::40",
			svcPort:     80,
			protocol:    SCTP,
			endpoint:    "[2001:db8::4]:80",
		},
	}
	// Create a fake executor for the conntrack utility. This should only be
	// invoked for UDP and SCTP connections, since no conntrack cleanup is
	// needed for TCP.
	fcmd := fakeexec.FakeCmd{}
	fexec := fakeexec.FakeExec{
		LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
	}
	execFunc := func(cmd string, args ...string) exec.Cmd {
		return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
	}
	// Queue one scripted conntrack invocation per UDP/SCTP case, in test
	// order, so each deleteEndpointConnections call below consumes the
	// matching scripted result.
	for _, tc := range testCases {
		if conntrack.IsClearConntrackNeeded(tc.protocol) {
			var cmdOutput string
			var simErr error
			if tc.simulatedErr == "" {
				cmdOutput = "1 flow entries have been deleted"
			} else {
				// errors.New, not fmt.Errorf: the simulated message is data,
				// not a format string, so a '%' in it must stay literal
				// (go vet's printf check flags non-constant format strings).
				simErr = errors.New(tc.simulatedErr)
			}
			cmdFunc := func() ([]byte, []byte, error) { return []byte(cmdOutput), nil, simErr }
			fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
			fexec.CommandScript = append(fexec.CommandScript, execFunc)
		}
	}
	ipt := iptablestest.NewIPv6Fake()
	fp := NewFakeProxier(ipt, false)
	fp.exec = &fexec
	// Register every test service with the proxier's service map up front.
	for _, tc := range testCases {
		makeServiceMap(fp,
			makeTestService("ns1", tc.svcName, func(svc *v1.Service) {
				svc.Spec.ClusterIP = tc.svcIP
				svc.Spec.Ports = []v1.ServicePort{{
					Name:     "p80",
					Port:     tc.svcPort,
					Protocol: tc.protocol,
				}}
				svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			}),
		)
		fp.serviceMap.Update(fp.serviceChanges)
	}
	// Run the test cases
	for _, tc := range testCases {
		priorExecs := fexec.CommandCalls
		priorGlogErrs := klog.Stats.Error.Lines()
		svc := proxy.ServicePortName{
			NamespacedName: types.NamespacedName{Namespace: "ns1", Name: tc.svcName},
			Port:           "p80",
			Protocol:       tc.protocol,
		}
		input := []proxy.ServiceEndpoint{
			{
				Endpoint:        tc.endpoint,
				ServicePortName: svc,
			},
		}
		fp.deleteEndpointConnections(input)
		// For UDP and SCTP connections, check the executed conntrack command
		var expExecs int
		if conntrack.IsClearConntrackNeeded(tc.protocol) {
			isIPv6 := func(ip string) bool {
				netIP := net.ParseIP(ip)
				return netIP.To4() == nil
			}
			endpointIP := utilproxy.IPPart(tc.endpoint)
			expectCommand := fmt.Sprintf("conntrack -D --orig-dst %s --dst-nat %s -p %s", tc.svcIP, endpointIP, strings.ToLower(string(tc.protocol)))
			if isIPv6(endpointIP) {
				expectCommand += " -f ipv6"
			}
			actualCommand := strings.Join(fcmd.CombinedOutputLog[fexec.CommandCalls-1], " ")
			if actualCommand != expectCommand {
				t.Errorf("%s: Expected command: %s, but executed %s", tc.description, expectCommand, actualCommand)
			}
			expExecs = 1
		}
		// Check the number of times conntrack was executed
		execs := fexec.CommandCalls - priorExecs
		if execs != expExecs {
			t.Errorf("%s: Expected conntrack to be executed %d times, but got %d", tc.description, expExecs, execs)
		}
		// Check the number of new glog errors: only a non-benign simulated
		// error should produce one.
		var expGlogErrs int64
		if tc.simulatedErr != "" && tc.simulatedErr != conntrack.NoConnectionToDelete {
			expGlogErrs = 1
		}
		glogErrs := klog.Stats.Error.Lines() - priorGlogErrs
		if glogErrs != expGlogErrs {
			t.Errorf("%s: Expected %d glogged errors, but got %d", tc.description, expGlogErrs, glogErrs)
		}
	}
}
// fakeCloseable implements utilproxy.Closeable with a no-op Close, so tests
// can hand out "held" local ports without touching the network stack.
type fakeCloseable struct{}

// Close fakes out the close() used by syncProxyRules to release a local port.
// It always succeeds and has no side effects.
func (f *fakeCloseable) Close() error {
	return nil
}
// fakePortOpener implements portOpener. It records every port it was asked
// to open so tests can assert on the set of opened ports.
type fakePortOpener struct {
	openPorts []*utilnet.LocalPort
}

// OpenLocalPort fakes out the listen() and bind() used by syncProxyRules
// to lock a local port. It never fails; the returned Closeable is a no-op.
func (f *fakePortOpener) OpenLocalPort(lp *utilnet.LocalPort) (utilnet.Closeable, error) {
	f.openPorts = append(f.openPorts, lp)
	return &fakeCloseable{}, nil
}
const testHostname = "test-hostname"
// NewFakeProxier constructs a Proxier wired entirely to fakes — fake exec,
// fake port opener, fake network interfacer and the supplied (fake) iptables
// handle — so syncProxyRules can run without touching the host. The IP
// family is derived from the iptables handle; endpointSlicesEnabled toggles
// EndpointSlice tracking in the endpoints change tracker.
func NewFakeProxier(ipt utiliptables.Interface, endpointSlicesEnabled bool) *Proxier {
	// TODO: Call NewProxier after refactoring out the goroutine
	// invocation into a Run() method.
	ipfamily := v1.IPv4Protocol
	if ipt.IsIPv6() {
		ipfamily = v1.IPv6Protocol
	}
	// 10.0.0.0/24 acts as the fake cluster CIDR used to classify traffic
	// sources as local; the error is ignored because the CIDR is a constant.
	detectLocal, _ := proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/24", ipt)
	p := &Proxier{
		exec:                     &fakeexec.FakeExec{},
		serviceMap:               make(proxy.ServiceMap),
		serviceChanges:           proxy.NewServiceChangeTracker(newServiceInfo, ipfamily, nil, nil),
		endpointsMap:             make(proxy.EndpointsMap),
		endpointsChanges:         proxy.NewEndpointChangeTracker(testHostname, newEndpointInfo, ipfamily, nil, endpointSlicesEnabled, nil),
		iptables:                 ipt,
		masqueradeMark:           "0x4000",
		localDetector:            detectLocal,
		hostname:                 testHostname,
		portsMap:                 make(map[utilnet.LocalPort]utilnet.Closeable),
		portMapper:               &fakePortOpener{[]*utilnet.LocalPort{}},
		serviceHealthServer:      healthcheck.NewFakeServiceHealthServer(),
		precomputedProbabilities: make([]string, 0, 1001),
		iptablesData:             bytes.NewBuffer(nil),
		existingFilterChainsData: bytes.NewBuffer(nil),
		filterChains:             bytes.NewBuffer(nil),
		filterRules:              bytes.NewBuffer(nil),
		natChains:                bytes.NewBuffer(nil),
		natRules:                 bytes.NewBuffer(nil),
		nodePortAddresses:        make([]string, 0),
		networkInterfacer:        utilproxytest.NewFakeNetwork(),
	}
	p.setInitialized(true)
	// The interval/burst values are arbitrary: tests call syncProxyRules
	// directly rather than going through the runner.
	p.syncRunner = async.NewBoundedFrequencyRunner("test-sync-runner", p.syncProxyRules, 0, time.Minute, 1)
	return p
}
// hasSessionAffinityRule reports whether any rule in the slice carries the
// "recent" match module, which is how session affinity rules are expressed.
func hasSessionAffinityRule(rules []iptablestest.Rule) bool {
	for i := range rules {
		if _, found := rules[i][iptablestest.Recent]; found {
			return true
		}
	}
	return false
}
// hasJump reports whether some rule in rules jumps to destChain, optionally
// constrained by destination IP and/or port. With destIP == "" and
// destPort == 0, any jump to destChain suffices. When a constraint is set,
// a jumping rule matches if it contains that value and the *other* field
// either also matches or is absent from the rule; a full match returns
// true immediately.
//
// NOTE(review): when constraints are given and no rule fully matches, the
// final return value reflects only the LAST rule that jumped to destChain
// (match is overwritten per jumping rule) — historical behavior the table
// test below pins down.
func hasJump(rules []iptablestest.Rule, destChain, destIP string, destPort int) bool {
	destPortStr := strconv.Itoa(destPort)
	match := false
	for _, r := range rules {
		if r[iptablestest.Jump] == destChain {
			match = true
			if destIP != "" {
				// IP constraint: the rule must name destIP, and its dport
				// must either match or be empty.
				if strings.Contains(r[iptablestest.Destination], destIP) && (strings.Contains(r[iptablestest.DPort], destPortStr) || r[iptablestest.DPort] == "") {
					return true
				}
				match = false
			}
			if destPort != 0 {
				// Port constraint: the rule must name the port, and its
				// destination must either match or be empty.
				if strings.Contains(r[iptablestest.DPort], destPortStr) && (strings.Contains(r[iptablestest.Destination], destIP) || r[iptablestest.Destination] == "") {
					return true
				}
				match = false
			}
		}
	}
	return match
}
// hasSrcType reports whether any rule uses an addrtype source-type match
// equal to srcType.
func hasSrcType(rules []iptablestest.Rule, srcType string) bool {
	for _, rule := range rules {
		if rule[iptablestest.SrcType] == srcType {
			return true
		}
	}
	return false
}
// hasMasqRandomFully reports whether any MASQUERADE rule in the slice was
// rendered with the "--random-fully" option.
func hasMasqRandomFully(rules []iptablestest.Rule) bool {
	for i := range rules {
		if rules[i][iptablestest.Masquerade] == "--random-fully" {
			return true
		}
	}
	return false
}
// TestHasJump pins down the matching semantics of the hasJump helper,
// including the subtle cases: empty constraints, IP-only and port-only
// rules, and constraints that partially match. Map keys carry a trailing
// space (e.g. "-d ") because that is how the fake iptables rules are keyed.
func TestHasJump(t *testing.T) {
	testCases := map[string]struct {
		rules     []iptablestest.Rule
		destChain string
		destIP    string
		destPort  int
		expected  bool
	}{
		"case 1": {
			// Match the 1st rule(both dest IP and dest Port)
			rules: []iptablestest.Rule{
				{"-d ": "10.20.30.41/32", "--dport ": "80", "-p ": "tcp", "-j ": "REJECT"},
				{"--dport ": "3001", "-p ": "tcp", "-j ": "KUBE-MARK-MASQ"},
			},
			destChain: "REJECT",
			destIP:    "10.20.30.41",
			destPort:  80,
			expected:  true,
		},
		"case 2": {
			// Match the 2nd rule(dest Port)
			rules: []iptablestest.Rule{
				{"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"},
				{"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"},
			},
			destChain: "REJECT",
			destIP:    "",
			destPort:  3001,
			expected:  true,
		},
		"case 3": {
			// Match both dest IP and dest Port
			rules: []iptablestest.Rule{
				{"-d ": "1.2.3.4/32", "--dport ": "80", "-p ": "tcp", "-j ": "KUBE-XLB-GF53O3C2HZEXL2XN"},
			},
			destChain: "KUBE-XLB-GF53O3C2HZEXL2XN",
			destIP:    "1.2.3.4",
			destPort:  80,
			expected:  true,
		},
		"case 4": {
			// Match dest IP but doesn't match dest Port
			rules: []iptablestest.Rule{
				{"-d ": "1.2.3.4/32", "--dport ": "80", "-p ": "tcp", "-j ": "KUBE-XLB-GF53O3C2HZEXL2XN"},
			},
			destChain: "KUBE-XLB-GF53O3C2HZEXL2XN",
			destIP:    "1.2.3.4",
			destPort:  8080,
			expected:  false,
		},
		"case 5": {
			// Match dest Port but doesn't match dest IP
			rules: []iptablestest.Rule{
				{"-d ": "1.2.3.4/32", "--dport ": "80", "-p ": "tcp", "-j ": "KUBE-XLB-GF53O3C2HZEXL2XN"},
			},
			destChain: "KUBE-XLB-GF53O3C2HZEXL2XN",
			destIP:    "10.20.30.40",
			destPort:  80,
			expected:  false,
		},
		"case 6": {
			// Match the 2nd rule(dest IP)
			rules: []iptablestest.Rule{
				{"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"},
				{"-d ": "1.2.3.4/32", "-p ": "tcp", "-j ": "REJECT"},
				{"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"},
			},
			destChain: "REJECT",
			destIP:    "1.2.3.4",
			destPort:  8080,
			expected:  true,
		},
		"case 7": {
			// Match the 2nd rule(dest Port)
			rules: []iptablestest.Rule{
				{"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"},
				{"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"},
			},
			destChain: "REJECT",
			destIP:    "1.2.3.4",
			destPort:  3001,
			expected:  true,
		},
		"case 8": {
			// Match the 1st rule(dest IP)
			rules: []iptablestest.Rule{
				{"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"},
				{"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"},
			},
			destChain: "REJECT",
			destIP:    "10.20.30.41",
			destPort:  8080,
			expected:  true,
		},
		"case 9": {
			// No constraints: any jump to the chain matches.
			rules: []iptablestest.Rule{
				{"-j ": "KUBE-SEP-LWSOSDSHMKPJHHJV"},
			},
			destChain: "KUBE-SEP-LWSOSDSHMKPJHHJV",
			destIP:    "",
			destPort:  0,
			expected:  true,
		},
		"case 10": {
			// Jump to a different chain never matches.
			rules: []iptablestest.Rule{
				{"-j ": "KUBE-SEP-FOO"},
			},
			destChain: "KUBE-SEP-BAR",
			destIP:    "",
			destPort:  0,
			expected:  false,
		},
	}
	for k, tc := range testCases {
		if got := hasJump(tc.rules, tc.destChain, tc.destIP, tc.destPort); got != tc.expected {
			t.Errorf("%v: expected %v, got %v", k, tc.expected, got)
		}
	}
}
// hasDNAT reports whether any rule DNATs to the given "ip:port" endpoint.
func hasDNAT(rules []iptablestest.Rule, endpoint string) bool {
	for i := range rules {
		if rules[i][iptablestest.ToDest] == endpoint {
			return true
		}
	}
	return false
}
// errorf logs every candidate rule (to aid debugging a failed expectation)
// and then records msg as a test failure.
func errorf(msg string, rules []iptablestest.Rule, t *testing.T) {
	t.Helper() // attribute the failure to the calling test line, not this helper
	for _, r := range rules {
		t.Logf("%q", r)
	}
	t.Errorf("%v", msg)
}
// TestOverallIPTablesRulesWithMultipleServices creates 4 types of services: ClusterIP,
// LoadBalancer, ExternalIP and NodePort and verifies if the NAT table rules created
// are exactly the same as what is expected. This test provides an overall view of how
// the NAT table rules look like with the different jumps.
func TestOverallIPTablesRulesWithMultipleServices(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	// Register metrics so the IptablesRulesTotal gauge checked at the end is populated.
	metrics.RegisterMetrics()

	makeServiceMap(fp,
		// create ClusterIP service
		makeTestService(makeNSN("ns1", "svc1").Namespace, "svc1", func(svc *v1.Service) {
			svc.Spec.ClusterIP = "10.20.30.41"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p80",
				Port:     80,
				Protocol: v1.ProtocolTCP,
			}}
		}),
		// create LoadBalancer service
		makeTestService(makeNSN("ns2", "svc2").Namespace, "svc2", func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.ClusterIP = "10.20.30.42"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p80",
				Port:     80,
				Protocol: v1.ProtocolTCP,
				NodePort: 3001,
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: "1.2.3.4",
			}}
			// Also ensure that invalid LoadBalancerSourceRanges will not result
			// in a crash.
			svc.Spec.ExternalIPs = []string{"1.2.3.4"}
			svc.Spec.LoadBalancerSourceRanges = []string{" 1.2.3.4/28"}
			svc.Spec.HealthCheckNodePort = 30000
		}),
		// create NodePort service
		makeTestService(makeNSN("ns3", "svc3").Namespace, "svc3", func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = "10.20.30.43"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p80",
				Port:     80,
				Protocol: v1.ProtocolTCP,
				NodePort: 3001,
			}}
		}),
		// create ExternalIP service
		makeTestService(makeNSN("ns4", "svc4").Namespace, "svc4", func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = "10.20.30.44"
			svc.Spec.ExternalIPs = []string{"50.60.70.81"}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       "p80",
				Port:       80,
				Protocol:   v1.ProtocolTCP,
				TargetPort: intstr.FromInt(80),
			}}
		}),
	)
	makeEndpointsMap(fp,
		// create ClusterIP service endpoints
		makeTestEndpoints(makeNSN("ns1", "svc1").Namespace, "svc1", func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP: "10.180.0.1",
				}},
				Ports: []v1.EndpointPort{{
					Name:     "p80",
					Port:     80,
					Protocol: v1.ProtocolTCP,
				}},
			}}
		}),
		// create LoadBalancer endpoints
		makeTestEndpoints(makeNSN("ns2", "svc2").Namespace, "svc2", func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP: "10.180.0.2",
				}},
				Ports: []v1.EndpointPort{{
					Name:     "p80",
					Port:     80,
					Protocol: v1.ProtocolTCP,
				}},
			}}
		}),
		// create NodePort service endpoints
		makeTestEndpoints(makeNSN("ns3", "svc3").Namespace, "svc3", func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP: "10.180.0.3",
				}},
				Ports: []v1.EndpointPort{{
					Name:     "p80",
					Port:     80,
					Protocol: v1.ProtocolTCP,
				}},
			}}
		}),
		// create ExternalIP service endpoints; one endpoint is local to this
		// node (NodeName == testHostname) and one is not (NodeName == nil).
		makeTestEndpoints(makeNSN("ns4", "svc4").Namespace, "svc4", func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP:       "10.180.0.4",
					NodeName: nil,
				}, {
					IP:       "10.180.0.5",
					NodeName: utilpointer.StringPtr(testHostname),
				}},
				Ports: []v1.EndpointPort{{
					Name:     "p80",
					Port:     80,
					Protocol: v1.ProtocolTCP,
				}},
			}}
		}),
	)

	fp.syncProxyRules()

	// Golden output: the exact filter and nat table contents the proxier is
	// expected to write. Any behavioral change to rule generation must update
	// this string.
	expected := `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
:KUBE-XLB-GNZBNJ2PO5MGZ6GT - [0:0]
:KUBE-FW-GNZBNJ2PO5MGZ6GT - [0:0]
:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41/32 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 10.20.30.42/32 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 1.2.3.4/32 --dport 80 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4/32 --dport 80 -j KUBE-FW-GNZBNJ2PO5MGZ6GT
-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 10.20.30.42/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment ns2/svc2:p80 -j KUBE-SEP-RS4RBKLTHTF2IUXJ
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -s 1.2.3.4/28 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -j KUBE-MARK-DROP
-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -s 127.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/24 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 has no local endpoints" -j KUBE-MARK-DROP
-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 10.20.30.43/32 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 10.20.30.43/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3001 -j KUBE-SVC-X27LE4BHSL4DOUIK
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3001 -j KUBE-MARK-MASQ
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -j KUBE-SEP-OYPFS5VJICHGATKP
-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 10.20.30.44/32 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 50.60.70.81/32 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 10.20.30.44/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 50.60.70.81/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -j KUBE-SEP-C6EBXVWJJZMIWKLZ
-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`
	// Adding logic to split and sort the strings because
	// depending on the creation order of services, the user-chains get jumbled.
	expectedSlice := strings.Split(strings.TrimSuffix(expected, "\n"), "\n")
	sort.Strings(expectedSlice)
	originalSlice := strings.Split(strings.TrimSuffix(fp.iptablesData.String(), "\n"), "\n")
	sort.Strings(originalSlice)

	assert.Equal(t, expectedSlice, originalSlice)

	// Cross-check the exported metric against the golden output.
	nNatRules, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableNAT)))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
	}
	// 43 here is test specific and corresponds to one more than the number of -A lines after `*nat` in `expected`.
	if nNatRules != 43.0 {
		t.Fatalf("Wrong number of nat rules: expected 43 received %f", nNatRules)
	}
}
// TestClusterIPReject verifies that a ClusterIP service with no endpoints
// gets a REJECT rule in KUBE-SERVICES and no rules in its per-service chain.
func TestClusterIPReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	const (
		clusterIP = "10.20.30.41"
		port      = 80
	)
	spn := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}
	makeServiceMap(fp,
		makeTestService(spn.Namespace, spn.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = clusterIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     spn.Port,
				Port:     int32(port),
				Protocol: v1.ProtocolTCP,
			}}
		}),
	)
	// Deliberately no endpoints for this service.
	makeEndpointsMap(fp)
	fp.syncProxyRules()

	proto := strings.ToLower(string(v1.ProtocolTCP))
	svcChain := string(servicePortChainName(spn.String(), proto))
	if rules := ipt.GetRules(svcChain); len(rules) != 0 {
		errorf(fmt.Sprintf("Unexpected rule for chain %v service %v without endpoints", svcChain, spn), rules, t)
	}
	serviceRules := ipt.GetRules(string(kubeServicesChain))
	if !hasJump(serviceRules, iptablestest.Reject, clusterIP, port) {
		errorf(fmt.Sprintf("Failed to find a %v rule for service %v with no endpoints", iptablestest.Reject, spn), serviceRules, t)
	}
}
// TestClusterIPEndpointsJump verifies the full chain of jumps for a ClusterIP
// service with one endpoint: KUBE-SERVICES -> per-service chain ->
// per-endpoint chain -> DNAT to the endpoint.
func TestClusterIPEndpointsJump(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
			}}
		}),
	)
	epIP := "10.180.0.1"
	makeEndpointsMap(fp,
		makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP: epIP,
				}},
				Ports: []v1.EndpointPort{{
					Name:     svcPortName.Port,
					Port:     int32(svcPort),
					Protocol: v1.ProtocolTCP,
				}},
			}}
		}),
	)
	fp.syncProxyRules()
	epStr := fmt.Sprintf("%s:%d", epIP, svcPort)
	svcChain := string(servicePortChainName(svcPortName.String(), strings.ToLower(string(v1.ProtocolTCP))))
	epChain := string(servicePortEndpointChainName(svcPortName.String(), strings.ToLower(string(v1.ProtocolTCP)), epStr))
	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	// Cluster IP traffic must be dispatched to the per-service chain.
	if !hasJump(kubeSvcRules, svcChain, svcIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump from KUBE-SERVICES to %v chain", svcChain), kubeSvcRules, t)
	}
	svcRules := ipt.GetRules(svcChain)
	// The service chain must mark for masquerade and jump to the endpoint chain.
	if !hasJump(svcRules, string(KubeMarkMasqChain), svcIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump from %v to KUBE-MARK-MASQ chain", svcChain), svcRules, t)
	}
	if !hasJump(svcRules, epChain, "", 0) {
		errorf(fmt.Sprintf("Failed to jump to ep chain %v", epChain), svcRules, t)
	}
	epRules := ipt.GetRules(epChain)
	// Finally the endpoint chain must DNAT to the endpoint ip:port.
	if !hasDNAT(epRules, epStr) {
		errorf(fmt.Sprintf("Endpoint chain %v lacks DNAT to %v", epChain, epStr), epRules, t)
	}
}
// TestLoadBalancer verifies that traffic to a LoadBalancer ingress IP is
// dispatched through the firewall (KUBE-FW-*) chain, and that the firewall
// chain jumps both to the service chain and to KUBE-MARK-MASQ.
func TestLoadBalancer(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
	svcLBIP := "1.2.3.4"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: svcLBIP,
			}}
			// Also ensure that invalid LoadBalancerSourceRanges will not result
			// in a crash.
			svc.Spec.ExternalIPs = []string{svcLBIP}
			svc.Spec.LoadBalancerSourceRanges = []string{" 1.2.3.4/28"}
		}),
	)
	epIP := "10.180.0.1"
	makeEndpointsMap(fp,
		makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP: epIP,
				}},
				Ports: []v1.EndpointPort{{
					Name:     svcPortName.Port,
					Port:     int32(svcPort),
					Protocol: v1.ProtocolTCP,
				}},
			}}
		}),
	)
	fp.syncProxyRules()
	proto := strings.ToLower(string(v1.ProtocolTCP))
	fwChain := string(serviceFirewallChainName(svcPortName.String(), proto))
	svcChain := string(servicePortChainName(svcPortName.String(), proto))
	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	// Traffic to the LB ingress IP must go through the firewall chain first.
	if !hasJump(kubeSvcRules, fwChain, svcLBIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump to firewall chain %v", fwChain), kubeSvcRules, t)
	}
	fwRules := ipt.GetRules(fwChain)
	// The firewall chain forwards accepted traffic to the service chain and
	// marks it for masquerading.
	if !hasJump(fwRules, svcChain, "", 0) || !hasJump(fwRules, string(KubeMarkMasqChain), "", 0) {
		errorf(fmt.Sprintf("Failed to find jump from firewall chain %v to svc chain %v", fwChain, svcChain), fwRules, t)
	}
}
// TestNodePort verifies that node port traffic jumps from KUBE-NODEPORTS to
// the per-service chain, and that non-local node port traffic is marked for
// masquerading.
func TestNodePort(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
		}),
	)
	epIP := "10.180.0.1"
	makeEndpointsMap(fp,
		makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP: epIP,
				}},
				Ports: []v1.EndpointPort{{
					Name:     svcPortName.Port,
					Port:     int32(svcPort),
					Protocol: v1.ProtocolTCP,
				}},
			}}
		}),
	)
	// Register fake node interfaces (IPv4 loopback plus an IPv6 address) so
	// the proxier has node addresses to consider for node ports.
	itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}
	addrs := []net.Addr{utilproxytest.AddrStruct{Val: "127.0.0.1/16"}}
	itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0}
	addrs1 := []net.Addr{utilproxytest.AddrStruct{Val: "::1/128"}}
	fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
	fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
	// Empty list: presumably means "match node port traffic on all node
	// addresses" -- confirm against the Proxier's nodePortAddresses handling.
	fp.nodePortAddresses = []string{}
	fp.syncProxyRules()
	proto := strings.ToLower(string(v1.ProtocolTCP))
	svcChain := string(servicePortChainName(svcPortName.String(), proto))
	kubeNodePortRules := ipt.GetRules(string(kubeNodePortsChain))
	if !hasJump(kubeNodePortRules, svcChain, "", svcNodePort) {
		errorf(fmt.Sprintf("Failed to find jump to svc chain %v", svcChain), kubeNodePortRules, t)
	}
	// The non-local node port traffic must be marked for masquerade inside
	// the service chain; checked against the raw iptables-save output.
	expectedNodePortNonLocalTrafficMasqueradeRule := `-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-MARK-MASQ`
	svcRules := ipt.GetRules(svcChain)
	if !strings.Contains(fp.iptablesData.String(), expectedNodePortNonLocalTrafficMasqueradeRule) {
		errorf(fmt.Sprintf("Didn't find the masquerade rule for node port non-local traffic in svc chain %v", svcChain), svcRules, t)
	}
}
// TestHealthCheckNodePort verifies that a LoadBalancer service with
// externalTrafficPolicy=Local gets an ACCEPT rule in KUBE-NODEPORTS for its
// health check node port.
func TestHealthCheckNodePort(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	svcIP := "10.20.30.42"
	svcPort := 80
	svcNodePort := 3001
	svcHealthCheckNodePort := 30000
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Spec.HealthCheckNodePort = int32(svcHealthCheckNodePort)
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
		}),
	)
	// Register fake node interfaces so the proxier has addresses matching
	// nodePortAddresses below.
	itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}
	addrs := []net.Addr{utilproxytest.AddrStruct{Val: "127.0.0.1/16"}}
	itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0}
	addrs1 := []net.Addr{utilproxytest.AddrStruct{Val: "::1/128"}}
	fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
	fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
	fp.nodePortAddresses = []string{"127.0.0.1/16"}
	fp.syncProxyRules()
	kubeNodePortsRules := ipt.GetRules(string(kubeNodePortsChain))
	if !hasJump(kubeNodePortsRules, iptablestest.Accept, "", svcHealthCheckNodePort) {
		// Plain string here: the original fmt.Sprintf had no format
		// arguments, which go vet flags as a constant format string.
		errorf("Failed to find Accept rule", kubeNodePortsRules, t)
	}
}
// TestMasqueradeRule checks the KUBE-POSTROUTING masquerade rule, both with
// and without iptables --random-fully support on the fake implementation.
func TestMasqueradeRule(t *testing.T) {
	for _, hasRandomFully := range []bool{false, true} {
		ipt := iptablestest.NewFake().SetHasRandomFully(hasRandomFully)
		fp := NewFakeProxier(ipt, false)
		makeServiceMap(fp)
		makeEndpointsMap(fp)
		fp.syncProxyRules()

		rules := ipt.GetRules(string(kubePostroutingChain))
		if !hasJump(rules, "MASQUERADE", "", 0) {
			errorf(fmt.Sprintf("Failed to find -j MASQUERADE in %s chain", kubePostroutingChain), rules, t)
		}
		// --random-fully must appear exactly when the fake reports support.
		if got := hasMasqRandomFully(rules); got != hasRandomFully {
			probs := map[bool]string{false: "found", true: "did not find"}
			errorf(fmt.Sprintf("%s --random-fully in -j MASQUERADE rule in %s chain when HasRandomFully()==%v", probs[hasRandomFully], kubePostroutingChain, hasRandomFully), rules, t)
		}
	}
}
// TestExternalIPsReject verifies that a service exposing an external IP but
// having no endpoints gets a REJECT rule in KUBE-EXTERNAL-SERVICES.
func TestExternalIPsReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	const (
		clusterIP  = "10.20.30.41"
		externalIP = "50.60.70.81"
		port       = 80
	)
	spn := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}
	makeServiceMap(fp,
		makeTestService(spn.Namespace, spn.Name, func(svc *v1.Service) {
			svc.Spec.Type = "ClusterIP"
			svc.Spec.ClusterIP = clusterIP
			svc.Spec.ExternalIPs = []string{externalIP}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       spn.Port,
				Port:       int32(port),
				Protocol:   v1.ProtocolTCP,
				TargetPort: intstr.FromInt(port),
			}}
		}),
	)
	// No endpoints: the external IP must be rejected.
	makeEndpointsMap(fp)
	fp.syncProxyRules()

	extRules := ipt.GetRules(string(kubeExternalServicesChain))
	if !hasJump(extRules, iptablestest.Reject, externalIP, port) {
		errorf(fmt.Sprintf("Failed to find a %v rule for externalIP %v with no endpoints", iptablestest.Reject, spn), extRules, t)
	}
}
// TestOnlyLocalExternalIPs verifies that with externalTrafficPolicy=Local,
// traffic to the external IP is dispatched to the XLB chain, which forwards
// only to endpoints local to this node.
func TestOnlyLocalExternalIPs(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcExternalIPs := "50.60.70.81"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.ClusterIP = svcIP
			svc.Spec.ExternalIPs = []string{svcExternalIPs}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       svcPortName.Port,
				Port:       int32(svcPort),
				Protocol:   v1.ProtocolTCP,
				TargetPort: intstr.FromInt(svcPort),
			}}
		}),
	)
	// NOTE(review): this first call installs an empty endpoints map and is
	// immediately superseded by the populated call below; it looks redundant
	// -- confirm whether exercising the update path is intentional.
	makeEndpointsMap(fp)
	// NOTE(review): the epStrLocal/epStrNonLocal names appear swapped --
	// epIP1 has NodeName nil (NOT this host) while epIP2 is on testHostname
	// -- but nonLocalEpChain/localEpChain below compensate, so the
	// assertions do check the intended chains.
	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	epStrLocal := fmt.Sprintf("%s:%d", epIP1, svcPort)
	epStrNonLocal := fmt.Sprintf("%s:%d", epIP2, svcPort)
	makeEndpointsMap(fp,
		makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP:       epIP1,
					NodeName: nil,
				}, {
					IP:       epIP2,
					NodeName: utilpointer.StringPtr(testHostname),
				}},
				Ports: []v1.EndpointPort{{
					Name:     svcPortName.Port,
					Port:     int32(svcPort),
					Protocol: v1.ProtocolTCP,
				}},
			}}
		}),
	)
	fp.syncProxyRules()
	proto := strings.ToLower(string(v1.ProtocolTCP))
	lbChain := string(serviceLBChainName(svcPortName.String(), proto))
	nonLocalEpChain := string(servicePortEndpointChainName(svcPortName.String(), strings.ToLower(string(v1.ProtocolTCP)), epStrLocal))
	localEpChain := string(servicePortEndpointChainName(svcPortName.String(), strings.ToLower(string(v1.ProtocolTCP)), epStrNonLocal))
	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	// External IP traffic must be routed to the XLB (local-only) chain.
	if !hasJump(kubeSvcRules, lbChain, svcExternalIPs, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump to xlb chain %v", lbChain), kubeSvcRules, t)
	}
	lbRules := ipt.GetRules(lbChain)
	// The XLB chain must only forward to the endpoint on this node.
	if hasJump(lbRules, nonLocalEpChain, "", 0) {
		errorf(fmt.Sprintf("Found jump from lb chain %v to non-local ep %v", lbChain, epStrLocal), lbRules, t)
	}
	if !hasJump(lbRules, localEpChain, "", 0) {
		errorf(fmt.Sprintf("Didn't find jump from lb chain %v to local ep %v", lbChain, epStrNonLocal), lbRules, t)
	}
}
// TestNonLocalExternalIPs tests if we add the masquerade rule into svcChain in order to
// SNAT packets to external IPs if externalTrafficPolicy is cluster and the traffic is NOT Local.
func TestNonLocalExternalIPs(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcExternalIPs := "50.60.70.81"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}
	// No Type / no ExternalTrafficPolicy set: defaults apply, i.e. the
	// "Cluster" traffic policy under test here.
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.ExternalIPs = []string{svcExternalIPs}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       svcPortName.Port,
				Port:       int32(svcPort),
				Protocol:   v1.ProtocolTCP,
				TargetPort: intstr.FromInt(svcPort),
			}}
		}),
	)
	// NOTE(review): this empty call is immediately superseded by the
	// populated one below; it looks redundant -- confirm intent.
	makeEndpointsMap(fp)
	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	makeEndpointsMap(fp,
		makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP:       epIP1,
					NodeName: nil,
				}, {
					IP:       epIP2,
					NodeName: utilpointer.StringPtr(testHostname),
				}},
				Ports: []v1.EndpointPort{{
					Name:     svcPortName.Port,
					Port:     int32(svcPort),
					Protocol: v1.ProtocolTCP,
				}},
			}}
		}),
	)
	fp.syncProxyRules()
	proto := strings.ToLower(string(v1.ProtocolTCP))
	lbChain := string(serviceLBChainName(svcPortName.String(), proto))
	svcChain := string(servicePortChainName(svcPortName.String(), strings.ToLower(string(v1.ProtocolTCP))))
	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	// With the Cluster policy, external IP traffic goes straight to the
	// service chain (not to an XLB chain).
	if !hasJump(kubeSvcRules, svcChain, svcExternalIPs, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump to svc chain %v", svcChain), kubeSvcRules, t)
	}
	svcRules := ipt.GetRules(svcChain)
	// Expected: 2 masquerade rules (cluster IP + external IP) and 2 endpoint
	// jumps -- 4 rules total.
	if len(svcRules) != 4 {
		t.Errorf("expected svcChain %v to have 4 rules, got %v", svcChain, len(svcRules))
	}
	if !hasJump(svcRules, string(KubeMarkMasqChain), svcIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump from %v to KUBE-MARK-MASQ chain", svcChain), svcRules, t)
	}
	// The exact masquerade rule for external-IP traffic, checked against the
	// raw iptables-save output.
	expectedExternalIPNonLocalTrafficMasqueradeRule := `-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 external IP" -m tcp -p tcp -d 50.60.70.81/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ`
	if !strings.Contains(fp.iptablesData.String(), expectedExternalIPNonLocalTrafficMasqueradeRule) {
		errorf(fmt.Sprintf("Didn't find the masquerade rule for external-ip non-local traffic in svc chain %v", svcChain), svcRules, t)
	}
	lbRules := ipt.GetRules(lbChain)
	// No XLB chain rules should exist for a Cluster-policy service.
	if len(lbRules) != 0 {
		t.Errorf("expected svclbChain %v to have 0 rules, got %v", lbChain, len(lbRules))
	}
}
// TestNodePortReject verifies that a NodePort service with no endpoints gets
// a REJECT rule for its node port in KUBE-EXTERNAL-SERVICES.
func TestNodePortReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	const (
		clusterIP = "10.20.30.41"
		port      = 80
		nodePort  = 3001
	)
	spn := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}
	makeServiceMap(fp,
		makeTestService(spn.Namespace, spn.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = clusterIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     spn.Port,
				Port:     int32(port),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(nodePort),
			}}
		}),
	)
	// Deliberately no endpoints.
	makeEndpointsMap(fp)
	fp.syncProxyRules()

	extRules := ipt.GetRules(string(kubeExternalServicesChain))
	if !hasJump(extRules, iptablestest.Reject, clusterIP, nodePort) {
		errorf(fmt.Sprintf("Failed to find a %v rule for service %v with no endpoints", iptablestest.Reject, spn), extRules, t)
	}
}
// TestLoadBalancerReject verifies that a LoadBalancer service with no
// endpoints gets its REJECT rule in KUBE-EXTERNAL-SERVICES (and NOT in
// KUBE-SERVICES).
func TestLoadBalancerReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
	svcLBIP := "1.2.3.4"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}
	svcSessionAffinityTimeout := int32(10800)
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: svcLBIP,
			}}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
			svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
				ClientIP: &v1.ClientIPConfig{TimeoutSeconds: &svcSessionAffinityTimeout},
			}
		}),
	)
	// No endpoints for the service.
	makeEndpointsMap(fp)
	fp.syncProxyRules()
	kubeSvcExtRules := ipt.GetRules(string(kubeExternalServicesChain))
	// The REJECT rule for the LB IP belongs in KUBE-EXTERNAL-SERVICES...
	if !hasJump(kubeSvcExtRules, iptablestest.Reject, svcLBIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find a %v rule for LoadBalancer %v with no endpoints", iptablestest.Reject, svcPortName), kubeSvcExtRules, t)
	}
	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	// ...and must NOT appear in KUBE-SERVICES.
	if hasJump(kubeSvcRules, iptablestest.Reject, svcLBIP, svcPort) {
		errorf(fmt.Sprintf("Found a %v rule for LoadBalancer %v with no endpoints in kubeServicesChain", iptablestest.Reject, svcPortName), kubeSvcRules, t)
	}
}
// TestOnlyLocalLoadBalancing verifies that with externalTrafficPolicy=Local,
// LB traffic goes firewall chain -> XLB chain, the firewall chain does not
// masquerade, the XLB chain only forwards to local endpoints, and session
// affinity (recent match) rules are present.
func TestOnlyLocalLoadBalancing(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
	svcLBIP := "1.2.3.4"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}
	svcSessionAffinityTimeout := int32(10800)
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: svcLBIP,
			}}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
			svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
				ClientIP: &v1.ClientIPConfig{TimeoutSeconds: &svcSessionAffinityTimeout},
			}
		}),
	)
	// NOTE(review): the epStrLocal/epStrNonLocal names appear swapped --
	// epIP1 has NodeName nil (NOT this host), epIP2 is on testHostname --
	// but the chain variables below compensate, so the assertions check the
	// intended chains.
	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	epStrLocal := fmt.Sprintf("%s:%d", epIP1, svcPort)
	epStrNonLocal := fmt.Sprintf("%s:%d", epIP2, svcPort)
	makeEndpointsMap(fp,
		makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP:       epIP1,
					NodeName: nil,
				}, {
					IP:       epIP2,
					NodeName: utilpointer.StringPtr(testHostname),
				}},
				Ports: []v1.EndpointPort{{
					Name:     svcPortName.Port,
					Port:     int32(svcPort),
					Protocol: v1.ProtocolTCP,
				}},
			}}
		}),
	)
	fp.syncProxyRules()
	proto := strings.ToLower(string(v1.ProtocolTCP))
	fwChain := string(serviceFirewallChainName(svcPortName.String(), proto))
	lbChain := string(serviceLBChainName(svcPortName.String(), proto))
	nonLocalEpChain := string(servicePortEndpointChainName(svcPortName.String(), strings.ToLower(string(v1.ProtocolTCP)), epStrLocal))
	localEpChain := string(servicePortEndpointChainName(svcPortName.String(), strings.ToLower(string(v1.ProtocolTCP)), epStrNonLocal))
	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	// LB IP traffic goes through the firewall chain first.
	if !hasJump(kubeSvcRules, fwChain, svcLBIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump to firewall chain %v", fwChain), kubeSvcRules, t)
	}
	fwRules := ipt.GetRules(fwChain)
	if !hasJump(fwRules, lbChain, "", 0) {
		errorf(fmt.Sprintf("Failed to find jump from firewall chain %v to svc chain %v", fwChain, lbChain), fwRules, t)
	}
	// With the Local policy the fw chain must NOT mark for masquerade, so
	// the client source IP is preserved.
	if hasJump(fwRules, string(KubeMarkMasqChain), "", 0) {
		errorf(fmt.Sprintf("Found jump from fw chain %v to MASQUERADE", fwChain), fwRules, t)
	}
	lbRules := ipt.GetRules(lbChain)
	// Only the endpoint on this node may be targeted.
	if hasJump(lbRules, nonLocalEpChain, "", 0) {
		errorf(fmt.Sprintf("Found jump from lb chain %v to non-local ep %v", lbChain, epStrLocal), lbRules, t)
	}
	if !hasJump(lbRules, localEpChain, "", 0) {
		errorf(fmt.Sprintf("Didn't find jump from lb chain %v to local ep %v", lbChain, epStrNonLocal), lbRules, t)
	}
	// ClientIP session affinity must add a recent-match rule.
	if !hasSessionAffinityRule(lbRules) {
		errorf(fmt.Sprintf("Didn't find session affinity rule from lb chain %v", lbChain), lbRules, t)
	}
}
// TestOnlyLocalNodePortsNoClusterCIDR runs the shared onlyLocalNodePorts
// scenario.
//
// NOTE(review): despite the name, this test configures the fake proxier
// exactly like TestOnlyLocalNodePorts (NewFakeProxier(ipt, false));
// presumably one of the two should build a proxier without a cluster CIDR --
// confirm against NewFakeProxier's parameters.
func TestOnlyLocalNodePortsNoClusterCIDR(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	onlyLocalNodePorts(t, fp, ipt)
}
// TestOnlyLocalNodePorts exercises the shared onlyLocalNodePorts scenario
// with the default fake proxier configuration.
func TestOnlyLocalNodePorts(t *testing.T) {
	fake := iptablestest.NewFake()
	proxier := NewFakeProxier(fake, false)
	onlyLocalNodePorts(t, proxier, fake)
}
// onlyLocalNodePorts is the shared body of the TestOnlyLocalNodePorts*
// tests. It creates a NodePort service with externalTrafficPolicy=Local and
// two endpoints -- one on this node (NodeName == testHostname) and one on
// another node (NodeName == nil) -- and verifies that:
//   - node port traffic jumps to the service's XLB (local-only) chain,
//   - the XLB chain forwards only to the local endpoint,
//   - LOCAL-source traffic is routed back through the regular service chain.
//
// Fixes over the previous version: the epStr locals were named backwards
// (epStrLocal held the NON-local endpoint), which made the "local ep"
// failure message print the wrong endpoint; and one message said
// "destination IP" while printing a node port.
func onlyLocalNodePorts(t *testing.T, fp *Proxier, ipt *iptablestest.FakeIPTables) {
	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
		}),
	)
	// epIP1 lives on another node (NodeName nil); epIP2 is local to this node.
	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	epStrNonLocal := fmt.Sprintf("%s:%d", epIP1, svcPort)
	epStrLocal := fmt.Sprintf("%s:%d", epIP2, svcPort)
	makeEndpointsMap(fp,
		makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP:       epIP1,
					NodeName: nil,
				}, {
					IP:       epIP2,
					NodeName: utilpointer.StringPtr(testHostname),
				}},
				Ports: []v1.EndpointPort{{
					Name:     svcPortName.Port,
					Port:     int32(svcPort),
					Protocol: v1.ProtocolTCP,
				}},
			}}
		}),
	)
	// Give the proxier a node address matching nodePortAddresses below.
	itf := net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}
	addrs := []net.Addr{utilproxytest.AddrStruct{Val: "10.20.30.51/24"}}
	fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
	fp.nodePortAddresses = []string{"10.20.30.0/24"}
	fp.syncProxyRules()
	proto := strings.ToLower(string(v1.ProtocolTCP))
	lbChain := string(serviceLBChainName(svcPortName.String(), proto))
	nonLocalEpChain := string(servicePortEndpointChainName(svcPortName.String(), proto, epStrNonLocal))
	localEpChain := string(servicePortEndpointChainName(svcPortName.String(), proto, epStrLocal))
	kubeNodePortRules := ipt.GetRules(string(kubeNodePortsChain))
	if !hasJump(kubeNodePortRules, lbChain, "", svcNodePort) {
		errorf(fmt.Sprintf("Failed to find jump to lb chain %v", lbChain), kubeNodePortRules, t)
	}
	if !hasJump(kubeNodePortRules, string(KubeMarkMasqChain), "", svcNodePort) {
		errorf(fmt.Sprintf("Failed to find jump to %s chain for node port %d", KubeMarkMasqChain, svcNodePort), kubeNodePortRules, t)
	}
	kubeServiceRules := ipt.GetRules(string(kubeServicesChain))
	// KUBE-SERVICES must dispatch traffic for the node's own address to
	// KUBE-NODEPORTS.
	if !hasJump(kubeServiceRules, string(kubeNodePortsChain), "10.20.30.51", 0) {
		errorf(fmt.Sprintf("Failed to find jump to KUBE-NODEPORTS chain %v", string(kubeNodePortsChain)), kubeServiceRules, t)
	}
	svcChain := string(servicePortChainName(svcPortName.String(), proto))
	lbRules := ipt.GetRules(lbChain)
	// Only the local endpoint may be targeted by the XLB chain.
	if hasJump(lbRules, nonLocalEpChain, "", 0) {
		errorf(fmt.Sprintf("Found jump from lb chain %v to non-local ep %v", lbChain, epStrNonLocal), lbRules, t)
	}
	// LOCAL-source traffic is routed back through the regular service chain.
	if !hasJump(lbRules, svcChain, "", 0) || !hasSrcType(lbRules, "LOCAL") {
		errorf(fmt.Sprintf("Did not find jump from lb chain %v to svc %v with src-type LOCAL", lbChain, svcChain), lbRules, t)
	}
	if !hasJump(lbRules, localEpChain, "", 0) {
		errorf(fmt.Sprintf("Didn't find jump from lb chain %v to local ep %v", lbChain, epStrLocal), lbRules, t)
	}
}
// TestComputeProbability verifies computeProbability against a table of known
// values, then checks that the probability strictly increases as the endpoint
// count decreases from 100000 down to 2.
func TestComputeProbability(t *testing.T) {
	cases := map[int]string{
		1:      "1.0000000000",
		2:      "0.5000000000",
		10:     "0.1000000000",
		100:    "0.0100000000",
		1000:   "0.0010000000",
		10000:  "0.0001000000",
		100000: "0.0000100000",
		100001: "0.0000099999",
	}
	for n, want := range cases {
		if got := computeProbability(n); got != want {
			t.Errorf("Expected computeProbability(%d) to be %s, got: %s", n, want, got)
		}
	}
	// Walking down through endpoint counts, each probability must be strictly
	// greater than the one before it.
	var prev float64
	for n := 100000; n > 1; n-- {
		curr, err := strconv.ParseFloat(computeProbability(n), 64)
		if err != nil {
			t.Fatalf("Error parsing float probability for %d: %v", n, err)
		}
		if curr <= prev {
			t.Fatalf("Probability unexpectedly <= to previous probability for %d: (%0.10f <= %0.10f)", n, curr, prev)
		}
		prev = curr
	}
}
// makeTestService builds a v1.Service with the given namespace and name, then
// applies svcFunc so the caller can fill in the spec/status before use.
func makeTestService(namespace, name string, svcFunc func(*v1.Service)) *v1.Service {
	service := v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:   namespace,
			Name:        name,
			Annotations: map[string]string{},
		},
		Spec:   v1.ServiceSpec{},
		Status: v1.ServiceStatus{},
	}
	svcFunc(&service)
	return &service
}
// addTestPort appends a ServicePort with the given attributes to array and
// returns the extended slice.
func addTestPort(array []v1.ServicePort, name string, protocol v1.Protocol, port, nodeport int32, targetPort int) []v1.ServicePort {
	return append(array, v1.ServicePort{
		Name:       name,
		Protocol:   protocol,
		Port:       port,
		NodePort:   nodeport,
		TargetPort: intstr.FromInt(targetPort),
	})
}
// TestBuildServiceMapAddRemove builds a service map from four services (one of
// each proxied type), checks the resulting port and healthcheck bookkeeping,
// then removes/shrinks them and checks the stale-UDP-cluster-IP accounting.
func TestBuildServiceMapAddRemove(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	services := []*v1.Service{
		// 3 ports (2 UDP, 1 SCTP) on a plain ClusterIP service.
		makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeClusterIP
			svc.Spec.ClusterIP = "172.16.55.4"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "sctpport", "SCTP", 1236, 6321, 0)
		}),
		// 3 ports on a NodePort service.
		makeTestService("somewhere-else", "node-port", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeNodePort
			svc.Spec.ClusterIP = "172.16.55.10"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blahblah", "UDP", 345, 678, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "moreblahblah", "TCP", 344, 677, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "muchmoreblah", "SCTP", 343, 676, 0)
		}),
		// 2 ports on a LoadBalancer with the default (cluster-wide) traffic policy.
		makeTestService("somewhere", "load-balancer", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeLoadBalancer
			svc.Spec.ClusterIP = "172.16.55.11"
			svc.Spec.LoadBalancerIP = "5.6.7.8"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar", "UDP", 8675, 30061, 7000)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8676, 30062, 7001)
			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
				Ingress: []v1.LoadBalancerIngress{
					{IP: "10.1.2.4"},
				},
			}
		}),
		// 2 ports on a LoadBalancer with Local traffic policy; this is the one
		// expected to contribute a healthcheck node port below.
		makeTestService("somewhere", "only-local-load-balancer", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeLoadBalancer
			svc.Spec.ClusterIP = "172.16.55.12"
			svc.Spec.LoadBalancerIP = "5.6.7.8"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar2", "UDP", 8677, 30063, 7002)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8678, 30064, 7003)
			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
				Ingress: []v1.LoadBalancerIngress{
					{IP: "10.1.2.3"},
				},
			}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.HealthCheckNodePort = 345
		}),
	}
	for i := range services {
		fp.OnServiceAdd(services[i])
	}
	result := fp.serviceMap.Update(fp.serviceChanges)
	// 10 = total ServicePorts across the four services above (3+3+2+2).
	if len(fp.serviceMap) != 10 {
		t.Errorf("expected service map length 10, got %v", fp.serviceMap)
	}
	// The only-local-loadbalancer ones get added
	if len(result.HCServiceNodePorts) != 1 {
		t.Errorf("expected 1 healthcheck port, got %v", result.HCServiceNodePorts)
	} else {
		nsn := makeNSN("somewhere", "only-local-load-balancer")
		if port, found := result.HCServiceNodePorts[nsn]; !found || port != 345 {
			t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, result.HCServiceNodePorts)
		}
	}
	if len(result.UDPStaleClusterIP) != 0 {
		// Services only added, so nothing stale yet
		t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
	}
	// Remove some stuff
	// oneService is a modification of services[0] with removed first port.
	oneService := makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeClusterIP
		svc.Spec.ClusterIP = "172.16.55.4"
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
	})
	fp.OnServiceUpdate(services[0], oneService)
	fp.OnServiceDelete(services[1])
	fp.OnServiceDelete(services[2])
	fp.OnServiceDelete(services[3])
	result = fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 1 {
		t.Errorf("expected service map length 1, got %v", fp.serviceMap)
	}
	if len(result.HCServiceNodePorts) != 0 {
		t.Errorf("expected 0 healthcheck ports, got %v", result.HCServiceNodePorts)
	}
	// All services but one were deleted. While you'd expect only the ClusterIPs
	// from the three deleted services here, we still have the ClusterIP for
	// the not-deleted service, because one of its ServicePorts was deleted.
	expectedStaleUDPServices := []string{"172.16.55.10", "172.16.55.4", "172.16.55.11", "172.16.55.12"}
	if len(result.UDPStaleClusterIP) != len(expectedStaleUDPServices) {
		t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), result.UDPStaleClusterIP.UnsortedList())
	}
	for _, ip := range expectedStaleUDPServices {
		if !result.UDPStaleClusterIP.Has(ip) {
			t.Errorf("expected stale UDP service service %s", ip)
		}
	}
}
// TestBuildServiceMapServiceHeadless verifies that headless services
// (ClusterIP set to v1.ClusterIPNone) are skipped entirely when building the
// service map: no map entries, no healthchecks, no stale UDP cluster IPs.
func TestBuildServiceMapServiceHeadless(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	headless := makeTestService("somewhere-else", "headless", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeClusterIP
		svc.Spec.ClusterIP = v1.ClusterIPNone
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "rpc", "UDP", 1234, 0, 0)
	})
	headlessNoPort := makeTestService("somewhere-else", "headless-without-port", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeClusterIP
		svc.Spec.ClusterIP = v1.ClusterIPNone
	})
	makeServiceMap(fp, headless, headlessNoPort)

	// Headless service should be ignored
	update := fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 0 {
		t.Errorf("expected service map length 0, got %d", len(fp.serviceMap))
	}
	// No proxied services, so no healthchecks
	if len(update.HCServiceNodePorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %d", len(update.HCServiceNodePorts))
	}
	if len(update.UDPStaleClusterIP) != 0 {
		t.Errorf("expected stale UDP services length 0, got %d", len(update.UDPStaleClusterIP))
	}
}
// TestBuildServiceMapServiceTypeExternalName verifies that ExternalName
// services are never proxied: they contribute no service-map entries, no
// healthcheck ports, and no stale UDP cluster IPs.
func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	external := makeTestService("somewhere-else", "external-name", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeExternalName
		svc.Spec.ClusterIP = "172.16.55.4" // Should be ignored
		svc.Spec.ExternalName = "foo2.bar.com"
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blah", "UDP", 1235, 5321, 0)
	})
	makeServiceMap(fp, external)

	update := fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 0 {
		t.Errorf("expected service map length 0, got %v", fp.serviceMap)
	}
	// No proxied services, so no healthchecks
	if len(update.HCServiceNodePorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %v", update.HCServiceNodePorts)
	}
	if len(update.UDPStaleClusterIP) != 0 {
		t.Errorf("expected stale UDP services length 0, got %v", update.UDPStaleClusterIP)
	}
}
// TestBuildServiceMapServiceUpdate flips one service between a plain ClusterIP
// and a Local-traffic-policy LoadBalancer and checks that the service map
// length, healthcheck node ports, and stale-UDP accounting track each
// transition (including a no-op update).
func TestBuildServiceMapServiceUpdate(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	// Plain two-port (UDP + TCP) ClusterIP version of the service.
	servicev1 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeClusterIP
		svc.Spec.ClusterIP = "172.16.55.4"
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 0)
	})
	// Same service as a LoadBalancer with Local traffic policy, which should
	// add a healthcheck node port (345).
	servicev2 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeLoadBalancer
		svc.Spec.ClusterIP = "172.16.55.4"
		svc.Spec.LoadBalancerIP = "5.6.7.8"
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 7002)
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 7003)
		svc.Status.LoadBalancer = v1.LoadBalancerStatus{
			Ingress: []v1.LoadBalancerIngress{
				{IP: "10.1.2.3"},
			},
		}
		svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
		svc.Spec.HealthCheckNodePort = 345
	})
	fp.OnServiceAdd(servicev1)
	result := fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 2 {
		t.Errorf("expected service map length 2, got %v", fp.serviceMap)
	}
	if len(result.HCServiceNodePorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts)
	}
	if len(result.UDPStaleClusterIP) != 0 {
		// Services only added, so nothing stale yet
		t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
	}
	// Change service to load-balancer
	fp.OnServiceUpdate(servicev1, servicev2)
	result = fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 2 {
		t.Errorf("expected service map length 2, got %v", fp.serviceMap)
	}
	if len(result.HCServiceNodePorts) != 1 {
		t.Errorf("expected healthcheck ports length 1, got %v", result.HCServiceNodePorts)
	}
	if len(result.UDPStaleClusterIP) != 0 {
		t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP.UnsortedList())
	}
	// No change; make sure the service map stays the same and there are
	// no health-check changes
	fp.OnServiceUpdate(servicev2, servicev2)
	result = fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 2 {
		t.Errorf("expected service map length 2, got %v", fp.serviceMap)
	}
	if len(result.HCServiceNodePorts) != 1 {
		t.Errorf("expected healthcheck ports length 1, got %v", result.HCServiceNodePorts)
	}
	if len(result.UDPStaleClusterIP) != 0 {
		t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP.UnsortedList())
	}
	// And back to ClusterIP
	fp.OnServiceUpdate(servicev2, servicev1)
	result = fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 2 {
		t.Errorf("expected service map length 2, got %v", fp.serviceMap)
	}
	if len(result.HCServiceNodePorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts)
	}
	if len(result.UDPStaleClusterIP) != 0 {
		// The UDP port is unchanged across this update, so nothing is stale.
		t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
	}
}
// makeTestEndpoints builds a v1.Endpoints object for namespace/name and applies
// eptFunc so the caller can populate its subsets before use.
func makeTestEndpoints(namespace, name string, eptFunc func(*v1.Endpoints)) *v1.Endpoints {
	endpoints := v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      name,
		},
	}
	eptFunc(&endpoints)
	return &endpoints
}
// makeEndpointsMap feeds allEndpoints into the proxier via OnEndpointsAdd and
// then marks the endpoints state as synced.
func makeEndpointsMap(proxier *Proxier, allEndpoints ...*v1.Endpoints) {
	for _, ep := range allEndpoints {
		proxier.OnEndpointsAdd(ep)
	}
	proxier.mu.Lock()
	proxier.endpointsSynced = true
	proxier.mu.Unlock()
}
// makeNSN is shorthand for constructing a types.NamespacedName.
func makeNSN(namespace, name string) types.NamespacedName {
	return types.NamespacedName{
		Namespace: namespace,
		Name:      name,
	}
}
// makeServicePortName builds a proxy.ServicePortName for the given service,
// port name, and protocol.
func makeServicePortName(ns, name, port string, protocol v1.Protocol) proxy.ServicePortName {
	spn := proxy.ServicePortName{NamespacedName: makeNSN(ns, name)}
	spn.Port = port
	spn.Protocol = protocol
	return spn
}
// makeServiceMap feeds allServices into the proxier via OnServiceAdd and then
// marks the services state as synced.
func makeServiceMap(proxier *Proxier, allServices ...*v1.Service) {
	for _, svc := range allServices {
		proxier.OnServiceAdd(svc)
	}
	proxier.mu.Lock()
	proxier.servicesSynced = true
	proxier.mu.Unlock()
}
// compareEndpointsMaps asserts that newMap contains exactly the endpoints
// described by expected, comparing the endpointsInfo fields this package cares
// about (Endpoint, IsLocal, protocol, chainName). tci is the test-case index,
// echoed in every failure message so table-driven callers can locate failures.
func compareEndpointsMaps(t *testing.T, tci int, newMap proxy.EndpointsMap, expected map[proxy.ServicePortName][]*endpointsInfo) {
	// Report failures at the caller's line, not inside this helper.
	t.Helper()
	if len(newMap) != len(expected) {
		t.Errorf("[%d] expected %d results, got %d: %v", tci, len(expected), len(newMap), newMap)
	}
	for x := range expected {
		if len(newMap[x]) != len(expected[x]) {
			t.Errorf("[%d] expected %d endpoints for %v, got %d", tci, len(expected[x]), x, len(newMap[x]))
		} else {
			for i := range expected[x] {
				// Entries are stored as the proxy.Endpoint interface; they must
				// be this package's concrete type to compare private fields.
				newEp, ok := newMap[x][i].(*endpointsInfo)
				if !ok {
					t.Errorf("[%d] Failed to cast endpointsInfo", tci)
					continue
				}
				if newEp.Endpoint != expected[x][i].Endpoint ||
					newEp.IsLocal != expected[x][i].IsLocal ||
					newEp.protocol != expected[x][i].protocol ||
					newEp.chainName != expected[x][i].chainName {
					t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, expected[x][i], newEp)
				}
			}
		}
	}
}
func Test_updateEndpointsMap(t *testing.T) {
var nodeName = testHostname
emptyEndpoint := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{}
}
unnamedPort := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}}
}
unnamedPortLocal := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}}
}
namedPortLocal := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}}
}
namedPort := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}}
}
namedPortRenamed := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Name: "p11-2",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}}
}
namedPortRenumbered := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 22,
Protocol: v1.ProtocolUDP,
}},
}}
}
namedPortsLocalNoLocal := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}, {
IP: "1.1.1.2",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}, {
Name: "p12",
Port: 12,
Protocol: v1.ProtocolUDP,
}},
}}
}
multipleSubsets := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.2",
}},
Ports: []v1.EndpointPort{{
Name: "p12",
Port: 12,
Protocol: v1.ProtocolUDP,
}},
}}
}
multipleSubsetsWithLocal := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.2",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p12",
Port: 12,
Protocol: v1.ProtocolUDP,
}},
}}
}
multipleSubsetsMultiplePortsLocal := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}, {
Name: "p12",
Port: 12,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.3",
}},
Ports: []v1.EndpointPort{{
Name: "p13",
Port: 13,
Protocol: v1.ProtocolUDP,
}},
}}
}
multipleSubsetsIPsPorts1 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}, {
IP: "1.1.1.2",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}, {
Name: "p12",
Port: 12,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.3",
}, {
IP: "1.1.1.4",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p13",
Port: 13,
Protocol: v1.ProtocolUDP,
}, {
Name: "p14",
Port: 14,
Protocol: v1.ProtocolUDP,
}},
}}
}
multipleSubsetsIPsPorts2 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "2.2.2.1",
}, {
IP: "2.2.2.2",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p21",
Port: 21,
Protocol: v1.ProtocolUDP,
}, {
Name: "p22",
Port: 22,
Protocol: v1.ProtocolUDP,
}},
}}
}
complexBefore1 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}}
}
complexBefore2 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "2.2.2.2",
NodeName: &nodeName,
}, {
IP: "2.2.2.22",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p22",
Port: 22,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "2.2.2.3",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p23",
Port: 23,
Protocol: v1.ProtocolUDP,
}},
}}
}
complexBefore4 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "4.4.4.4",
NodeName: &nodeName,
}, {
IP: "4.4.4.5",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p44",
Port: 44,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "4.4.4.6",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p45",
Port: 45,
Protocol: v1.ProtocolUDP,
}},
}}
}
complexAfter1 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}, {
IP: "1.1.1.11",
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.2",
}},
Ports: []v1.EndpointPort{{
Name: "p12",
Port: 12,
Protocol: v1.ProtocolUDP,
}, {
Name: "p122",
Port: 122,
Protocol: v1.ProtocolUDP,
}},
}}
}
complexAfter3 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "3.3.3.3",
}},
Ports: []v1.EndpointPort{{
Name: "p33",
Port: 33,
Protocol: v1.ProtocolUDP,
}},
}}
}
complexAfter4 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "4.4.4.4",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p44",
Port: 44,
Protocol: v1.ProtocolUDP,
}},
}}
}
testCases := []struct {
// previousEndpoints and currentEndpoints are used to call appropriate
// handlers OnEndpoints* (based on whether corresponding values are nil
// or non-nil) and must be of equal length.
previousEndpoints []*v1.Endpoints
currentEndpoints []*v1.Endpoints
oldEndpoints map[proxy.ServicePortName][]*endpointsInfo
expectedResult map[proxy.ServicePortName][]*endpointsInfo
expectedStaleEndpoints []proxy.ServiceEndpoint
expectedStaleServiceNames map[proxy.ServicePortName]bool
expectedHealthchecks map[types.NamespacedName]int
}{{
// Case[0]: nothing
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{},
expectedStaleEndpoints: []proxy.ServiceEndpoint{},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[1]: no change, unnamed port
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", unnamedPort),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", unnamedPort),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[2]: no change, named port, local
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortLocal),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortLocal),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 1,
},
}, {
// Case[3]: no change, multiple subsets
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsets),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsets),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[4]: no change, multiple subsets, multiple ports, local
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsetsMultiplePortsLocal),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsetsMultiplePortsLocal),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 1,
},
}, {
// Case[5]: no change, multiple endpoints, subsets, IPs, and ports
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsetsIPsPorts1),
makeTestEndpoints("ns2", "ep2", multipleSubsetsIPsPorts2),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsetsIPsPorts1),
makeTestEndpoints("ns2", "ep2", multipleSubsetsIPsPorts2),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.4:13", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.3:14", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.4:14", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.1:21", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.2:21", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.4:13", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.3:14", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.4:14", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.1:21", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.2:21", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 2,
makeNSN("ns2", "ep2"): 1,
},
}, {
// Case[6]: add an Endpoints
previousEndpoints: []*v1.Endpoints{
nil,
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", unnamedPortLocal),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): true,
},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 1,
},
}, {
// Case[7]: remove an Endpoints
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", unnamedPortLocal),
},
currentEndpoints: []*v1.Endpoints{
nil,
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{},
expectedStaleEndpoints: []proxy.ServiceEndpoint{{
Endpoint: "1.1.1.1:11",
ServicePortName: makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP),
}},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[8]: add an IP and port
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortsLocalNoLocal),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 1,
},
}, {
// Case[9]: remove an IP and port
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortsLocalNoLocal),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{{
Endpoint: "1.1.1.2:11",
ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
}, {
Endpoint: "1.1.1.1:12",
ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
}, {
Endpoint: "1.1.1.2:12",
ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
}},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[10]: add a subset
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsetsWithLocal),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 1,
},
}, {
// Case[11]: remove a subset
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsets),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{{
Endpoint: "1.1.1.2:12",
ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
}},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[12]: rename a port
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortRenamed),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{{
Endpoint: "1.1.1.1:11",
ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
}},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{
makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): true,
},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[13]: renumber a port
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortRenumbered),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{{
Endpoint: "1.1.1.1:11",
ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
}},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[14]: complex add and remove
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", complexBefore1),
makeTestEndpoints("ns2", "ep2", complexBefore2),
nil,
makeTestEndpoints("ns4", "ep4", complexBefore4),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", complexAfter1),
nil,
makeTestEndpoints("ns3", "ep3", complexAfter3),
makeTestEndpoints("ns4", "ep4", complexAfter4),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.22:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.3:23", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "4.4.4.4:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "4.4.4.5:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "4.4.4.6:45", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.11:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:122", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "3.3.3.3:33", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "4.4.4.4:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{{
Endpoint: "2.2.2.2:22",
ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
}, {
Endpoint: "2.2.2.22:22",
ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
}, {
Endpoint: "2.2.2.3:23",
ServicePortName: makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP),
}, {
Endpoint: "4.4.4.5:44",
ServicePortName: makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP),
}, {
Endpoint: "4.4.4.6:45",
ServicePortName: makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP),
}},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): true,
makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP): true,
},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns4", "ep4"): 1,
},
}, {
// Case[15]: change from 0 endpoint address to 1 unnamed port
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", emptyEndpoint),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", unnamedPort),
},
oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{},
expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): {
{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
},
},
expectedStaleEndpoints: []proxy.ServiceEndpoint{},
expectedStaleServiceNames: map[proxy.ServicePortName]bool{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): true,
},
expectedHealthchecks: map[types.NamespacedName]int{},
},
}
for tci, tc := range testCases {
ipt := iptablestest.NewFake()
fp := NewFakeProxier(ipt, false)
fp.hostname = nodeName
// First check that after adding all previous versions of endpoints,
// the fp.oldEndpoints is as we expect.
for i := range tc.previousEndpoints {
if tc.previousEndpoints[i] != nil {
fp.OnEndpointsAdd(tc.previousEndpoints[i])
}
}
fp.endpointsMap.Update(fp.endpointsChanges)
compareEndpointsMaps(t, tci, fp.endpointsMap, tc.oldEndpoints)
// Now let's call appropriate handlers to get to state we want to be.
if len(tc.previousEndpoints) != len(tc.currentEndpoints) {
t.Fatalf("[%d] different lengths of previous and current endpoints", tci)
continue
}
for i := range tc.previousEndpoints {
prev, curr := tc.previousEndpoints[i], tc.currentEndpoints[i]
switch {
case prev == nil:
fp.OnEndpointsAdd(curr)
case curr == nil:
fp.OnEndpointsDelete(prev)
default:
fp.OnEndpointsUpdate(prev, curr)
}
}
result := fp.endpointsMap.Update(fp.endpointsChanges)
newMap := fp.endpointsMap
compareEndpointsMaps(t, tci, newMap, tc.expectedResult)
if len(result.StaleEndpoints) != len(tc.expectedStaleEndpoints) {
t.Errorf("[%d] expected %d staleEndpoints, got %d: %v", tci, len(tc.expectedStaleEndpoints), len(result.StaleEndpoints), result.StaleEndpoints)
}
for _, x := range tc.expectedStaleEndpoints {
found := false
for _, stale := range result.StaleEndpoints {
if stale == x {
found = true
break
}
}
if !found {
t.Errorf("[%d] expected staleEndpoints[%v], but didn't find it: %v", tci, x, result.StaleEndpoints)
}
}
if len(result.StaleServiceNames) != len(tc.expectedStaleServiceNames) {
t.Errorf("[%d] expected %d staleServiceNames, got %d: %v", tci, len(tc.expectedStaleServiceNames), len(result.StaleServiceNames), result.StaleServiceNames)
}
for svcName := range tc.expectedStaleServiceNames {
found := false
for _, stale := range result.StaleServiceNames {
if stale == svcName {
found = true
}
}
if !found {
t.Errorf("[%d] expected staleServiceNames[%v], but didn't find it: %v", tci, svcName, result.StaleServiceNames)
}
}
if !reflect.DeepEqual(result.HCEndpointsLocalIPSize, tc.expectedHealthchecks) {
t.Errorf("[%d] expected healthchecks %v, got %v", tci, tc.expectedHealthchecks, result.HCEndpointsLocalIPSize)
}
}
}
// The majority of EndpointSlice specific tests are not iptables specific and focus on
// the shared EndpointChangeTracker and EndpointSliceCache. This test ensures that the
// iptables proxier supports translating EndpointSlices to iptables output.
func TestEndpointSliceE2E(t *testing.T) {
	// Expected iptables-save payload after syncing one ClusterIP service
	// backed by an EndpointSlice with three ready endpoints and one
	// not-ready endpoint. Note that KUBE-SEP-VLJB2F747S6W7EX4 (10.0.1.4,
	// the not-ready endpoint) gets a chain and SEP rules but no jump from
	// the KUBE-SVC chain, so no traffic is balanced to it.
	expectedIPTablesWithSlice := `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
:KUBE-SEP-VLJB2F747S6W7EX4 - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 0 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 0 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-XGJFVO3L2O5SRFNT
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
-A KUBE-SEP-VLJB2F747S6W7EX4 -m comment --comment ns1/svc1 -s 10.0.1.4/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-VLJB2F747S6W7EX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.4:80
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`
	ipt := iptablestest.NewFake()
	// Second argument enables EndpointSlice handling in the fake proxier.
	fp := NewFakeProxier(ipt, true)
	// Mark all informers as synced so syncProxyRules will actually program rules.
	fp.OnServiceSynced()
	fp.OnEndpointsSynced()
	fp.OnEndpointSlicesSynced()
	serviceName := "svc1"
	namespaceName := "ns1"
	fp.OnServiceAdd(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
		Spec: v1.ServiceSpec{
			ClusterIP: "172.20.1.1",
			Selector:  map[string]string{"foo": "bar"},
			Ports:     []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt(80), Protocol: v1.ProtocolTCP}},
		},
	})
	tcpProtocol := v1.ProtocolTCP
	// The slice is associated to the service via the service-name label, as
	// the EndpointSlice controller would do. Endpoints 1-3 are ready;
	// endpoint 4 is not ready and must not receive traffic.
	endpointSlice := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-1", serviceName),
			Namespace: namespaceName,
			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
		},
		Ports: []discovery.EndpointPort{{
			Name:     utilpointer.StringPtr(""),
			Port:     utilpointer.Int32Ptr(80),
			Protocol: &tcpProtocol,
		}},
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{{
			Addresses:  []string{"10.0.1.1"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			Topology:   map[string]string{"kubernetes.io/hostname": testHostname},
		}, {
			Addresses:  []string{"10.0.1.2"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			Topology:   map[string]string{"kubernetes.io/hostname": "node2"},
		}, {
			Addresses:  []string{"10.0.1.3"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			Topology:   map[string]string{"kubernetes.io/hostname": "node3"},
		}, {
			Addresses:  []string{"10.0.1.4"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(false)},
			Topology:   map[string]string{"kubernetes.io/hostname": "node4"},
		}},
	}
	// Adding the slice must produce exactly the expected iptables output.
	fp.OnEndpointSliceAdd(endpointSlice)
	fp.syncProxyRules()
	assert.Equal(t, expectedIPTablesWithSlice, fp.iptablesData.String())
	// Deleting the slice must change the programmed rules (endpoints gone).
	fp.OnEndpointSliceDelete(endpointSlice)
	fp.syncProxyRules()
	assert.NotEqual(t, expectedIPTablesWithSlice, fp.iptablesData.String())
}
// TestHealthCheckNodePortE2E verifies the iptables output for a LoadBalancer
// service with ExternalTrafficPolicy=Local and a HealthCheckNodePort: the
// filter table must ACCEPT the health-check port (30000), and the nat table
// must route NodePort (30010) traffic through the KUBE-XLB chain so external
// traffic only reaches local endpoints.
func TestHealthCheckNodePortE2E(t *testing.T) {
	// Expected iptables-save payload. The KUBE-XLB chain balances external
	// traffic only to the one endpoint on this node (10.0.1.1, whose
	// topology hostname is testHostname below).
	expectedIPTables := `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns1/svc1 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-XLB-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
:KUBE-SEP-VLJB2F747S6W7EX4 - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 0 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 0 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-NODEPORTS -m comment --comment ns1/svc1 -m tcp -p tcp --dport 30010 -s 127.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns1/svc1 -m tcp -p tcp --dport 30010 -j KUBE-XLB-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-XGJFVO3L2O5SRFNT
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
-A KUBE-SEP-VLJB2F747S6W7EX4 -m comment --comment ns1/svc1 -s 10.0.1.4/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-VLJB2F747S6W7EX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.4:80
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/24 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Balancing rule 0 for ns1/svc1" -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`
	ipt := iptablestest.NewFake()
	// Second argument enables EndpointSlice handling in the fake proxier.
	fp := NewFakeProxier(ipt, true)
	// Mark all informers as synced so syncProxyRules will actually program rules.
	fp.OnServiceSynced()
	fp.OnEndpointsSynced()
	fp.OnEndpointSlicesSynced()
	serviceName := "svc1"
	namespaceName := "ns1"
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
		Spec: v1.ServiceSpec{
			ClusterIP:             "172.20.1.1",
			Selector:              map[string]string{"foo": "bar"},
			Ports:                 []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt(80), NodePort: 30010, Protocol: v1.ProtocolTCP}},
			Type:                  "LoadBalancer",
			HealthCheckNodePort:   30000,
			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
		},
	}
	fp.OnServiceAdd(svc)
	tcpProtocol := v1.ProtocolTCP
	// Slice with three ready endpoints (only 10.0.1.1 is local to
	// testHostname) and one not-ready endpoint (10.0.1.4).
	endpointSlice := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-1", serviceName),
			Namespace: namespaceName,
			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
		},
		Ports: []discovery.EndpointPort{{
			Name:     utilpointer.StringPtr(""),
			Port:     utilpointer.Int32Ptr(80),
			Protocol: &tcpProtocol,
		}},
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{{
			Addresses:  []string{"10.0.1.1"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			Topology:   map[string]string{"kubernetes.io/hostname": testHostname},
		}, {
			Addresses:  []string{"10.0.1.2"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			Topology:   map[string]string{"kubernetes.io/hostname": "node2"},
		}, {
			Addresses:  []string{"10.0.1.3"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			Topology:   map[string]string{"kubernetes.io/hostname": "node3"},
		}, {
			Addresses:  []string{"10.0.1.4"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(false)},
			Topology:   map[string]string{"kubernetes.io/hostname": "node4"},
		}},
	}
	// Service plus slice must produce exactly the expected rules.
	fp.OnEndpointSliceAdd(endpointSlice)
	fp.syncProxyRules()
	assert.Equal(t, expectedIPTables, fp.iptablesData.String())
	// Deleting the service must change the programmed rules.
	fp.OnServiceDelete(svc)
	fp.syncProxyRules()
	assert.NotEqual(t, expectedIPTables, fp.iptablesData.String())
}
// TestProxierDeleteNodePortStaleUDP verifies that when a UDP service gains its
// first endpoint, the proxier flushes stale conntrack state for both the
// ClusterIP and the NodePort, and that no conntrack commands run while the
// service has no endpoints.
func TestProxierDeleteNodePortStaleUDP(t *testing.T) {
	// Script a fake exec: every conntrack invocation succeeds and reports one
	// deleted flow. The test inspects which commands were issued, not output.
	fakeCmd := fakeexec.FakeCmd{}
	fakeExec := fakeexec.FakeExec{
		LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
	}
	makeCmd := func(cmd string, args ...string) exec.Cmd {
		return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
	}
	conntrackOut := func() ([]byte, []byte, error) {
		return []byte("1 flow entries have been deleted"), nil, nil
	}
	// One scripted invocation for the ClusterIP flush, one for the NodePort flush.
	for i := 0; i < 2; i++ {
		fakeCmd.CombinedOutputScript = append(fakeCmd.CombinedOutputScript, conntrackOut)
		fakeExec.CommandScript = append(fakeExec.CommandScript, makeCmd)
	}

	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	fp.exec = &fakeExec

	serviceIP := "10.20.30.41"
	servicePort := 80
	nodePort := 31201
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolUDP,
	}
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = serviceIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(servicePort),
				Protocol: v1.ProtocolUDP,
				NodePort: int32(nodePort),
			}}
		}),
	)

	// With no endpoints, a sync must not invoke conntrack at all.
	makeEndpointsMap(fp)
	fp.syncProxyRules()
	if fakeExec.CommandCalls != 0 {
		t.Fatalf("Created service without endpoints must not clear conntrack entries")
	}

	// Adding the first endpoint to the UDP service must trigger exactly two
	// conntrack invocations: ClusterIP flush and NodePort flush.
	endpointIP := "10.180.0.1"
	makeEndpointsMap(fp,
		makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP: endpointIP,
				}},
				Ports: []v1.EndpointPort{{
					Name:     svcPortName.Port,
					Port:     int32(servicePort),
					Protocol: v1.ProtocolUDP,
				}},
			}}
		}),
	)
	fp.syncProxyRules()
	if fakeExec.CommandCalls != 2 {
		t.Fatalf("Updated UDP service with new endpoints must clear UDP entries")
	}

	proto := strings.ToLower(string(v1.ProtocolUDP))
	// Delete ClusterIP Conntrack entries
	if want, got := fmt.Sprintf("conntrack -D --orig-dst %s -p %s", serviceIP, proto), strings.Join(fakeCmd.CombinedOutputLog[0], " "); got != want {
		t.Errorf("Expected command: %s, but executed %s", want, got)
	}
	// Delete NodePort Conntrack entries
	if want, got := fmt.Sprintf("conntrack -D -p %s --dport %d", proto, nodePort), strings.Join(fakeCmd.CombinedOutputLog[1], " "); got != want {
		t.Errorf("Expected command: %s, but executed %s", want, got)
	}
}
// TestProxierMetricsIptablesTotalRules checks that the IptablesRulesTotal
// gauge reflects the number of filter and nat rules the proxier writes, both
// while the service has no endpoints and after two endpoints are added.
func TestProxierMetricsIptablesTotalRules(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, false)
	metrics.RegisterMetrics()

	serviceIP := "10.20.30.41"
	servicePort := 80
	nodePort := 31201
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	// readRuleTotal scrapes the per-table rule-count gauge; a scrape failure
	// is reported but does not abort the test.
	readRuleTotal := func(table utiliptables.Table) float64 {
		total, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(table)))
		if err != nil {
			t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
		}
		return total
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = serviceIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(servicePort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(nodePort),
			}}
		}),
	)
	makeEndpointsMap(fp)
	fp.syncProxyRules()

	// Expected filter-table lines while the service has no endpoints
	// (NOTE(review): the comments below mention udp while the service is
	// declared TCP — verify against the actual proxier output):
	// -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m udp -p udp -d 10.20.30.41/32 --dport 80 -j REJECT
	// -A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m addrtype --dst-type LOCAL -m udp -p udp --dport 31201 -j REJECT
	// -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
	// -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
	// -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
	// -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
	// COMMIT
	if got := readRuleTotal(utiliptables.TableFilter); got != 7.0 {
		t.Fatalf("Wrong number of filter rule: expected 7 received %f", got)
	}

	// Expected nat-table lines while the service has no endpoints:
	// rules -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
	// -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
	// -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
	// -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
	// -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
	// COMMIT
	if got := readRuleTotal(utiliptables.TableNAT); got != 6.0 {
		t.Fatalf("Wrong number of nat rules: expected 6 received %f", got)
	}

	// Give the service two endpoints and re-sync: the REJECT rules disappear
	// from filter while the nat table gains SVC/SEP load-balancing chains.
	makeEndpointsMap(fp,
		makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{
					{
						IP: "10.0.0.2",
					},
					{
						IP: "10.0.0.5",
					},
				},
				Ports: []v1.EndpointPort{{
					Name:     svcPortName.Port,
					Port:     int32(servicePort),
					Protocol: v1.ProtocolTCP,
				}},
			}}
		}),
	)
	fp.syncProxyRules()

	// Expected filter-table lines with endpoints present:
	// -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
	// -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
	// -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
	// -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
	// COMMIT
	if got := readRuleTotal(utiliptables.TableFilter); got != 5.0 {
		t.Fatalf("Wrong number of filter rule: expected 5 received %f", got)
	}

	// Expected nat-table lines with endpoints present:
	// -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
	// -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
	// -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
	// -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
	// -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m udp -p udp -d 10.20.30.41/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
	// -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m udp -p udp -d 10.20.30.41/32 --dport 80 -j KUBE-SVC-OJWW7NSBVZTDHXNW
	// -A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m udp -p udp --dport 31201 -j KUBE-MARK-MASQ
	// -A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m udp -p udp --dport 31201 -j KUBE-SVC-OJWW7NSBVZTDHXNW
	// -A KUBE-SVC-OJWW7NSBVZTDHXNW -m comment --comment ns1/svc1:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-AMT2SNW3YUNHJFJG
	// -A KUBE-SEP-AMT2SNW3YUNHJFJG -m comment --comment ns1/svc1:p80 -s 10.0.0.2/32 -j KUBE-MARK-MASQ
	// -A KUBE-SEP-AMT2SNW3YUNHJFJG -m comment --comment ns1/svc1:p80 -m udp -p udp -j DNAT --to-destination 10.0.0.2:80
	// -A KUBE-SVC-OJWW7NSBVZTDHXNW -m comment --comment ns1/svc1:p80 -j KUBE-SEP-OUFLBLJVR33W4FIZ
	// -A KUBE-SEP-OUFLBLJVR33W4FIZ -m comment --comment ns1/svc1:p80 -s 10.0.0.5/32 -j KUBE-MARK-MASQ
	// -A KUBE-SEP-OUFLBLJVR33W4FIZ -m comment --comment ns1/svc1:p80 -m udp -p udp -j DNAT --to-destination 10.0.0.5:80
	// -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
	// COMMIT
	if got := readRuleTotal(utiliptables.TableNAT); got != 16.0 {
		t.Fatalf("Wrong number of nat rules: expected 16 received %f", got)
	}
}
// TODO(thockin): add *more* tests for syncProxyRules() or break it down further and test the pieces.
// This test ensures that the iptables proxier supports translating Endpoints to
// iptables output when internalTrafficPolicy is specified
func TestInternalTrafficPolicyE2E(t *testing.T) {
	// endpoint pairs an endpoint IP with the node it lives on.
	type endpoint struct {
		ip       string
		hostname string
	}

	cluster := v1.ServiceInternalTrafficPolicyCluster
	local := v1.ServiceInternalTrafficPolicyLocal

	// Expected ruleset when traffic is balanced over all three cluster-wide
	// endpoints (used both for policy=Cluster and for policy=Local with the
	// feature gate off).
	clusterExpectedIPTables := `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-XGJFVO3L2O5SRFNT
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`

	testCases := []struct {
		name                      string
		internalTrafficPolicy     *v1.ServiceInternalTrafficPolicyType
		featureGateOn             bool
		endpoints                 []endpoint
		expectEndpointRule        bool
		expectedIPTablesWithSlice string
	}{
		{
			name:                  "internalTrafficPolicy is cluster",
			internalTrafficPolicy: &cluster,
			featureGateOn:         true,
			endpoints: []endpoint{
				{"10.0.1.1", testHostname},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectEndpointRule:        true,
			expectedIPTablesWithSlice: clusterExpectedIPTables,
		},
		{
			name:                  "internalTrafficPolicy is local and there is non-zero local endpoints",
			internalTrafficPolicy: &local,
			featureGateOn:         true,
			endpoints: []endpoint{
				{"10.0.1.1", testHostname},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectEndpointRule: true,
			// Only the local endpoint (10.0.1.1) gets a SEP chain.
			expectedIPTablesWithSlice: `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`,
		},
		{
			name:                  "internalTrafficPolicy is local and there is zero local endpoint",
			internalTrafficPolicy: &local,
			featureGateOn:         true,
			endpoints: []endpoint{
				{"10.0.1.1", "host0"},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectEndpointRule: false,
			// No local endpoints at all: traffic to the cluster IP is REJECTed.
			expectedIPTablesWithSlice: `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-SERVICES -m comment --comment "ns1/svc1 has no endpoints" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 -j REJECT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`,
		},
		{
			name:                  "internalTrafficPolicy is local and there is non-zero local endpoint with feature gate off",
			internalTrafficPolicy: &local,
			featureGateOn:         false,
			endpoints: []endpoint{
				{"10.0.1.1", testHostname},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectEndpointRule:        false,
			expectedIPTablesWithSlice: clusterExpectedIPTables,
		},
	}

	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		t.Run(tc.name, func(t *testing.T) {
			// Restore the feature gate at the end of each subtest. Previously
			// this defer sat inside a bare loop, so every restore was delayed
			// until the whole test function returned (defer-in-loop).
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ServiceInternalTrafficPolicy, tc.featureGateOn)()

			ipt := iptablestest.NewFake()
			fp := NewFakeProxier(ipt, true)
			fp.OnServiceSynced()
			fp.OnEndpointsSynced()
			fp.OnEndpointSlicesSynced()

			serviceName := "svc1"
			namespaceName := "ns1"

			svc := &v1.Service{
				ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
				Spec: v1.ServiceSpec{
					ClusterIP: "172.20.1.1",
					Selector:  map[string]string{"foo": "bar"},
					Ports:     []v1.ServicePort{{Name: "", Port: 80, Protocol: v1.ProtocolTCP}},
				},
			}
			if tc.internalTrafficPolicy != nil {
				svc.Spec.InternalTrafficPolicy = tc.internalTrafficPolicy
			}

			fp.OnServiceAdd(svc)

			tcpProtocol := v1.ProtocolTCP
			endpointSlice := &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", serviceName),
					Namespace: namespaceName,
					Labels:    map[string]string{discovery.LabelServiceName: serviceName},
				},
				Ports: []discovery.EndpointPort{{
					Name:     utilpointer.StringPtr(""),
					Port:     utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
			}
			for _, ep := range tc.endpoints {
				endpointSlice.Endpoints = append(endpointSlice.Endpoints, discovery.Endpoint{
					Addresses:  []string{ep.ip},
					Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
					Topology:   map[string]string{"kubernetes.io/hostname": ep.hostname},
				})
			}

			fp.OnEndpointSliceAdd(endpointSlice)
			fp.syncProxyRules()
			assert.Equal(t, tc.expectedIPTablesWithSlice, fp.iptablesData.String())

			if tc.expectEndpointRule {
				// Removing the slice must change the generated rules.
				fp.OnEndpointSliceDelete(endpointSlice)
				fp.syncProxyRules()
				assert.NotEqual(t, tc.expectedIPTablesWithSlice, fp.iptablesData.String())
			}
		})
	}
}
// Test_EndpointSliceReadyAndTerminatingLocal tests that when there are local ready and ready + terminating
// endpoints, only the ready endpoints are used.
func Test_EndpointSliceReadyAndTerminatingLocal(t *testing.T) {
	// Expected ruleset after the slice below is synced. The XLB (local)
	// chain balances only over the two local ready, non-terminating
	// endpoints (10.0.1.1 and 10.0.1.2).
	// NOTE(review): the Service below sets only TargetPort (no Port), so the
	// cluster-IP rules match --dport 0 — confirm this is intentional.
	expectedIPTablesWithSlice := `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-XLB-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
:KUBE-SEP-VLJB2F747S6W7EX4 - [0:0]
:KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 0 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 0 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-EQCHZ7S2PJ72OHAY
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
-A KUBE-SEP-VLJB2F747S6W7EX4 -m comment --comment ns1/svc1 -s 10.0.1.4/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-VLJB2F747S6W7EX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.4:80
-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/24 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Balancing rule 0 for ns1/svc1" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Balancing rule 1 for ns1/svc1" -j KUBE-SEP-IO5XOSKPAXIFQXAJ
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`
	// Fake proxier with EndpointSlice support enabled.
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, true)
	fp.OnServiceSynced()
	fp.OnEndpointsSynced()
	fp.OnEndpointSlicesSynced()
	serviceName := "svc1"
	namespaceName := "ns1"
	// NodePort service with Local external traffic policy: node-port traffic
	// should only be routed to endpoints on this node.
	fp.OnServiceAdd(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
		Spec: v1.ServiceSpec{
			ClusterIP:             "172.20.1.1",
			Type:                  v1.ServiceTypeNodePort,
			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
			Selector:              map[string]string{"foo": "bar"},
			Ports: []v1.ServicePort{
				{
					Name:       "",
					TargetPort: intstr.FromInt(80),
					Protocol:   v1.ProtocolTCP,
				},
			},
		},
	})
	tcpProtocol := v1.ProtocolTCP
	// Mixed slice: local ready, local terminating, and remote endpoints.
	endpointSlice := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-1", serviceName),
			Namespace: namespaceName,
			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
		},
		Ports: []discovery.EndpointPort{{
			Name:     utilpointer.StringPtr(""),
			Port:     utilpointer.Int32Ptr(80),
			Protocol: &tcpProtocol,
		}},
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{
			{
				// Local, ready, non-terminating: eligible for node-port traffic.
				Addresses: []string{"10.0.1.1"},
				Conditions: discovery.EndpointConditions{
					Ready:       utilpointer.BoolPtr(true),
					Serving:     utilpointer.BoolPtr(true),
					Terminating: utilpointer.BoolPtr(false),
				},
				Topology: map[string]string{"kubernetes.io/hostname": testHostname},
			},
			{
				// Local, ready, non-terminating: eligible for node-port traffic.
				Addresses: []string{"10.0.1.2"},
				Conditions: discovery.EndpointConditions{
					Ready:       utilpointer.BoolPtr(true),
					Serving:     utilpointer.BoolPtr(true),
					Terminating: utilpointer.BoolPtr(false),
				},
				Topology: map[string]string{"kubernetes.io/hostname": testHostname},
			},
			{
				// this endpoint should be ignored for node ports since there are ready non-terminating endpoints
				Addresses: []string{"10.0.1.3"},
				Conditions: discovery.EndpointConditions{
					Ready:       utilpointer.BoolPtr(false),
					Serving:     utilpointer.BoolPtr(true),
					Terminating: utilpointer.BoolPtr(true),
				},
				Topology: map[string]string{"kubernetes.io/hostname": testHostname},
			},
			{
				// this endpoint should be ignored for node ports since there are ready non-terminating endpoints
				Addresses: []string{"10.0.1.4"},
				Conditions: discovery.EndpointConditions{
					Ready:       utilpointer.BoolPtr(false),
					Serving:     utilpointer.BoolPtr(false),
					Terminating: utilpointer.BoolPtr(true),
				},
				Topology: map[string]string{"kubernetes.io/hostname": testHostname},
			},
			{
				// this endpoint should be ignored for node ports since it's not local
				Addresses: []string{"10.0.1.5"},
				Conditions: discovery.EndpointConditions{
					Ready:       utilpointer.BoolPtr(true),
					Serving:     utilpointer.BoolPtr(true),
					Terminating: utilpointer.BoolPtr(false),
				},
				Topology: map[string]string{"kubernetes.io/hostname": "host-1"},
			},
		},
	}
	fp.OnEndpointSliceAdd(endpointSlice)
	fp.syncProxyRules()
	t.Log(fp.iptablesData.String())
	assert.Equal(t, expectedIPTablesWithSlice, fp.iptablesData.String())
	// Deleting the slice must change the generated ruleset.
	fp.OnEndpointSliceDelete(endpointSlice)
	fp.syncProxyRules()
	assert.NotEqual(t, expectedIPTablesWithSlice, fp.iptablesData.String())
}
// Test_EndpointSliceOnlyReadyTerminatingLocal tests that when there are only local ready terminating
// endpoints, we fall back to those endpoints.
func Test_EndpointSliceOnlyReadyTerminatingLocal(t *testing.T) {
	// Expected ruleset: with no local ready endpoints, the XLB (local) chain
	// falls back to the two local serving-but-terminating endpoints
	// (10.0.1.1 and 10.0.1.2).
	// NOTE(review): the Service below sets only TargetPort (no Port), so the
	// cluster-IP rules match --dport 0 — confirm this is intentional.
	expectedIPTablesWithSlice := `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-XLB-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
:KUBE-SEP-VLJB2F747S6W7EX4 - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 0 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 0 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-VLJB2F747S6W7EX4
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
-A KUBE-SEP-VLJB2F747S6W7EX4 -m comment --comment ns1/svc1 -s 10.0.1.4/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-VLJB2F747S6W7EX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.4:80
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/24 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Balancing rule 0 for ns1/svc1" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Balancing rule 1 for ns1/svc1" -j KUBE-SEP-IO5XOSKPAXIFQXAJ
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`
	// Fake proxier with EndpointSlice support enabled.
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, true)
	fp.OnServiceSynced()
	fp.OnEndpointsSynced()
	fp.OnEndpointSlicesSynced()
	serviceName := "svc1"
	namespaceName := "ns1"
	// NodePort service with Local external traffic policy.
	fp.OnServiceAdd(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
		Spec: v1.ServiceSpec{
			ClusterIP:             "172.20.1.1",
			Type:                  v1.ServiceTypeNodePort,
			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
			Selector:              map[string]string{"foo": "bar"},
			Ports:                 []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt(80), Protocol: v1.ProtocolTCP}},
		},
	})
	tcpProtocol := v1.ProtocolTCP
	// Slice with only terminating local endpoints plus one remote endpoint.
	endpointSlice := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-1", serviceName),
			Namespace: namespaceName,
			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
		},
		Ports: []discovery.EndpointPort{{
			Name:     utilpointer.StringPtr(""),
			Port:     utilpointer.Int32Ptr(80),
			Protocol: &tcpProtocol,
		}},
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{
			{
				// Local, serving, terminating: used as fallback.
				Addresses: []string{"10.0.1.1"},
				Conditions: discovery.EndpointConditions{
					Ready:       utilpointer.BoolPtr(false),
					Serving:     utilpointer.BoolPtr(true),
					Terminating: utilpointer.BoolPtr(true),
				},
				Topology: map[string]string{"kubernetes.io/hostname": testHostname},
			},
			{
				// Local, serving, terminating: used as fallback.
				Addresses: []string{"10.0.1.2"},
				Conditions: discovery.EndpointConditions{
					Ready:       utilpointer.BoolPtr(false),
					Serving:     utilpointer.BoolPtr(true),
					Terminating: utilpointer.BoolPtr(true),
				},
				Topology: map[string]string{"kubernetes.io/hostname": testHostname},
			},
			{
				// this endpoint should be ignored since it is a not ready terminating endpoint
				Addresses: []string{"10.0.1.3"},
				Conditions: discovery.EndpointConditions{
					Ready:       utilpointer.BoolPtr(false),
					Serving:     utilpointer.BoolPtr(false),
					Terminating: utilpointer.BoolPtr(true),
				},
				Topology: map[string]string{"kubernetes.io/hostname": testHostname},
			},
			{
				// this endpoint should be ignored since it is on another host.
				Addresses: []string{"10.0.1.4"},
				Conditions: discovery.EndpointConditions{
					Ready:       utilpointer.BoolPtr(true),
					Serving:     utilpointer.BoolPtr(true),
					Terminating: utilpointer.BoolPtr(false),
				},
				Topology: map[string]string{"kubernetes.io/hostname": "another-host"},
			},
		},
	}
	fp.OnEndpointSliceAdd(endpointSlice)
	fp.syncProxyRules()
	t.Log(fp.iptablesData.String())
	assert.Equal(t, expectedIPTablesWithSlice, fp.iptablesData.String())
	// Deleting the slice must change the generated ruleset.
	fp.OnEndpointSliceDelete(endpointSlice)
	fp.syncProxyRules()
	assert.NotEqual(t, expectedIPTablesWithSlice, fp.iptablesData.String())
}
|
package statsd
import (
"context"
"fmt"
"math/rand"
"runtime"
"sync"
"testing"
"time"
"github.com/atlassian/gostatsd"
"github.com/atlassian/gostatsd/pkg/stats"
"github.com/ash2k/stager/wait"
"github.com/stretchr/testify/assert"
)
// testAggregator is a stub Aggregator whose methods simply bump invocation
// counters held by the factory that created it.
type testAggregator struct {
	agrNumber int
	af        *testAggregatorFactory
	gostatsd.MetricMap
}

// TrackMetrics does nothing; the stub reports no internal metrics.
func (ta *testAggregator) TrackMetrics(statser stats.Statser) {
}

// Receive records how many metrics were delivered to this aggregator.
func (ta *testAggregator) Receive(m ...*gostatsd.Metric) {
	ta.af.Lock()
	defer ta.af.Unlock()
	ta.af.receiveInvocations[ta.agrNumber] += len(m)
}

// ReceiveMap records one delivery of a pre-aggregated MetricMap.
func (ta *testAggregator) ReceiveMap(mm *gostatsd.MetricMap) {
	ta.af.Lock()
	defer ta.af.Unlock()
	ta.af.receiveMapInvocations[ta.agrNumber]++
}

// Flush records one flush request; the interval itself is ignored.
func (ta *testAggregator) Flush(interval time.Duration) {
	ta.af.Lock()
	defer ta.af.Unlock()
	ta.af.flushInvocations[ta.agrNumber]++
}

// Process records one invocation and hands the embedded MetricMap to f.
func (ta *testAggregator) Process(f ProcessFunc) {
	ta.af.Lock()
	defer ta.af.Unlock()
	ta.af.processInvocations[ta.agrNumber]++
	f(&ta.MetricMap)
}

// Reset records one reset request.
func (ta *testAggregator) Reset() {
	ta.af.Lock()
	defer ta.af.Unlock()
	ta.af.resetInvocations[ta.agrNumber]++
}
// testAggregatorFactory creates testAggregator instances and records, keyed
// by aggregator number, how many times each Aggregator method was invoked.
// The embedded Mutex guards all the maps below.
type testAggregatorFactory struct {
	sync.Mutex
	receiveInvocations    map[int]int // Receive calls, weighted by number of metrics delivered
	receiveMapInvocations map[int]int // ReceiveMap calls
	flushInvocations      map[int]int // Flush calls
	processInvocations    map[int]int // Process calls
	resetInvocations      map[int]int // Reset calls
	numAgrs               int         // number of aggregators created so far
}
// Create builds a new stub aggregator wired back to this factory and
// zero-initialises its per-aggregator invocation counters.
func (af *testAggregatorFactory) Create() Aggregator {
	id := af.numAgrs
	af.numAgrs++
	// Seed every counter map so the aggregator shows up even if a method is
	// never called.
	for _, counters := range []map[int]int{
		af.receiveInvocations,
		af.receiveMapInvocations,
		af.flushInvocations,
		af.processInvocations,
		af.resetInvocations,
	} {
		counters[id] = 0
	}
	agr := &testAggregator{
		agrNumber: id,
		af:        af,
	}
	agr.Counters = gostatsd.Counters{}
	agr.Timers = gostatsd.Timers{}
	agr.Gauges = gostatsd.Gauges{}
	agr.Sets = gostatsd.Sets{}
	return agr
}
// newTestFactory returns a testAggregatorFactory with every invocation-count
// map initialised and no aggregators created yet.
func newTestFactory() *testAggregatorFactory {
	f := &testAggregatorFactory{}
	f.receiveInvocations = make(map[int]int)
	f.receiveMapInvocations = make(map[int]int)
	f.flushInvocations = make(map[int]int)
	f.processInvocations = make(map[int]int)
	f.resetInvocations = make(map[int]int)
	return f
}
// TestNewBackendHandlerShouldCreateCorrectNumberOfWorkers verifies that the
// handler spawns one worker, and the factory one aggregator, per slot.
func TestNewBackendHandlerShouldCreateCorrectNumberOfWorkers(t *testing.T) {
	t.Parallel()
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	numWorkers := rng.Intn(5) + 1
	factory := newTestFactory()
	handler := NewBackendHandler(nil, 0, numWorkers, 1, factory)
	assert.Equal(t, numWorkers, len(handler.workers))
	assert.Equal(t, numWorkers, factory.numAgrs)
}
// TestRunShouldReturnWhenContextCancelled verifies that Run exits once its
// context is done (here, via a one-second timeout).
func TestRunShouldReturnWhenContextCancelled(t *testing.T) {
	t.Parallel()
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()
	handler := NewBackendHandler(nil, 0, 5, 1, newTestFactory())
	handler.Run(ctx)
}
// TestDispatchMetricsShouldDistributeMetrics dispatches metrics concurrently
// and verifies that every single one reaches some aggregator.
func TestDispatchMetricsShouldDistributeMetrics(t *testing.T) {
	t.Parallel()
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	n := r.Intn(5) + 1
	factory := newTestFactory()
	// Use a sync channel (perWorkerBufferSize = 0) so every DispatchMetrics
	// call is handed to a worker before it returns. With a buffered channel
	// (previously 10) metrics could still be sitting in the queue when the
	// context is cancelled below, racing with the invocation-count assertions.
	h := NewBackendHandler(nil, 0, n, 0, factory)
	ctx, cancelFunc := context.WithCancel(context.Background())
	defer cancelFunc()
	var wgFinish wait.Group
	wgFinish.StartWithContext(ctx, h.Run)
	numMetrics := r.Intn(1000) + n*10
	var wg sync.WaitGroup
	wg.Add(numMetrics)
	for i := 0; i < numMetrics; i++ {
		m := &gostatsd.Metric{
			Type:  gostatsd.COUNTER,
			Name:  fmt.Sprintf("counter.metric.%d", r.Int63()),
			Tags:  nil,
			Value: r.Float64(),
		}
		go func() {
			defer wg.Done()
			h.DispatchMetrics(ctx, []*gostatsd.Metric{m})
		}()
	}
	wg.Wait()       // Wait for all metrics to be dispatched
	cancelFunc()    // After all metrics have been dispatched, we signal dispatcher to shut down
	wgFinish.Wait() // Wait for dispatcher to shutdown
	receiveInvocations := getTotalInvocations(factory.receiveInvocations)
	assert.Equal(t, numMetrics, receiveInvocations)
	for agrNum, count := range factory.receiveInvocations {
		if count == 0 {
			t.Errorf("aggregator %d was never invoked", agrNum)
		} else {
			t.Logf("aggregator %d was invoked %d time(s)", agrNum, count)
		}
	}
}
// TestDispatchMetricMapShouldDistributeMetrics dispatches one pre-aggregated
// MetricMap and verifies every aggregator receives a share of it.
func TestDispatchMetricMapShouldDistributeMetrics(t *testing.T) {
	t.Parallel()
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	numAggregators := rng.Intn(5) + 1
	factory := newTestFactory()
	// use a sync channel to force the workers to process events before the context is cancelled
	handler := NewBackendHandler(nil, 0, numAggregators, 0, factory)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	var runGroup wait.Group
	runGroup.StartWithContext(ctx, handler.Run)
	mm := gostatsd.NewMetricMap()
	for i := 0; i < numAggregators*100; i++ {
		metric := &gostatsd.Metric{
			Type:  gostatsd.COUNTER,
			Name:  fmt.Sprintf("counter.metric.%d", rng.Int63()),
			Tags:  nil,
			Value: rng.Float64(),
		}
		metric.TagsKey = metric.FormatTagsKey()
		mm.Receive(metric)
	}
	handler.DispatchMetricMap(ctx, mm)
	cancel()        // After dispatch, we signal dispatcher to shut down
	runGroup.Wait() // Wait for dispatcher to shutdown
	for agrNum, count := range factory.receiveMapInvocations {
		assert.NotZerof(t, count, "aggregator=%d", agrNum)
		if count == 0 {
			t.Errorf("aggregator %d was never invoked", agrNum)
			// Dump how the map would have been split, to aid debugging.
			for idx, mmSplit := range mm.Split(numAggregators) {
				fmt.Printf("aggr %d, names %d\n", idx, len(mmSplit.Counters))
			}
		} else {
			t.Logf("aggregator %d was invoked %d time(s)", agrNum, count)
		}
	}
}
// getTotalInvocations sums all per-aggregator invocation counts.
func getTotalInvocations(inv map[int]int) int {
	total := 0
	for _, n := range inv {
		total += n
	}
	return total
}
// BenchmarkBackendHandler measures parallel DispatchMetrics throughput with
// one worker per CPU.
func BenchmarkBackendHandler(b *testing.B) {
	factory := newTestFactory()
	h := NewBackendHandler(nil, 0, runtime.NumCPU(), 10, factory)
	ctx, cancelFunc := context.WithCancel(context.Background())
	defer cancelFunc()
	var wgFinish wait.Group
	wgFinish.StartWithContext(ctx, h.Run)
	b.ReportAllocs()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		// Per-goroutine RNG: the global math/rand source is mutex-guarded
		// (and rand.Seed is deprecated), so sharing it across RunParallel
		// goroutines adds lock contention that skews the measurement.
		r := rand.New(rand.NewSource(time.Now().UnixNano()))
		for pb.Next() {
			m := &gostatsd.Metric{
				Type:  gostatsd.COUNTER,
				Name:  fmt.Sprintf("counter.metric.%d", r.Int63()),
				Tags:  nil,
				Value: r.Float64(),
			}
			h.DispatchMetrics(ctx, []*gostatsd.Metric{m})
		}
	})
	cancelFunc()    // After all metrics have been dispatched, we signal dispatcher to shut down
	wgFinish.Wait() // Wait for dispatcher to shutdown
}
Fixes a race in TestDispatchMetricsShouldDistributeMetrics (#273)
This fixes a race condition by forcing every dispatched metric through an
unbuffered channel, ensuring each one has been handed to a worker before
the test checks the invocation counts.
package statsd
import (
"context"
"fmt"
"math/rand"
"runtime"
"sync"
"testing"
"time"
"github.com/atlassian/gostatsd"
"github.com/atlassian/gostatsd/pkg/stats"
"github.com/ash2k/stager/wait"
"github.com/stretchr/testify/assert"
)
// testAggregator is a stub Aggregator whose methods simply bump invocation
// counters held by the factory that created it.
type testAggregator struct {
	agrNumber int
	af        *testAggregatorFactory
	gostatsd.MetricMap
}

// TrackMetrics does nothing; the stub reports no internal metrics.
func (ta *testAggregator) TrackMetrics(statser stats.Statser) {
}

// Receive records how many metrics were delivered to this aggregator.
func (ta *testAggregator) Receive(m ...*gostatsd.Metric) {
	ta.af.Lock()
	defer ta.af.Unlock()
	ta.af.receiveInvocations[ta.agrNumber] += len(m)
}

// ReceiveMap records one delivery of a pre-aggregated MetricMap.
func (ta *testAggregator) ReceiveMap(mm *gostatsd.MetricMap) {
	ta.af.Lock()
	defer ta.af.Unlock()
	ta.af.receiveMapInvocations[ta.agrNumber]++
}

// Flush records one flush request; the interval itself is ignored.
func (ta *testAggregator) Flush(interval time.Duration) {
	ta.af.Lock()
	defer ta.af.Unlock()
	ta.af.flushInvocations[ta.agrNumber]++
}

// Process records one invocation and hands the embedded MetricMap to f.
func (ta *testAggregator) Process(f ProcessFunc) {
	ta.af.Lock()
	defer ta.af.Unlock()
	ta.af.processInvocations[ta.agrNumber]++
	f(&ta.MetricMap)
}

// Reset records one reset request.
func (ta *testAggregator) Reset() {
	ta.af.Lock()
	defer ta.af.Unlock()
	ta.af.resetInvocations[ta.agrNumber]++
}
// testAggregatorFactory creates testAggregator instances and records, keyed
// by aggregator number, how many times each Aggregator method was invoked.
// The embedded Mutex guards all the maps below.
type testAggregatorFactory struct {
	sync.Mutex
	receiveInvocations    map[int]int // Receive calls, weighted by number of metrics delivered
	receiveMapInvocations map[int]int // ReceiveMap calls
	flushInvocations      map[int]int // Flush calls
	processInvocations    map[int]int // Process calls
	resetInvocations      map[int]int // Reset calls
	numAgrs               int         // number of aggregators created so far
}
// Create builds a new stub aggregator wired back to this factory and
// zero-initialises its per-aggregator invocation counters.
func (af *testAggregatorFactory) Create() Aggregator {
	id := af.numAgrs
	af.numAgrs++
	// Seed every counter map so the aggregator shows up even if a method is
	// never called.
	for _, counters := range []map[int]int{
		af.receiveInvocations,
		af.receiveMapInvocations,
		af.flushInvocations,
		af.processInvocations,
		af.resetInvocations,
	} {
		counters[id] = 0
	}
	agr := &testAggregator{
		agrNumber: id,
		af:        af,
	}
	agr.Counters = gostatsd.Counters{}
	agr.Timers = gostatsd.Timers{}
	agr.Gauges = gostatsd.Gauges{}
	agr.Sets = gostatsd.Sets{}
	return agr
}
// newTestFactory returns a testAggregatorFactory with every invocation-count
// map initialised and no aggregators created yet.
func newTestFactory() *testAggregatorFactory {
	f := &testAggregatorFactory{}
	f.receiveInvocations = make(map[int]int)
	f.receiveMapInvocations = make(map[int]int)
	f.flushInvocations = make(map[int]int)
	f.processInvocations = make(map[int]int)
	f.resetInvocations = make(map[int]int)
	return f
}
// TestNewBackendHandlerShouldCreateCorrectNumberOfWorkers verifies that the
// handler spawns one worker, and the factory one aggregator, per slot.
func TestNewBackendHandlerShouldCreateCorrectNumberOfWorkers(t *testing.T) {
	t.Parallel()
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	numWorkers := rng.Intn(5) + 1
	factory := newTestFactory()
	handler := NewBackendHandler(nil, 0, numWorkers, 1, factory)
	assert.Equal(t, numWorkers, len(handler.workers))
	assert.Equal(t, numWorkers, factory.numAgrs)
}
// TestRunShouldReturnWhenContextCancelled verifies that Run exits once its
// context is done (here, via a one-second timeout).
func TestRunShouldReturnWhenContextCancelled(t *testing.T) {
	t.Parallel()
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()
	handler := NewBackendHandler(nil, 0, 5, 1, newTestFactory())
	handler.Run(ctx)
}
// TestDispatchMetricsShouldDistributeMetrics dispatches metrics concurrently
// and verifies that every single one reaches some aggregator.
func TestDispatchMetricsShouldDistributeMetrics(t *testing.T) {
	t.Parallel()
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	workerCount := rng.Intn(5) + 1
	factory := newTestFactory()
	// use a sync channel (perWorkerBufferSize = 0) to force the workers to process events before the context is cancelled
	handler := NewBackendHandler(nil, 0, workerCount, 0, factory)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	var runGroup wait.Group
	runGroup.StartWithContext(ctx, handler.Run)
	numMetrics := rng.Intn(1000) + workerCount*10
	var dispatched sync.WaitGroup
	dispatched.Add(numMetrics)
	for i := 0; i < numMetrics; i++ {
		metric := &gostatsd.Metric{
			Type:  gostatsd.COUNTER,
			Name:  fmt.Sprintf("counter.metric.%d", rng.Int63()),
			Tags:  nil,
			Value: rng.Float64(),
		}
		go func() {
			defer dispatched.Done()
			handler.DispatchMetrics(ctx, []*gostatsd.Metric{metric})
		}()
	}
	dispatched.Wait() // every metric has been handed to a worker
	cancel()          // now ask the dispatcher to shut down
	runGroup.Wait()   // and wait until it has
	receiveInvocations := getTotalInvocations(factory.receiveInvocations)
	assert.Equal(t, numMetrics, receiveInvocations)
	for agrNum, count := range factory.receiveInvocations {
		if count == 0 {
			t.Errorf("aggregator %d was never invoked", agrNum)
		} else {
			t.Logf("aggregator %d was invoked %d time(s)", agrNum, count)
		}
	}
}
// TestDispatchMetricMapShouldDistributeMetrics dispatches one pre-aggregated
// MetricMap and verifies every aggregator receives a share of it.
func TestDispatchMetricMapShouldDistributeMetrics(t *testing.T) {
	t.Parallel()
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	numAggregators := rng.Intn(5) + 1
	factory := newTestFactory()
	// use a sync channel (perWorkerBufferSize = 0) to force the workers to process events before the context is cancelled
	handler := NewBackendHandler(nil, 0, numAggregators, 0, factory)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	var runGroup wait.Group
	runGroup.StartWithContext(ctx, handler.Run)
	mm := gostatsd.NewMetricMap()
	for i := 0; i < numAggregators*100; i++ {
		metric := &gostatsd.Metric{
			Type:  gostatsd.COUNTER,
			Name:  fmt.Sprintf("counter.metric.%d", rng.Int63()),
			Tags:  nil,
			Value: rng.Float64(),
		}
		metric.TagsKey = metric.FormatTagsKey()
		mm.Receive(metric)
	}
	handler.DispatchMetricMap(ctx, mm)
	cancel()        // After dispatch, we signal dispatcher to shut down
	runGroup.Wait() // Wait for dispatcher to shutdown
	for agrNum, count := range factory.receiveMapInvocations {
		assert.NotZerof(t, count, "aggregator=%d", agrNum)
		if count == 0 {
			t.Errorf("aggregator %d was never invoked", agrNum)
			// Dump how the map would have been split, to aid debugging.
			for idx, mmSplit := range mm.Split(numAggregators) {
				fmt.Printf("aggr %d, names %d\n", idx, len(mmSplit.Counters))
			}
		} else {
			t.Logf("aggregator %d was invoked %d time(s)", agrNum, count)
		}
	}
}
// getTotalInvocations sums the per-aggregator invocation counts.
func getTotalInvocations(inv map[int]int) int {
	total := 0
	for _, n := range inv {
		total += n
	}
	return total
}
// BenchmarkBackendHandler measures DispatchMetrics throughput with one
// aggregator per CPU and a small per-worker buffer.
func BenchmarkBackendHandler(b *testing.B) {
	factory := newTestFactory()
	h := NewBackendHandler(nil, 0, runtime.NumCPU(), 10, factory)
	ctx, cancelFunc := context.WithCancel(context.Background())
	defer cancelFunc()
	var wgFinish wait.Group
	wgFinish.StartWithContext(ctx, h.Run)
	b.ReportAllocs()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		// Per-goroutine RNG: the previous code used the global rand source,
		// whose internal lock serializes all RunParallel goroutines and
		// distorts the measurement. This also removes the need for the
		// deprecated rand.Seed.
		r := rand.New(rand.NewSource(time.Now().UnixNano()))
		for pb.Next() {
			m := &gostatsd.Metric{
				Type:  gostatsd.COUNTER,
				Name:  fmt.Sprintf("counter.metric.%d", r.Int63()),
				Tags:  nil,
				Value: r.Float64(),
			}
			h.DispatchMetrics(ctx, []*gostatsd.Metric{m})
		}
	})
	cancelFunc()    // After all metrics have been dispatched, we signal dispatcher to shut down
	wgFinish.Wait() // Wait for dispatcher to shutdown
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2017 Red Hat, Inc.
*
*/
package virt_operator
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/golang/mock/gomock"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"kubevirt.io/kubevirt/pkg/certificates/triple/cert"
promv1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1"
secv1 "github.com/openshift/api/security/v1"
secv1fake "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
k8sv1 "k8s.io/api/core/v1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
rbacv1 "k8s.io/api/rbac/v1"
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
extclientfake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
framework "k8s.io/client-go/tools/cache/testing"
"k8s.io/client-go/tools/record"
apiregv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
v1 "kubevirt.io/client-go/api/v1"
promclientfake "kubevirt.io/client-go/generated/prometheus-operator/clientset/versioned/fake"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/version"
kubecontroller "kubevirt.io/kubevirt/pkg/controller"
"kubevirt.io/kubevirt/pkg/testutils"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/apply"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/components"
install "kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/install"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/rbac"
"kubevirt.io/kubevirt/pkg/virt-operator/util"
)
// Event categories used as keys in the tests' resourceChanges map to count
// the add/update/patch/delete operations the operator performed on each
// resource type.
const (
	Added   = "added"
	Updated = "updated"
	Patched = "patched"
	Deleted = "deleted"
)
var _ = Describe("KubeVirt Operator", func() {
var ctrl *gomock.Controller
var kvInterface *kubecli.MockKubeVirtInterface
var kvSource *framework.FakeControllerSource
var kvInformer cache.SharedIndexInformer
var apiServiceClient *install.MockAPIServiceInterface
var serviceAccountSource *framework.FakeControllerSource
var clusterRoleSource *framework.FakeControllerSource
var clusterRoleBindingSource *framework.FakeControllerSource
var roleSource *framework.FakeControllerSource
var roleBindingSource *framework.FakeControllerSource
var crdSource *framework.FakeControllerSource
var serviceSource *framework.FakeControllerSource
var deploymentSource *framework.FakeControllerSource
var daemonSetSource *framework.FakeControllerSource
var validatingWebhookSource *framework.FakeControllerSource
var mutatingWebhookSource *framework.FakeControllerSource
var apiserviceSource *framework.FakeControllerSource
var sccSource *framework.FakeControllerSource
var installStrategyConfigMapSource *framework.FakeControllerSource
var installStrategyJobSource *framework.FakeControllerSource
var infrastructurePodSource *framework.FakeControllerSource
var podDisruptionBudgetSource *framework.FakeControllerSource
var serviceMonitorSource *framework.FakeControllerSource
var namespaceSource *framework.FakeControllerSource
var prometheusRuleSource *framework.FakeControllerSource
var secretsSource *framework.FakeControllerSource
var configMapSource *framework.FakeControllerSource
var stop chan struct{}
var controller *KubeVirtController
var recorder *record.FakeRecorder
var mockQueue *testutils.MockWorkQueue
var virtClient *kubecli.MockKubevirtClient
var kubeClient *fake.Clientset
var secClient *secv1fake.FakeSecurityV1
var extClient *extclientfake.Clientset
var promClient *promclientfake.Clientset
var informers util.Informers
var stores util.Stores
NAMESPACE := "kubevirt-test"
getConfig := func(registry, version string) *util.KubeVirtDeploymentConfig {
return util.GetTargetConfigFromKV(&v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Namespace: NAMESPACE,
},
Spec: v1.KubeVirtSpec{
ImageRegistry: registry,
ImageTag: version,
},
})
}
var totalAdds int
var totalUpdates int
var totalPatches int
var totalDeletions int
var resourceChanges map[string]map[string]int
resourceCount := 53
patchCount := 34
updateCount := 20
deleteFromCache := true
addToCache := true
syncCaches := func(stop chan struct{}) {
go kvInformer.Run(stop)
go informers.ServiceAccount.Run(stop)
go informers.ClusterRole.Run(stop)
go informers.ClusterRoleBinding.Run(stop)
go informers.Role.Run(stop)
go informers.RoleBinding.Run(stop)
go informers.Crd.Run(stop)
go informers.Service.Run(stop)
go informers.Deployment.Run(stop)
go informers.DaemonSet.Run(stop)
go informers.ValidationWebhook.Run(stop)
go informers.MutatingWebhook.Run(stop)
go informers.APIService.Run(stop)
go informers.SCC.Run(stop)
go informers.InstallStrategyJob.Run(stop)
go informers.InstallStrategyConfigMap.Run(stop)
go informers.InfrastructurePod.Run(stop)
go informers.PodDisruptionBudget.Run(stop)
go informers.ServiceMonitor.Run(stop)
go informers.Namespace.Run(stop)
go informers.PrometheusRule.Run(stop)
go informers.Secrets.Run(stop)
go informers.ConfigMap.Run(stop)
Expect(cache.WaitForCacheSync(stop, kvInformer.HasSynced)).To(BeTrue())
cache.WaitForCacheSync(stop, informers.ServiceAccount.HasSynced)
cache.WaitForCacheSync(stop, informers.ClusterRole.HasSynced)
cache.WaitForCacheSync(stop, informers.ClusterRoleBinding.HasSynced)
cache.WaitForCacheSync(stop, informers.Role.HasSynced)
cache.WaitForCacheSync(stop, informers.RoleBinding.HasSynced)
cache.WaitForCacheSync(stop, informers.Crd.HasSynced)
cache.WaitForCacheSync(stop, informers.Service.HasSynced)
cache.WaitForCacheSync(stop, informers.Deployment.HasSynced)
cache.WaitForCacheSync(stop, informers.DaemonSet.HasSynced)
cache.WaitForCacheSync(stop, informers.ValidationWebhook.HasSynced)
cache.WaitForCacheSync(stop, informers.MutatingWebhook.HasSynced)
cache.WaitForCacheSync(stop, informers.APIService.HasSynced)
cache.WaitForCacheSync(stop, informers.SCC.HasSynced)
cache.WaitForCacheSync(stop, informers.InstallStrategyJob.HasSynced)
cache.WaitForCacheSync(stop, informers.InstallStrategyConfigMap.HasSynced)
cache.WaitForCacheSync(stop, informers.InfrastructurePod.HasSynced)
cache.WaitForCacheSync(stop, informers.PodDisruptionBudget.HasSynced)
cache.WaitForCacheSync(stop, informers.ServiceMonitor.HasSynced)
cache.WaitForCacheSync(stop, informers.Namespace.HasSynced)
cache.WaitForCacheSync(stop, informers.PrometheusRule.HasSynced)
cache.WaitForCacheSync(stop, informers.Secrets.HasSynced)
cache.WaitForCacheSync(stop, informers.ConfigMap.HasSynced)
}
getSCC := func() secv1.SecurityContextConstraints {
return secv1.SecurityContextConstraints{
ObjectMeta: metav1.ObjectMeta{
Name: "privileged",
},
Users: []string{
"someUser",
},
}
}
var defaultConfig *util.KubeVirtDeploymentConfig
BeforeEach(func() {
err := os.Setenv(util.OperatorImageEnvName, fmt.Sprintf("%s/virt-operator:%s", "someregistry", "v9.9.9"))
Expect(err).NotTo(HaveOccurred())
defaultConfig = getConfig("", "")
totalAdds = 0
totalUpdates = 0
totalPatches = 0
totalDeletions = 0
resourceChanges = make(map[string]map[string]int)
deleteFromCache = true
addToCache = true
stop = make(chan struct{})
ctrl = gomock.NewController(GinkgoT())
virtClient = kubecli.NewMockKubevirtClient(ctrl)
kvInterface = kubecli.NewMockKubeVirtInterface(ctrl)
apiServiceClient = install.NewMockAPIServiceInterface(ctrl)
kvInformer, kvSource = testutils.NewFakeInformerFor(&v1.KubeVirt{})
recorder = record.NewFakeRecorder(100)
informers.ServiceAccount, serviceAccountSource = testutils.NewFakeInformerFor(&k8sv1.ServiceAccount{})
stores.ServiceAccountCache = informers.ServiceAccount.GetStore()
informers.ClusterRole, clusterRoleSource = testutils.NewFakeInformerFor(&rbacv1.ClusterRole{})
stores.ClusterRoleCache = informers.ClusterRole.GetStore()
informers.ClusterRoleBinding, clusterRoleBindingSource = testutils.NewFakeInformerFor(&rbacv1.ClusterRoleBinding{})
stores.ClusterRoleBindingCache = informers.ClusterRoleBinding.GetStore()
informers.Role, roleSource = testutils.NewFakeInformerFor(&rbacv1.Role{})
stores.RoleCache = informers.Role.GetStore()
informers.RoleBinding, roleBindingSource = testutils.NewFakeInformerFor(&rbacv1.RoleBinding{})
stores.RoleBindingCache = informers.RoleBinding.GetStore()
informers.Crd, crdSource = testutils.NewFakeInformerFor(&extv1.CustomResourceDefinition{})
stores.CrdCache = informers.Crd.GetStore()
informers.Service, serviceSource = testutils.NewFakeInformerFor(&k8sv1.Service{})
stores.ServiceCache = informers.Service.GetStore()
informers.Deployment, deploymentSource = testutils.NewFakeInformerFor(&appsv1.Deployment{})
stores.DeploymentCache = informers.Deployment.GetStore()
informers.DaemonSet, daemonSetSource = testutils.NewFakeInformerFor(&appsv1.DaemonSet{})
stores.DaemonSetCache = informers.DaemonSet.GetStore()
informers.ValidationWebhook, validatingWebhookSource = testutils.NewFakeInformerFor(&admissionregistrationv1.ValidatingWebhookConfiguration{})
stores.ValidationWebhookCache = informers.ValidationWebhook.GetStore()
informers.MutatingWebhook, mutatingWebhookSource = testutils.NewFakeInformerFor(&admissionregistrationv1.MutatingWebhookConfiguration{})
stores.MutatingWebhookCache = informers.MutatingWebhook.GetStore()
informers.APIService, apiserviceSource = testutils.NewFakeInformerFor(&apiregv1.APIService{})
stores.APIServiceCache = informers.APIService.GetStore()
informers.SCC, sccSource = testutils.NewFakeInformerFor(&secv1.SecurityContextConstraints{})
stores.SCCCache = informers.SCC.GetStore()
informers.InstallStrategyConfigMap, installStrategyConfigMapSource = testutils.NewFakeInformerFor(&k8sv1.ConfigMap{})
stores.InstallStrategyConfigMapCache = informers.InstallStrategyConfigMap.GetStore()
informers.InstallStrategyJob, installStrategyJobSource = testutils.NewFakeInformerFor(&batchv1.Job{})
stores.InstallStrategyJobCache = informers.InstallStrategyJob.GetStore()
informers.InfrastructurePod, infrastructurePodSource = testutils.NewFakeInformerFor(&k8sv1.Pod{})
stores.InfrastructurePodCache = informers.InfrastructurePod.GetStore()
informers.PodDisruptionBudget, podDisruptionBudgetSource = testutils.NewFakeInformerFor(&policyv1beta1.PodDisruptionBudget{})
stores.PodDisruptionBudgetCache = informers.PodDisruptionBudget.GetStore()
informers.Namespace, namespaceSource = testutils.NewFakeInformerWithIndexersFor(
&k8sv1.Namespace{}, cache.Indexers{
"namespace_name": func(obj interface{}) ([]string, error) {
return []string{obj.(*k8sv1.Namespace).GetName()}, nil
},
})
stores.NamespaceCache = informers.Namespace.GetStore()
// test OpenShift components
stores.IsOnOpenshift = true
informers.ServiceMonitor, serviceMonitorSource = testutils.NewFakeInformerFor(&promv1.ServiceMonitor{Spec: promv1.ServiceMonitorSpec{}})
stores.ServiceMonitorCache = informers.ServiceMonitor.GetStore()
stores.ServiceMonitorEnabled = true
informers.PrometheusRule, prometheusRuleSource = testutils.NewFakeInformerFor(&promv1.PrometheusRule{Spec: promv1.PrometheusRuleSpec{}})
stores.PrometheusRuleCache = informers.PrometheusRule.GetStore()
stores.PrometheusRulesEnabled = true
informers.Secrets, secretsSource = testutils.NewFakeInformerFor(&k8sv1.Secret{})
stores.SecretCache = informers.Secrets.GetStore()
informers.ConfigMap, configMapSource = testutils.NewFakeInformerFor(&k8sv1.ConfigMap{})
stores.ConfigMapCache = informers.ConfigMap.GetStore()
controller = NewKubeVirtController(virtClient, apiServiceClient, kvInformer, recorder, stores, informers, NAMESPACE)
// Wrap our workqueue to have a way to detect when we are done processing updates
mockQueue = testutils.NewMockWorkQueue(controller.queue)
controller.queue = mockQueue
// Set up mock client
virtClient.EXPECT().KubeVirt(NAMESPACE).Return(kvInterface).AnyTimes()
kubeClient = fake.NewSimpleClientset()
secClient = &secv1fake.FakeSecurityV1{
Fake: &fake.NewSimpleClientset().Fake,
}
extClient = extclientfake.NewSimpleClientset()
promClient = promclientfake.NewSimpleClientset()
virtClient.EXPECT().AdmissionregistrationV1().Return(kubeClient.AdmissionregistrationV1()).AnyTimes()
virtClient.EXPECT().CoreV1().Return(kubeClient.CoreV1()).AnyTimes()
virtClient.EXPECT().BatchV1().Return(kubeClient.BatchV1()).AnyTimes()
virtClient.EXPECT().RbacV1().Return(kubeClient.RbacV1()).AnyTimes()
virtClient.EXPECT().AppsV1().Return(kubeClient.AppsV1()).AnyTimes()
virtClient.EXPECT().SecClient().Return(secClient).AnyTimes()
virtClient.EXPECT().ExtensionsClient().Return(extClient).AnyTimes()
virtClient.EXPECT().PolicyV1beta1().Return(kubeClient.PolicyV1beta1()).AnyTimes()
virtClient.EXPECT().PrometheusClient().Return(promClient).AnyTimes()
// Make sure that all unexpected calls to kubeClient will fail
kubeClient.Fake.PrependReactor("*", "*", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
if action.GetVerb() == "get" && action.GetResource().Resource == "secrets" {
return true, nil, errors.NewNotFound(schema.GroupResource{Group: "", Resource: "secrets"}, "whatever")
}
if action.GetVerb() == "get" && action.GetResource().Resource == "validatingwebhookconfigurations" {
return true, nil, errors.NewNotFound(schema.GroupResource{Group: "", Resource: "validatingwebhookconfigurations"}, "whatever")
}
if action.GetVerb() == "get" && action.GetResource().Resource == "mutatingwebhookconfigurations" {
return true, nil, errors.NewNotFound(schema.GroupResource{Group: "", Resource: "mutatingwebhookconfigurations"}, "whatever")
}
if action.GetVerb() != "get" || action.GetResource().Resource != "namespaces" {
Expect(action).To(BeNil())
}
return true, nil, nil
})
apiServiceClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil, errors.NewNotFound(schema.GroupResource{Group: "", Resource: "apiservices"}, "whatever"))
secClient.Fake.PrependReactor("*", "*", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
Expect(action).To(BeNil())
return true, nil, nil
})
extClient.Fake.PrependReactor("*", "*", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
Expect(action).To(BeNil())
return true, nil, nil
})
promClient.Fake.PrependReactor("*", "*", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
Expect(action).To(BeNil())
return true, nil, nil
})
syncCaches(stop)
// add the privileged SCC without KubeVirt accounts
scc := getSCC()
sccSource.Add(&scc)
})
AfterEach(func() {
close(stop)
// Ensure that we add checks for expected events to every test
Expect(recorder.Events).To(BeEmpty())
ctrl.Finish()
})
injectMetadata := func(objectMeta *metav1.ObjectMeta, config *util.KubeVirtDeploymentConfig) {
if config == nil {
return
}
if objectMeta.Labels == nil {
objectMeta.Labels = make(map[string]string)
}
objectMeta.Labels[v1.ManagedByLabel] = v1.ManagedByLabelOperatorValue
if config.GetProductVersion() != "" {
objectMeta.Labels[v1.AppVersionLabel] = config.GetProductVersion()
}
if config.GetProductName() != "" {
objectMeta.Labels[v1.AppPartOfLabel] = config.GetProductName()
}
if objectMeta.Annotations == nil {
objectMeta.Annotations = make(map[string]string)
}
objectMeta.Annotations[v1.InstallStrategyVersionAnnotation] = config.GetKubeVirtVersion()
objectMeta.Annotations[v1.InstallStrategyRegistryAnnotation] = config.GetImageRegistry()
objectMeta.Annotations[v1.InstallStrategyIdentifierAnnotation] = config.GetDeploymentID()
objectMeta.Annotations[v1.KubeVirtGenerationAnnotation] = "1"
objectMeta.Labels[v1.AppComponentLabel] = v1.AppComponent
}
addKubeVirt := func(kv *v1.KubeVirt) {
mockQueue.ExpectAdds(1)
kvSource.Add(kv)
mockQueue.Wait()
}
addServiceAccount := func(sa *k8sv1.ServiceAccount) {
mockQueue.ExpectAdds(1)
serviceAccountSource.Add(sa)
mockQueue.Wait()
}
addClusterRole := func(cr *rbacv1.ClusterRole) {
mockQueue.ExpectAdds(1)
clusterRoleSource.Add(cr)
mockQueue.Wait()
}
addClusterRoleBinding := func(crb *rbacv1.ClusterRoleBinding) {
mockQueue.ExpectAdds(1)
clusterRoleBindingSource.Add(crb)
mockQueue.Wait()
}
addRole := func(role *rbacv1.Role) {
mockQueue.ExpectAdds(1)
roleSource.Add(role)
mockQueue.Wait()
}
addRoleBinding := func(rb *rbacv1.RoleBinding) {
mockQueue.ExpectAdds(1)
roleBindingSource.Add(rb)
mockQueue.Wait()
}
addCrd := func(crd *extv1.CustomResourceDefinition, kv *v1.KubeVirt) {
mockQueue.ExpectAdds(1)
if kv != nil {
apply.SetGeneration(&kv.Status.Generations, crd)
}
crdSource.Add(crd)
mockQueue.Wait()
}
addService := func(svc *k8sv1.Service) {
mockQueue.ExpectAdds(1)
serviceSource.Add(svc)
mockQueue.Wait()
}
addDeployment := func(depl *appsv1.Deployment, kv *v1.KubeVirt) {
mockQueue.ExpectAdds(1)
if kv != nil {
apply.SetGeneration(&kv.Status.Generations, depl)
}
deploymentSource.Add(depl)
mockQueue.Wait()
}
addDaemonset := func(ds *appsv1.DaemonSet, kv *v1.KubeVirt) {
mockQueue.ExpectAdds(1)
if kv != nil {
apply.SetGeneration(&kv.Status.Generations, ds)
}
daemonSetSource.Add(ds)
mockQueue.Wait()
}
addValidatingWebhook := func(wh *admissionregistrationv1.ValidatingWebhookConfiguration, kv *v1.KubeVirt) {
mockQueue.ExpectAdds(1)
if kv != nil {
apply.SetGeneration(&kv.Status.Generations, wh)
}
validatingWebhookSource.Add(wh)
mockQueue.Wait()
}
addMutatingWebhook := func(wh *admissionregistrationv1.MutatingWebhookConfiguration, kv *v1.KubeVirt) {
mockQueue.ExpectAdds(1)
if kv != nil {
apply.SetGeneration(&kv.Status.Generations, wh)
}
mutatingWebhookSource.Add(wh)
mockQueue.Wait()
}
addAPIService := func(wh *apiregv1.APIService) {
mockQueue.ExpectAdds(1)
apiserviceSource.Add(wh)
mockQueue.Wait()
}
addInstallStrategyJob := func(job *batchv1.Job) {
mockQueue.ExpectAdds(1)
installStrategyJobSource.Add(job)
mockQueue.Wait()
}
addPod := func(pod *k8sv1.Pod) {
mockQueue.ExpectAdds(1)
infrastructurePodSource.Add(pod)
mockQueue.Wait()
}
addPodDisruptionBudget := func(podDisruptionBudget *policyv1beta1.PodDisruptionBudget, kv *v1.KubeVirt) {
mockQueue.ExpectAdds(1)
if kv != nil {
apply.SetGeneration(&kv.Status.Generations, podDisruptionBudget)
}
podDisruptionBudgetSource.Add(podDisruptionBudget)
mockQueue.Wait()
}
addSecret := func(secret *k8sv1.Secret) {
mockQueue.ExpectAdds(1)
secretsSource.Add(secret)
mockQueue.Wait()
}
addConfigMap := func(configMap *k8sv1.ConfigMap) {
mockQueue.ExpectAdds(1)
if _, ok := configMap.Labels[v1.InstallStrategyLabel]; ok {
installStrategyConfigMapSource.Add(configMap)
} else {
configMapSource.Add(configMap)
}
mockQueue.Wait()
}
addSCC := func(scc *secv1.SecurityContextConstraints) {
mockQueue.ExpectAdds(1)
sccSource.Add(scc)
mockQueue.Wait()
}
addServiceMonitor := func(serviceMonitor *promv1.ServiceMonitor) {
mockQueue.ExpectAdds(1)
serviceMonitorSource.Add(serviceMonitor)
mockQueue.Wait()
}
addPrometheusRule := func(prometheusRule *promv1.PrometheusRule) {
mockQueue.ExpectAdds(1)
prometheusRuleSource.Add(prometheusRule)
mockQueue.Wait()
}
addResource := func(obj runtime.Object, config *util.KubeVirtDeploymentConfig, kv *v1.KubeVirt) {
switch resource := obj.(type) {
case *k8sv1.ServiceAccount:
injectMetadata(&obj.(*k8sv1.ServiceAccount).ObjectMeta, config)
addServiceAccount(resource)
case *rbacv1.ClusterRole:
injectMetadata(&obj.(*rbacv1.ClusterRole).ObjectMeta, config)
addClusterRole(resource)
case *rbacv1.ClusterRoleBinding:
injectMetadata(&obj.(*rbacv1.ClusterRoleBinding).ObjectMeta, config)
addClusterRoleBinding(resource)
case *rbacv1.Role:
injectMetadata(&obj.(*rbacv1.Role).ObjectMeta, config)
addRole(resource)
case *rbacv1.RoleBinding:
injectMetadata(&obj.(*rbacv1.RoleBinding).ObjectMeta, config)
addRoleBinding(resource)
case *extv1.CustomResourceDefinition:
injectMetadata(&obj.(*extv1.CustomResourceDefinition).ObjectMeta, config)
addCrd(resource, kv)
case *k8sv1.Service:
injectMetadata(&obj.(*k8sv1.Service).ObjectMeta, config)
addService(resource)
case *appsv1.Deployment:
injectMetadata(&obj.(*appsv1.Deployment).ObjectMeta, config)
addDeployment(resource, kv)
case *appsv1.DaemonSet:
injectMetadata(&obj.(*appsv1.DaemonSet).ObjectMeta, config)
addDaemonset(resource, kv)
case *admissionregistrationv1.ValidatingWebhookConfiguration:
injectMetadata(&obj.(*admissionregistrationv1.ValidatingWebhookConfiguration).ObjectMeta, config)
addValidatingWebhook(resource, kv)
case *admissionregistrationv1.MutatingWebhookConfiguration:
injectMetadata(&obj.(*admissionregistrationv1.MutatingWebhookConfiguration).ObjectMeta, config)
addMutatingWebhook(resource, kv)
case *apiregv1.APIService:
injectMetadata(&obj.(*apiregv1.APIService).ObjectMeta, config)
addAPIService(resource)
case *batchv1.Job:
injectMetadata(&obj.(*batchv1.Job).ObjectMeta, config)
addInstallStrategyJob(resource)
case *k8sv1.ConfigMap:
injectMetadata(&obj.(*k8sv1.ConfigMap).ObjectMeta, config)
addConfigMap(resource)
case *k8sv1.Pod:
injectMetadata(&obj.(*k8sv1.Pod).ObjectMeta, config)
addPod(resource)
case *policyv1beta1.PodDisruptionBudget:
injectMetadata(&obj.(*policyv1beta1.PodDisruptionBudget).ObjectMeta, config)
addPodDisruptionBudget(resource, kv)
case *k8sv1.Secret:
injectMetadata(&obj.(*k8sv1.Secret).ObjectMeta, config)
addSecret(resource)
case *secv1.SecurityContextConstraints:
injectMetadata(&obj.(*secv1.SecurityContextConstraints).ObjectMeta, config)
addSCC(resource)
case *promv1.ServiceMonitor:
injectMetadata(&obj.(*promv1.ServiceMonitor).ObjectMeta, config)
addServiceMonitor(resource)
case *promv1.PrometheusRule:
injectMetadata(&obj.(*promv1.PrometheusRule).ObjectMeta, config)
addPrometheusRule(resource)
default:
Fail("unknown resource type")
}
split := strings.Split(fmt.Sprintf("%T", obj), ".")
resourceKey := strings.ToLower(split[len(split)-1]) + "s"
if _, ok := resourceChanges[resourceKey]; !ok {
resourceChanges[resourceKey] = make(map[string]int)
}
resourceChanges[resourceKey][Added]++
}
addInstallStrategy := func(config *util.KubeVirtDeploymentConfig) {
// install strategy config
resource, _ := install.NewInstallStrategyConfigMap(config, true, NAMESPACE)
resource.Name = fmt.Sprintf("%s-%s", resource.Name, rand.String(10))
injectMetadata(&resource.ObjectMeta, config)
addConfigMap(resource)
}
addPodDisruptionBudgets := func(config *util.KubeVirtDeploymentConfig, apiDeployment *appsv1.Deployment, controller *appsv1.Deployment, kv *v1.KubeVirt) {
minAvailable := intstr.FromInt(int(1))
apiPodDisruptionBudget := &policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Namespace: apiDeployment.Namespace,
Name: apiDeployment.Name + "-pdb",
Labels: apiDeployment.Labels,
},
Spec: policyv1beta1.PodDisruptionBudgetSpec{
MinAvailable: &minAvailable,
Selector: apiDeployment.Spec.Selector,
},
}
injectMetadata(&apiPodDisruptionBudget.ObjectMeta, config)
addPodDisruptionBudget(apiPodDisruptionBudget, kv)
controllerPodDisruptionBudget := &policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Namespace: controller.Namespace,
Name: controller.Name + "-pdb",
Labels: controller.Labels,
},
Spec: policyv1beta1.PodDisruptionBudgetSpec{
MinAvailable: &minAvailable,
Selector: controller.Spec.Selector,
},
}
injectMetadata(&controllerPodDisruptionBudget.ObjectMeta, config)
addPodDisruptionBudget(controllerPodDisruptionBudget, kv)
}
addPodsWithIndividualConfigs := func(config *util.KubeVirtDeploymentConfig,
configController *util.KubeVirtDeploymentConfig,
configHandler *util.KubeVirtDeploymentConfig,
shouldAddPodDisruptionBudgets bool,
kv *v1.KubeVirt) {
// we need at least one active pod for
// virt-api
// virt-controller
// virt-handler
apiDeployment, _ := components.NewApiServerDeployment(NAMESPACE, config.GetImageRegistry(), config.GetImagePrefix(), config.GetApiVersion(), "", "", config.GetImagePullPolicy(), config.GetVerbosity(), config.GetExtraEnv())
pod := &k8sv1.Pod{
ObjectMeta: apiDeployment.Spec.Template.ObjectMeta,
Spec: apiDeployment.Spec.Template.Spec,
Status: k8sv1.PodStatus{
Phase: k8sv1.PodRunning,
ContainerStatuses: []k8sv1.ContainerStatus{
{Ready: true, Name: "somecontainer"},
},
},
}
injectMetadata(&pod.ObjectMeta, config)
pod.Name = "virt-api-xxxx"
addPod(pod)
controller, _ := components.NewControllerDeployment(NAMESPACE, configController.GetImageRegistry(), configController.GetImagePrefix(), configController.GetControllerVersion(), configController.GetLauncherVersion(), "", "", configController.GetImagePullPolicy(), configController.GetVerbosity(), configController.GetExtraEnv())
pod = &k8sv1.Pod{
ObjectMeta: controller.Spec.Template.ObjectMeta,
Spec: controller.Spec.Template.Spec,
Status: k8sv1.PodStatus{
Phase: k8sv1.PodRunning,
ContainerStatuses: []k8sv1.ContainerStatus{
{Ready: true, Name: "somecontainer"},
},
},
}
pod.Name = "virt-controller-xxxx"
injectMetadata(&pod.ObjectMeta, configController)
addPod(pod)
handler, _ := components.NewHandlerDaemonSet(NAMESPACE, configHandler.GetImageRegistry(), configHandler.GetImagePrefix(), configHandler.GetHandlerVersion(), "", "", configController.GetLauncherVersion(), configHandler.GetImagePullPolicy(), configHandler.GetVerbosity(), configHandler.GetExtraEnv())
pod = &k8sv1.Pod{
ObjectMeta: handler.Spec.Template.ObjectMeta,
Spec: handler.Spec.Template.Spec,
Status: k8sv1.PodStatus{
Phase: k8sv1.PodRunning,
ContainerStatuses: []k8sv1.ContainerStatus{
{Ready: true, Name: "somecontainer"},
},
},
}
injectMetadata(&pod.ObjectMeta, configHandler)
pod.Name = "virt-handler-xxxx"
addPod(pod)
if shouldAddPodDisruptionBudgets {
addPodDisruptionBudgets(config, apiDeployment, controller, kv)
}
}
addPodsWithOptionalPodDisruptionBudgets := func(config *util.KubeVirtDeploymentConfig, shouldAddPodDisruptionBudgets bool, kv *v1.KubeVirt) {
addPodsWithIndividualConfigs(config, config, config, shouldAddPodDisruptionBudgets, kv)
}
addPodsAndPodDisruptionBudgets := func(config *util.KubeVirtDeploymentConfig, kv *v1.KubeVirt) {
addPodsWithOptionalPodDisruptionBudgets(config, true, kv)
}
generateRandomResources := func() int {
version := fmt.Sprintf("rand-%s", rand.String(10))
registry := fmt.Sprintf("rand-%s", rand.String(10))
config := getConfig(registry, version)
all := make([]runtime.Object, 0)
all = append(all, &k8sv1.ServiceAccount{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("rand-%s", rand.String(10)),
},
})
all = append(all, &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("rand-%s", rand.String(10)),
},
})
all = append(all, &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("rand-%s", rand.String(10)),
},
})
all = append(all, &rbacv1.Role{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "Role",
},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("rand-%s", rand.String(10)),
},
})
all = append(all, &rbacv1.RoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "RoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("rand-%s", rand.String(10)),
},
})
all = append(all, &extv1.CustomResourceDefinition{
TypeMeta: metav1.TypeMeta{
APIVersion: "apiextensions.k8s.io/v1",
Kind: "CustomResourceDefinition",
},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("rand-%s", rand.String(10)),
},
})
all = append(all, &k8sv1.Service{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Service",
},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("rand-%s", rand.String(10)),
},
})
all = append(all, &appsv1.DaemonSet{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("rand-%s", rand.String(10)),
},
})
all = append(all, &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("rand-%s", rand.String(10)),
},
})
all = append(all, &secv1.SecurityContextConstraints{
TypeMeta: metav1.TypeMeta{
APIVersion: "security.openshift.io/v1",
Kind: "SecurityContextConstraints",
},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("rand-%s", rand.String(10)),
},
})
for _, obj := range all {
addResource(obj, config, nil)
}
return len(all)
}
addDummyValidationWebhook := func() {
version := fmt.Sprintf("rand-%s", rand.String(10))
registry := fmt.Sprintf("rand-%s", rand.String(10))
config := getConfig(registry, version)
validationWebhook := &admissionregistrationv1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: "virt-operator-tmp-webhook",
},
}
injectMetadata(&validationWebhook.ObjectMeta, config)
addValidatingWebhook(validationWebhook, nil)
}
// addAll populates the informer caches with every object the operator is
// expected to manage for the given config: RBAC, CRDs, the prometheus rule CR,
// SCCs, services, deployments/daemonset plus their PDBs, the CA secret and
// configmap, webhooks, apiservices and cert secrets. Every object is annotated
// with the customize-components hash before being added, so the controller
// sees them as already reconciled for this customization.
addAll := func(config *util.KubeVirtDeploymentConfig, kv *v1.KubeVirt) {
	c, _ := apply.NewCustomizer(kv.Spec.CustomizeComponents)
	all := make([]runtime.Object, 0)
	// rbac
	all = append(all, rbac.GetAllCluster()...)
	all = append(all, rbac.GetAllApiServer(NAMESPACE)...)
	all = append(all, rbac.GetAllHandler(NAMESPACE)...)
	all = append(all, rbac.GetAllController(NAMESPACE)...)
	// crds
	functions := []func() (*extv1.CustomResourceDefinition, error){
		components.NewVirtualMachineInstanceCrd, components.NewPresetCrd, components.NewReplicaSetCrd,
		components.NewVirtualMachineCrd, components.NewVirtualMachineInstanceMigrationCrd,
		components.NewVirtualMachineSnapshotCrd, components.NewVirtualMachineSnapshotContentCrd,
		components.NewVirtualMachineRestoreCrd,
	}
	for _, f := range functions {
		crd, err := f()
		if err != nil {
			// CRD construction is static; a failure here is a programming error
			panic(fmt.Errorf("This should not happen, %v", err))
		}
		all = append(all, crd)
	}
	// cr
	all = append(all, components.NewPrometheusRuleCR(config.GetNamespace(), config.WorkloadUpdatesEnabled()))
	// sccs
	all = append(all, components.NewKubeVirtControllerSCC(NAMESPACE))
	all = append(all, components.NewKubeVirtHandlerSCC(NAMESPACE))
	// services and deployments
	all = append(all, components.NewOperatorWebhookService(NAMESPACE))
	all = append(all, components.NewPrometheusService(NAMESPACE))
	all = append(all, components.NewApiServerService(NAMESPACE))
	apiDeployment, _ := components.NewApiServerDeployment(NAMESPACE, config.GetImageRegistry(), config.GetImagePrefix(), config.GetApiVersion(), "", "", config.GetImagePullPolicy(), config.GetVerbosity(), config.GetExtraEnv())
	apiDeploymentPdb := components.NewPodDisruptionBudgetForDeployment(apiDeployment)
	controller, _ := components.NewControllerDeployment(NAMESPACE, config.GetImageRegistry(), config.GetImagePrefix(), config.GetControllerVersion(), config.GetLauncherVersion(), "", "", config.GetImagePullPolicy(), config.GetVerbosity(), config.GetExtraEnv())
	controllerPdb := components.NewPodDisruptionBudgetForDeployment(controller)
	handler, _ := components.NewHandlerDaemonSet(NAMESPACE, config.GetImageRegistry(), config.GetImagePrefix(), config.GetHandlerVersion(), "", "", config.GetLauncherVersion(), config.GetImagePullPolicy(), config.GetVerbosity(), config.GetExtraEnv())
	all = append(all, apiDeployment, apiDeploymentPdb, controller, controllerPdb, handler)
	all = append(all, rbac.GetAllServiceMonitor(NAMESPACE, config.GetMonitorNamespace(), config.GetMonitorServiceAccount())...)
	all = append(all, components.NewServiceMonitorCR(NAMESPACE, config.GetMonitorNamespace(), true))
	// ca certificate
	caSecret := components.NewCACertSecret(NAMESPACE)
	components.PopulateSecretWithCertificate(caSecret, nil, &metav1.Duration{Duration: apply.Duration7d})
	caCert, _ := components.LoadCertificates(caSecret)
	caBundle := cert.EncodeCertPEM(caCert.Leaf)
	all = append(all, caSecret)
	caConfigMap := components.NewKubeVirtCAConfigMap(NAMESPACE)
	caConfigMap.Data = map[string]string{components.CABundleKey: string(caBundle)}
	all = append(all, caConfigMap)
	// webhooks and apiservice: each client config carries the CA bundle
	// generated above so TLS verification lines up with the cert secrets
	validatingWebhook := components.NewVirtAPIValidatingWebhookConfiguration(config.GetNamespace())
	for i := range validatingWebhook.Webhooks {
		validatingWebhook.Webhooks[i].ClientConfig.CABundle = caBundle
	}
	all = append(all, validatingWebhook)
	mutatingWebhook := components.NewVirtAPIMutatingWebhookConfiguration(config.GetNamespace())
	for i := range mutatingWebhook.Webhooks {
		mutatingWebhook.Webhooks[i].ClientConfig.CABundle = caBundle
	}
	all = append(all, mutatingWebhook)
	apiServices := components.NewVirtAPIAPIServices(config.GetNamespace())
	for _, apiService := range apiServices {
		apiService.Spec.CABundle = caBundle
		all = append(all, apiService)
	}
	validatingWebhook = components.NewOpertorValidatingWebhookConfiguration(NAMESPACE)
	for i := range validatingWebhook.Webhooks {
		validatingWebhook.Webhooks[i].ClientConfig.CABundle = caBundle
	}
	all = append(all, validatingWebhook)
	secrets := components.NewCertSecrets(NAMESPACE, config.GetNamespace())
	for _, secret := range secrets {
		components.PopulateSecretWithCertificate(secret, caCert, &metav1.Duration{Duration: apply.Duration1d})
		all = append(all, secret)
	}
	// stamp every object with the customizer hash, then add it to the caches
	for _, obj := range all {
		m := obj.(metav1.Object)
		a := m.GetAnnotations()
		if len(a) == 0 {
			a = map[string]string{}
		}
		a[v1.KubeVirtCustomizeComponentAnnotationHash] = c.Hash()
		m.SetAnnotations(a)
		addResource(obj, config, kv)
	}
}
// makePodDisruptionBudgetsReady blocks until both the virt-api and
// virt-controller PodDisruptionBudgets show up in the PDB cache, polling
// once per second.
makePodDisruptionBudgetsReady := func() {
	for _, suffix := range []string{"/virt-api-pdb", "/virt-controller-pdb"} {
		key := NAMESPACE + suffix
		// we need to wait until the pdb exists
		for {
			if _, found, _ := stores.PodDisruptionBudgetCache.GetByKey(key); found {
				break
			}
			time.Sleep(time.Second)
		}
	}
}
// makeApiAndControllerReady waits for the virt-api and virt-controller
// deployments to appear in the deployment cache, marks each as fully ready
// by setting Status.Replicas/ReadyReplicas to the desired replica count,
// and finally waits for their PodDisruptionBudgets as well.
makeApiAndControllerReady := func() {
	// makeDeploymentReady emits a Modify event carrying a copy of the
	// deployment whose status reports all replicas ready.
	makeDeploymentReady := func(item interface{}) {
		depl, _ := item.(*appsv1.Deployment)
		deplNew := depl.DeepCopy()
		var replicas int32 = 1
		if depl.Spec.Replicas != nil {
			replicas = *depl.Spec.Replicas
		}
		deplNew.Status.Replicas = replicas
		deplNew.Status.ReadyReplicas = replicas
		deploymentSource.Modify(deplNew)
	}
	for _, name := range []string{"/virt-api", "/virt-controller"} {
		exists := false
		var obj interface{}
		// we need to wait until the deployment exists
		for !exists {
			obj, exists, _ = controller.stores.DeploymentCache.GetByKey(NAMESPACE + name)
			if exists {
				makeDeploymentReady(obj)
			}
			time.Sleep(time.Second)
		}
	}
	makePodDisruptionBudgetsReady()
}
// makeHandlerReady waits for the virt-handler daemonset to appear in the
// cache and then emits a Modify event marking one pod as scheduled and ready.
makeHandlerReady := func() {
	done := false
	// we need to wait until the daemonset exists
	for !done {
		var cached interface{}
		cached, done, _ = controller.stores.DaemonSetCache.GetByKey(NAMESPACE + "/virt-handler")
		if done {
			ds, _ := cached.(*appsv1.DaemonSet)
			ready := ds.DeepCopy()
			ready.Status.DesiredNumberScheduled = 1
			ready.Status.NumberReady = 1
			daemonSetSource.Modify(ready)
		}
		// sleep after every probe, including the successful one, exactly
		// like the polling loops for the deployments
		time.Sleep(time.Second)
	}
}
// Deletion helpers: each removes the object with the given cache key from the
// corresponding informer store (if present) by emitting a Delete event on the
// matching fake source. mockQueue.ExpectAdds(1) is armed before the event and
// mockQueue.Wait() is called afterwards, so each helper returns only after the
// controller work queue has seen the resulting add.
deleteServiceAccount := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.ServiceAccount.GetStore().GetByKey(key); exists {
		serviceAccountSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteClusterRole := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.ClusterRole.GetStore().GetByKey(key); exists {
		clusterRoleSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteClusterRoleBinding := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.ClusterRoleBinding.GetStore().GetByKey(key); exists {
		clusterRoleBindingSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteRole := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.Role.GetStore().GetByKey(key); exists {
		roleSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteRoleBinding := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.RoleBinding.GetStore().GetByKey(key); exists {
		roleBindingSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteCrd := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.Crd.GetStore().GetByKey(key); exists {
		crdSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteService := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.Service.GetStore().GetByKey(key); exists {
		serviceSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteDeployment := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.Deployment.GetStore().GetByKey(key); exists {
		deploymentSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteDaemonset := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.DaemonSet.GetStore().GetByKey(key); exists {
		daemonSetSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteValidationWebhook := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.ValidationWebhook.GetStore().GetByKey(key); exists {
		validatingWebhookSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteMutatingWebhook := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.MutatingWebhook.GetStore().GetByKey(key); exists {
		mutatingWebhookSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteAPIService := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.APIService.GetStore().GetByKey(key); exists {
		apiserviceSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteInstallStrategyJob := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.InstallStrategyJob.GetStore().GetByKey(key); exists {
		installStrategyJobSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deletePodDisruptionBudget := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.PodDisruptionBudget.GetStore().GetByKey(key); exists {
		podDisruptionBudgetSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteSecret := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.Secrets.GetStore().GetByKey(key); exists {
		secretsSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
// deleteConfigMap is special: configmaps live in two separate caches (plain
// configmaps and install-strategy configmaps), so it checks both.
deleteConfigMap := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.ConfigMap.GetStore().GetByKey(key); exists {
		configMap := obj.(*k8sv1.ConfigMap)
		configMapSource.Delete(configMap)
	} else if obj, exists, _ := informers.InstallStrategyConfigMap.GetStore().GetByKey(key); exists {
		configMap := obj.(*k8sv1.ConfigMap)
		installStrategyConfigMapSource.Delete(configMap)
	}
	mockQueue.Wait()
}
deleteSCC := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.SCC.GetStore().GetByKey(key); exists {
		sccSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteServiceMonitor := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.ServiceMonitor.GetStore().GetByKey(key); exists {
		serviceMonitorSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deletePrometheusRule := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.PrometheusRule.GetStore().GetByKey(key); exists {
		prometheusRuleSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
// deleteResource routes a deletion for the given resource kind to the matching
// typed helper and records it in resourceChanges under the Deleted counter.
// Unknown resource kinds fail the test.
deleteResource := func(resource string, key string) {
	handlers := map[string]func(string){
		"serviceaccounts":                 deleteServiceAccount,
		"clusterroles":                    deleteClusterRole,
		"clusterrolebindings":             deleteClusterRoleBinding,
		"roles":                           deleteRole,
		"rolebindings":                    deleteRoleBinding,
		"customresourcedefinitions":       deleteCrd,
		"services":                        deleteService,
		"deployments":                     deleteDeployment,
		"daemonsets":                      deleteDaemonset,
		"validatingwebhookconfigurations": deleteValidationWebhook,
		"mutatingwebhookconfigurations":   deleteMutatingWebhook,
		"apiservices":                     deleteAPIService,
		"jobs":                            deleteInstallStrategyJob,
		"configmaps":                      deleteConfigMap,
		"poddisruptionbudgets":            deletePodDisruptionBudget,
		"secrets":                         deleteSecret,
		"securitycontextconstraints":      deleteSCC,
		"servicemonitors":                 deleteServiceMonitor,
		"prometheusrules":                 deletePrometheusRule,
	}
	handler, known := handlers[resource]
	if !known {
		Fail(fmt.Sprintf("unknown resource type %+v", resource))
	}
	handler(key)
	if _, ok := resourceChanges[resource]; !ok {
		resourceChanges[resource] = make(map[string]int)
	}
	resourceChanges[resource][Deleted]++
}
// genericUpdateFunc is a fake-client reactor for "update" verbs: it bumps the
// global and per-resource Updated counters and returns the updated object.
genericUpdateFunc := func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	update, ok := action.(testing.UpdateAction)
	Expect(ok).To(BeTrue(), "genericUpdateFunction testing ok")
	totalUpdates++

	res := action.GetResource().Resource
	if resourceChanges[res] == nil {
		resourceChanges[res] = map[string]int{}
	}
	resourceChanges[res][Updated]++

	return true, update.GetObject(), nil
}
// genericPatchFunc is a fake-client reactor for "patch" verbs: it bumps the
// global and per-resource Patched counters. It returns a nil object; callers
// that need a typed result wrap it (see the typed patch funcs below).
genericPatchFunc := func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	_, ok := action.(testing.PatchAction)
	Expect(ok).To(BeTrue())
	totalPatches++

	res := action.GetResource().Resource
	if resourceChanges[res] == nil {
		resourceChanges[res] = map[string]int{}
	}
	resourceChanges[res][Patched]++

	return true, nil, nil
}
// The typed patch funcs below delegate the accounting to genericPatchFunc but,
// unlike it, return an empty object of the expected concrete type instead of
// nil as the patch result.
webhookValidationPatchFunc := func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	genericPatchFunc(action)
	return true, &admissionregistrationv1.ValidatingWebhookConfiguration{}, nil
}
webhookMutatingPatchFunc := func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	genericPatchFunc(action)
	return true, &admissionregistrationv1.MutatingWebhookConfiguration{}, nil
}
deploymentPatchFunc := func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	genericPatchFunc(action)
	return true, &appsv1.Deployment{}, nil
}
daemonsetPatchFunc := func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	genericPatchFunc(action)
	return true, &appsv1.DaemonSet{}, nil
}
podDisruptionBudgetPatchFunc := func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	genericPatchFunc(action)
	return true, &policyv1beta1.PodDisruptionBudget{}, nil
}
crdPatchFunc := func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	genericPatchFunc(action)
	return true, &extv1.CustomResourceDefinition{}, nil
}
// genericCreateFunc is a fake-client reactor for "create" verbs: it counts the
// creation, optionally mirrors the object into the informer caches, and
// returns the created object.
genericCreateFunc := func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	created, ok := action.(testing.CreateAction)
	Expect(ok).To(BeTrue())
	totalAdds++

	newObj := created.GetObject()
	if addToCache {
		addResource(newObj, nil, nil)
	}
	return true, newObj, nil
}
genericDeleteFunc := func(action testing.Action) (handled bool, obj runtime.Object, err error) {
deleted, ok := action.(testing.DeleteAction)
Expect(ok).To(BeTrue())
totalDeletions++
var key string
if len(deleted.GetNamespace()) > 0 {
key = deleted.GetNamespace() + "/"
}
key += deleted.GetName()
if deleteFromCache {
deleteResource(deleted.GetResource().Resource, key)
}
return true, nil, nil
}
// shouldExpectInstallStrategyDeletion registers a configmap delete reactor
// that mirrors deletions into the test caches, but deliberately skips the
// "kubevirt-ca" configmap (returning handled=false passes the action on to
// the default reactor chain).
shouldExpectInstallStrategyDeletion := func() {
	kubeClient.Fake.PrependReactor("delete", "configmaps", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
		deleted, ok := action.(testing.DeleteAction)
		Expect(ok).To(BeTrue())
		if deleted.GetName() == "kubevirt-ca" {
			// not an install-strategy configmap; leave it to other reactors
			return false, nil, nil
		}
		var key string
		if len(deleted.GetNamespace()) > 0 {
			key = deleted.GetNamespace() + "/"
		}
		key += deleted.GetName()
		deleteResource(deleted.GetResource().Resource, key)
		return true, nil, nil
	})
}
// shouldExpectDeletions wires the generic delete reactor onto every resource
// kind the operator may delete, across all fake clients (core, extensions,
// security, prometheus), and routes mocked apiservice deletes through the
// same accounting.
shouldExpectDeletions := func() {
	kubeClient.Fake.PrependReactor("delete", "serviceaccounts", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "clusterroles", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "clusterrolebindings", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "roles", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "rolebindings", genericDeleteFunc)
	extClient.Fake.PrependReactor("delete", "customresourcedefinitions", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "services", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "deployments", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "daemonsets", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "validatingwebhookconfigurations", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "mutatingwebhookconfigurations", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "secrets", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "configmaps", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "poddisruptionbudgets", genericDeleteFunc)
	secClient.Fake.PrependReactor("delete", "securitycontextconstraints", genericDeleteFunc)
	promClient.Fake.PrependReactor("delete", "servicemonitors", genericDeleteFunc)
	promClient.Fake.PrependReactor("delete", "prometheusrules", genericDeleteFunc)
	// the apiservice client is a gomock, not a fake clientset; translate its
	// Delete calls into a synthetic DeleteAction for the same bookkeeping
	apiServiceClient.EXPECT().Delete(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Do(func(ctx context.Context, name string, options interface{}) {
		genericDeleteFunc(&testing.DeleteActionImpl{ActionImpl: testing.ActionImpl{Resource: schema.GroupVersionResource{Resource: "apiservices"}}, Name: name})
	})
}
// shouldExpectJobDeletion/shouldExpectJobCreation route install-strategy job
// deletes and creates through the generic counting reactors.
shouldExpectJobDeletion := func() {
	kubeClient.Fake.PrependReactor("delete", "jobs", genericDeleteFunc)
}
shouldExpectJobCreation := func() {
	kubeClient.Fake.PrependReactor("create", "jobs", genericCreateFunc)
}
// shouldExpectPatchesAndUpdates wires the patch/update reactors for every
// resource kind the operator reconciles in place. RBAC objects and SCCs go
// through "update"; everything else goes through "patch", using the typed
// patch funcs where the caller needs a concrete result object.
shouldExpectPatchesAndUpdates := func() {
	extClient.Fake.PrependReactor("patch", "customresourcedefinitions", crdPatchFunc)
	kubeClient.Fake.PrependReactor("patch", "serviceaccounts", genericPatchFunc)
	kubeClient.Fake.PrependReactor("update", "clusterroles", genericUpdateFunc)
	kubeClient.Fake.PrependReactor("update", "clusterrolebindings", genericUpdateFunc)
	kubeClient.Fake.PrependReactor("update", "roles", genericUpdateFunc)
	kubeClient.Fake.PrependReactor("update", "rolebindings", genericUpdateFunc)
	kubeClient.Fake.PrependReactor("patch", "validatingwebhookconfigurations", webhookValidationPatchFunc)
	kubeClient.Fake.PrependReactor("patch", "mutatingwebhookconfigurations", webhookMutatingPatchFunc)
	kubeClient.Fake.PrependReactor("patch", "secrets", genericPatchFunc)
	kubeClient.Fake.PrependReactor("patch", "configmaps", genericPatchFunc)
	kubeClient.Fake.PrependReactor("patch", "services", genericPatchFunc)
	kubeClient.Fake.PrependReactor("patch", "daemonsets", daemonsetPatchFunc)
	kubeClient.Fake.PrependReactor("patch", "deployments", deploymentPatchFunc)
	kubeClient.Fake.PrependReactor("patch", "poddisruptionbudgets", podDisruptionBudgetPatchFunc)
	secClient.Fake.PrependReactor("update", "securitycontextconstraints", genericUpdateFunc)
	promClient.Fake.PrependReactor("patch", "servicemonitors", genericPatchFunc)
	promClient.Fake.PrependReactor("patch", "prometheusrules", genericPatchFunc)
	// mocked apiservice client: count its patches the same way
	apiServiceClient.EXPECT().Patch(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Do(func(args ...interface{}) {
		genericPatchFunc(&testing.PatchActionImpl{ActionImpl: testing.ActionImpl{Resource: schema.GroupVersionResource{Resource: "apiservices"}}})
	})
}
// shouldExpectRbacBackupCreations expects only RBAC object creations (used
// where the operator backs up RBAC before an update).
shouldExpectRbacBackupCreations := func() {
	kubeClient.Fake.PrependReactor("create", "clusterroles", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "clusterrolebindings", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "roles", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "rolebindings", genericCreateFunc)
}
// shouldExpectCreations wires the generic create reactor onto every resource
// kind the operator deploys, across all fake clients, plus the mocked
// apiservice client.
shouldExpectCreations := func() {
	kubeClient.Fake.PrependReactor("create", "serviceaccounts", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "clusterroles", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "clusterrolebindings", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "roles", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "rolebindings", genericCreateFunc)
	extClient.Fake.PrependReactor("create", "customresourcedefinitions", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "services", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "deployments", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "daemonsets", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "validatingwebhookconfigurations", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "mutatingwebhookconfigurations", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "secrets", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "configmaps", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "poddisruptionbudgets", genericCreateFunc)
	secClient.Fake.PrependReactor("create", "securitycontextconstraints", genericCreateFunc)
	promClient.Fake.PrependReactor("create", "servicemonitors", genericCreateFunc)
	promClient.Fake.PrependReactor("create", "prometheusrules", genericCreateFunc)
	apiServiceClient.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Do(func(ctx context.Context, obj runtime.Object, opts metav1.CreateOptions) {
		genericCreateFunc(&testing.CreateActionImpl{Object: obj})
	})
}
// shouldExpectKubeVirtUpdate expects `times` Update calls on the KubeVirt CR
// and mirrors each updated object back into the informer store.
shouldExpectKubeVirtUpdate := func(times int) {
	update := kvInterface.EXPECT().Update(gomock.Any())
	update.Do(func(kv *v1.KubeVirt) {
		kvInformer.GetStore().Update(kv)
		update.Return(kv, nil)
	}).Times(times)
}
// shouldExpectKubeVirtUpdateStatus is the UpdateStatus counterpart of the
// above.
shouldExpectKubeVirtUpdateStatus := func(times int) {
	update := kvInterface.EXPECT().UpdateStatus(gomock.Any())
	update.Do(func(kv *v1.KubeVirt) {
		kvInformer.GetStore().Update(kv)
		update.Return(kv, nil)
	}).Times(times)
}
// shouldExpectKubeVirtUpdateStatusVersion additionally asserts that the
// target and observed versions in the status match the given config.
shouldExpectKubeVirtUpdateStatusVersion := func(times int, config *util.KubeVirtDeploymentConfig) {
	update := kvInterface.EXPECT().UpdateStatus(gomock.Any())
	update.Do(func(kv *v1.KubeVirt) {
		Expect(kv.Status.TargetKubeVirtVersion).To(Equal(config.GetKubeVirtVersion()))
		Expect(kv.Status.ObservedKubeVirtVersion).To(Equal(config.GetKubeVirtVersion()))
		kvInformer.GetStore().Update(kv)
		update.Return(kv, nil)
	}).Times(times)
}
// shouldExpectKubeVirtUpdateStatusFailureCondition expects exactly one
// UpdateStatus call carrying a single condition with the given reason.
shouldExpectKubeVirtUpdateStatusFailureCondition := func(reason string) {
	update := kvInterface.EXPECT().UpdateStatus(gomock.Any())
	update.Do(func(kv *v1.KubeVirt) {
		Expect(len(kv.Status.Conditions)).To(Equal(1))
		Expect(kv.Status.Conditions[0].Reason).To(Equal(reason))
		kvInformer.GetStore().Update(kv)
		update.Return(kv, nil)
	}).Times(1)
}
// getLatestKubeVirt returns the current copy of the given KubeVirt CR from
// the informer store, or nil if it is absent (or not a *v1.KubeVirt).
getLatestKubeVirt := func(kv *v1.KubeVirt) *v1.KubeVirt {
	key := kv.GetNamespace() + "/" + kv.GetName()
	obj, exists, _ := kvInformer.GetStore().GetByKey(key)
	if !exists {
		return nil
	}
	latest, _ := obj.(*v1.KubeVirt)
	return latest
}
// shouldExpectHCOConditions asserts that the KubeVirt status carries the
// Available/Progressing/Degraded conditions with the expected statuses.
shouldExpectHCOConditions := func(kv *v1.KubeVirt, available k8sv1.ConditionStatus, progressing k8sv1.ConditionStatus, degraded k8sv1.ConditionStatus) {
	condType := func(c v1.KubeVirtCondition) v1.KubeVirtConditionType { return c.Type }
	condStatus := func(c v1.KubeVirtCondition) k8sv1.ConditionStatus { return c.Status }

	expected := map[v1.KubeVirtConditionType]k8sv1.ConditionStatus{
		v1.KubeVirtConditionAvailable:   available,
		v1.KubeVirtConditionProgressing: progressing,
		v1.KubeVirtConditionDegraded:    degraded,
	}
	for conditionType, conditionStatus := range expected {
		Expect(kv.Status.Conditions).To(ContainElement(
			And(
				WithTransform(condType, Equal(conditionType)),
				WithTransform(condStatus, Equal(conditionStatus)),
			),
		))
	}
}
// fakeNamespaceModificationEvent emits a Modify event for the operator
// namespace without any labels, and waits until the controller queue has
// picked it up.
fakeNamespaceModificationEvent := func() {
	// Add modification event for namespace w/o the labels we need
	mockQueue.ExpectAdds(1)
	namespaceSource.Modify(&k8sv1.Namespace{
		TypeMeta: metav1.TypeMeta{
			Kind: "Namespace",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: NAMESPACE,
		},
	})
	mockQueue.Wait()
}
// shouldExpectNamespacePatch counts namespace patches via the generic
// patch reactor.
shouldExpectNamespacePatch := func() {
	kubeClient.Fake.PrependReactor("patch", "namespaces", genericPatchFunc)
}
Context("On valid KubeVirt object", func() {
// When the namespace already carries the cluster-monitoring label, the
// controller must not patch it: no namespace patch reactor is registered, so
// any patch attempt would fail this test.
It("Should not patch kubevirt namespace when labels are already defined", func(done Done) {
	defer close(done)
	// Add fake namespace with labels predefined
	err := informers.Namespace.GetStore().Add(&k8sv1.Namespace{
		TypeMeta: metav1.TypeMeta{
			Kind: "Namespace",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: NAMESPACE,
			Labels: map[string]string{
				"openshift.io/cluster-monitoring": "true",
			},
		},
	})
	Expect(err).To(Not(HaveOccurred()), "could not add fake namespace to the store")
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Generation: int64(1),
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Status: v1.KubeVirtStatus{
			Phase: v1.KubeVirtPhaseDeleted,
		},
	}
	// Add kubevirt deployment and mark everything as ready
	addKubeVirt(kv)
	kubecontroller.SetLatestApiVersionAnnotation(kv)
	shouldExpectKubeVirtUpdateStatus(1)
	shouldExpectCreations()
	addInstallStrategy(defaultConfig)
	addAll(defaultConfig, kv)
	addPodsAndPodDisruptionBudgets(defaultConfig, kv)
	// mark components ready once each (the original called makeHandlerReady
	// twice; a single call after the api/controller are ready is sufficient)
	makeApiAndControllerReady()
	makeHandlerReady()
	shouldExpectPatchesAndUpdates()
	// Now when the controller runs, if the namespace will be patched, the test will fail
	// because the patch is not expected here.
	controller.Execute()
}, 15)
// Once a KubeVirt CR with a deletion timestamp reaches the Deleted phase, the
// controller must delete the install-strategy configmap and drop the
// finalizer from the CR.
It("should delete install strategy configmap once kubevirt install is deleted", func(done Done) {
	defer close(done)
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Status: v1.KubeVirtStatus{
			Phase: v1.KubeVirtPhaseDeleted,
		},
	}
	kv.DeletionTimestamp = now()
	util.UpdateConditionsDeleting(kv)
	shouldExpectInstallStrategyDeletion()
	kubecontroller.SetLatestApiVersionAnnotation(kv)
	addKubeVirt(kv)
	addInstallStrategy(defaultConfig)
	shouldExpectKubeVirtUpdate(1)
	controller.Execute()
	// the finalizer must be gone after deletion completed
	kv = getLatestKubeVirt(kv)
	Expect(len(kv.ObjectMeta.Finalizers)).To(Equal(0))
}, 15)
// Deploying with a custom image tag must surface that tag in the KubeVirt
// status (asserted inside shouldExpectKubeVirtUpdateStatusVersion) and end
// with Available=true, Progressing=false, Degraded=false.
It("should observe custom image tag in status during deploy", func(done Done) {
	defer close(done)
	defer GinkgoRecover()
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
			Generation: int64(1),
		},
		Spec: v1.KubeVirtSpec{
			ImageTag: "custom.tag",
		},
		Status: v1.KubeVirtStatus{
			Phase:           v1.KubeVirtPhaseDeployed,
			OperatorVersion: version.Get().String(),
		},
	}
	// create all resources which should already exist
	kubecontroller.SetLatestApiVersionAnnotation(kv)
	addKubeVirt(kv)
	customConfig := getConfig(defaultConfig.GetImageRegistry(), "custom.tag")
	fakeNamespaceModificationEvent()
	shouldExpectNamespacePatch()
	shouldExpectPatchesAndUpdates()
	addAll(customConfig, kv)
	// install strategy config
	addInstallStrategy(customConfig)
	addPodsAndPodDisruptionBudgets(customConfig, kv)
	makeApiAndControllerReady()
	makeHandlerReady()
	shouldExpectKubeVirtUpdateStatusVersion(1, customConfig)
	controller.Execute()
	kv = getLatestKubeVirt(kv)
	shouldExpectHCOConditions(kv, k8sv1.ConditionTrue, k8sv1.ConditionFalse, k8sv1.ConditionFalse)
}, 15)
// Once virt-api is deployed, the temporary operator validation webhook must
// be removed; it is expected to be the only deletion performed.
It("delete temporary validation webhook once virt-api is deployed", func(done Done) {
	defer close(done)
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
			Generation: int64(1),
		},
		Status: v1.KubeVirtStatus{
			Phase:           v1.KubeVirtPhaseDeployed,
			OperatorVersion: version.Get().String(),
		},
	}
	defaultConfig.SetTargetDeploymentConfig(kv)
	defaultConfig.SetObservedDeploymentConfig(kv)
	util.UpdateConditionsCreated(kv)
	util.UpdateConditionsAvailable(kv)
	// keep the caches intact so the deletion can be counted exactly once
	deleteFromCache = false
	// create all resources which should already exist
	kubecontroller.SetLatestApiVersionAnnotation(kv)
	addKubeVirt(kv)
	addDummyValidationWebhook()
	addInstallStrategy(defaultConfig)
	addAll(defaultConfig, kv)
	addPodsAndPodDisruptionBudgets(defaultConfig, kv)
	makeApiAndControllerReady()
	makeHandlerReady()
	shouldExpectDeletions()
	fakeNamespaceModificationEvent()
	shouldExpectNamespacePatch()
	shouldExpectPatchesAndUpdates()
	shouldExpectKubeVirtUpdateStatus(1)
	controller.Execute()
	// only the temporary webhook may be deleted
	Expect(totalDeletions).To(Equal(1))
}, 15)
// A fully deployed, up-to-date KubeVirt must only trigger the expected
// status update and namespace patch — no creations or deletions.
It("should do nothing if KubeVirt object is deployed", func(done Done) {
	defer close(done)
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
			Generation: int64(1),
		},
		Status: v1.KubeVirtStatus{
			Phase:           v1.KubeVirtPhaseDeployed,
			OperatorVersion: version.Get().String(),
		},
	}
	defaultConfig.SetTargetDeploymentConfig(kv)
	defaultConfig.SetObservedDeploymentConfig(kv)
	util.UpdateConditionsCreated(kv)
	util.UpdateConditionsAvailable(kv)
	// create all resources which should already exist
	kubecontroller.SetLatestApiVersionAnnotation(kv)
	addKubeVirt(kv)
	addInstallStrategy(defaultConfig)
	addAll(defaultConfig, kv)
	addPodsAndPodDisruptionBudgets(defaultConfig, kv)
	makeApiAndControllerReady()
	makeHandlerReady()
	fakeNamespaceModificationEvent()
	shouldExpectNamespacePatch()
	shouldExpectPatchesAndUpdates()
	shouldExpectKubeVirtUpdateStatus(1)
	controller.Execute()
}, 15)
// When the per-resource lastGeneration recorded on the KubeVirt CR does not
// match the live objects, the operator must patch every generation-tracked
// resource back into the desired state.
It("should update KubeVirt object if generation IDs do not match", func(done Done) {
	defer close(done)
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
			Generation: int64(1),
		},
		Status: v1.KubeVirtStatus{
			Phase:           v1.KubeVirtPhaseDeployed,
			OperatorVersion: version.Get().String(),
		},
	}
	// original repeated this 4-call setup block twice; once is enough —
	// the calls are plain setters/condition updates on kv
	defaultConfig.SetTargetDeploymentConfig(kv)
	defaultConfig.SetObservedDeploymentConfig(kv)
	util.UpdateConditionsCreated(kv)
	util.UpdateConditionsAvailable(kv)
	// create all resources which should already exist
	kubecontroller.SetLatestApiVersionAnnotation(kv)
	addKubeVirt(kv)
	addInstallStrategy(defaultConfig)
	addAll(defaultConfig, kv)
	addPodsAndPodDisruptionBudgets(defaultConfig, kv)
	makeApiAndControllerReady()
	makeHandlerReady()
	fakeNamespaceModificationEvent()
	shouldExpectNamespacePatch()
	shouldExpectPatchesAndUpdates()
	shouldExpectKubeVirtUpdateStatus(1)
	// invalidate all lastGeneration versions
	numGenerations := len(kv.Status.Generations)
	for i := range kv.Status.Generations {
		kv.Status.Generations[i].LastGeneration = -1
	}
	controller.Execute()
	// one patch per generation-tracked resource, plus one for the namespace
	Expect(totalPatches).To(Equal(numGenerations + 1))
	// every generation-tracked resource that was added should now have been
	// patched, since its lastGeneration was invalidated on the KubeVirt CR
	// (fixed: the deployments assertion previously used the misspelled key
	// "deployements" on both sides, making it vacuously true)
	Expect(resourceChanges["mutatingwebhookconfigurations"][Patched]).To(Equal(resourceChanges["mutatingwebhookconfigurations"][Added]))
	Expect(resourceChanges["validatingwebhookconfigurations"][Patched]).To(Equal(resourceChanges["validatingwebhookconfigurations"][Added]))
	Expect(resourceChanges["deployments"][Patched]).To(Equal(resourceChanges["deployments"][Added]))
	Expect(resourceChanges["daemonsets"][Patched]).To(Equal(resourceChanges["daemonsets"][Added]))
	Expect(resourceChanges["poddisruptionbudgets"][Patched]).To(Equal(resourceChanges["poddisruptionbudgets"][Added]))
}, 150)
// Operator-labeled resources that are not part of the deployed install
// strategy (the randomly generated ones) must all be deleted — and nothing
// else, hence the exact-count assertion.
It("should delete operator managed resources not in the deployed installstrategy", func() {
	defer GinkgoRecover()
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
			Generation: int64(1),
		},
		Status: v1.KubeVirtStatus{
			Phase:           v1.KubeVirtPhaseDeployed,
			OperatorVersion: version.Get().String(),
		},
	}
	defaultConfig.SetTargetDeploymentConfig(kv)
	defaultConfig.SetObservedDeploymentConfig(kv)
	util.UpdateConditionsDeploying(kv)
	util.UpdateConditionsCreated(kv)
	// keep the caches intact so deletions are counted exactly once
	deleteFromCache = false
	// create all resources which should already exist
	kubecontroller.SetLatestApiVersionAnnotation(kv)
	addKubeVirt(kv)
	addInstallStrategy(defaultConfig)
	addAll(defaultConfig, kv)
	numResources := generateRandomResources()
	addPodsAndPodDisruptionBudgets(defaultConfig, kv)
	makeApiAndControllerReady()
	makeHandlerReady()
	shouldExpectDeletions()
	fakeNamespaceModificationEvent()
	shouldExpectNamespacePatch()
	shouldExpectPatchesAndUpdates()
	shouldExpectKubeVirtUpdateStatus(1)
	controller.Execute()
	// exactly the random (unmanaged-by-strategy) resources are deleted
	Expect(totalDeletions).To(Equal(numResources))
}, 15)
// Only one KubeVirt CR may be active: reconciling a second CR while the
// first is deployed must set a single failure condition with the
// "deployment failed, existing" reason.
It("should fail if KubeVirt object already exists", func() {
	kv1 := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install-1",
			Namespace:  NAMESPACE,
			UID:        "11111111111",
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Status: v1.KubeVirtStatus{
			Phase:           v1.KubeVirtPhaseDeployed,
			OperatorVersion: "v0.0.0-master+$Format:%h$",
		},
	}
	kv2 := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-install-2",
			Namespace: NAMESPACE,
			UID:       "123123123",
		},
		Status: v1.KubeVirtStatus{},
	}
	kubecontroller.SetLatestApiVersionAnnotation(kv1)
	util.UpdateConditionsCreated(kv1)
	util.UpdateConditionsAvailable(kv1)
	addKubeVirt(kv1)
	kubecontroller.SetLatestApiVersionAnnotation(kv2)
	addKubeVirt(kv2)
	shouldExpectKubeVirtUpdateStatusFailureCondition(util.ConditionReasonDeploymentFailedExisting)
	// reconcile the second CR directly by key
	controller.execute(fmt.Sprintf("%s/%s", kv2.Namespace, kv2.Name))
}, 15)
// Changing the image tag/registry on a deployed KubeVirt must make the
// controller create an install-strategy generation job for the new version.
It("should generate install strategy creation job for update version", func(done Done) {
	defer close(done)
	updatedVersion := "1.1.1"
	updatedRegistry := "otherregistry"
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Spec: v1.KubeVirtSpec{
			ImageTag:      updatedVersion,
			ImageRegistry: updatedRegistry,
		},
		Status: v1.KubeVirtStatus{
			Phase:           v1.KubeVirtPhaseDeployed,
			OperatorVersion: version.Get().String(),
		},
	}
	defaultConfig.SetTargetDeploymentConfig(kv)
	defaultConfig.SetObservedDeploymentConfig(kv)
	util.UpdateConditionsCreated(kv)
	util.UpdateConditionsAvailable(kv)
	// create all resources which should already exist
	kubecontroller.SetLatestApiVersionAnnotation(kv)
	addKubeVirt(kv)
	addInstallStrategy(defaultConfig)
	shouldExpectKubeVirtUpdateStatus(1)
	shouldExpectJobCreation()
	controller.Execute()
}, 15)
// Verifies that PassthroughEnvVars configured on the deployment config end up
// as environment variables on the install-strategy generation job's container.
It("should create an install strategy creation job with passthrough env vars, if provided in config", func(done Done) {
	defer close(done)

	cfg := getConfig("registry", "v1.1.1")
	key, value := rand.String(10), rand.String(10)
	cfg.PassthroughEnvVars = map[string]string{key: value}

	strategyJob, err := controller.generateInstallStrategyJob(cfg)
	Expect(err).ToNot(HaveOccurred())

	jobEnv := strategyJob.Spec.Template.Spec.Containers[0].Env
	Expect(jobEnv).To(ContainElement(k8sv1.EnvVar{Name: key, Value: value}))
}, 15)
// Verifies that PassthroughEnvVars propagate into the virt-api deployment's
// container environment.
It("should create an api server deployment with passthrough env vars, if provided in config", func(done Done) {
	defer close(done)

	cfg := getConfig("registry", "v1.1.1")
	key, value := rand.String(10), rand.String(10)
	cfg.PassthroughEnvVars = map[string]string{key: value}

	apiDeployment, err := components.NewApiServerDeployment(NAMESPACE, cfg.GetImageRegistry(), cfg.GetImagePrefix(), cfg.GetApiVersion(), "", "", cfg.GetImagePullPolicy(), cfg.GetVerbosity(), cfg.GetExtraEnv())
	Expect(err).ToNot(HaveOccurred())

	apiEnv := apiDeployment.Spec.Template.Spec.Containers[0].Env
	Expect(apiEnv).To(ContainElement(k8sv1.EnvVar{Name: key, Value: value}))
}, 15)
// Verifies that PassthroughEnvVars propagate into the virt-controller
// deployment's container environment.
It("should create a controller deployment with passthrough env vars, if provided in config", func(done Done) {
	defer close(done)

	cfg := getConfig("registry", "v1.1.1")
	key, value := rand.String(10), rand.String(10)
	cfg.PassthroughEnvVars = map[string]string{key: value}

	controllerDeployment, err := components.NewControllerDeployment(NAMESPACE, cfg.GetImageRegistry(), cfg.GetImagePrefix(), cfg.GetControllerVersion(), cfg.GetLauncherVersion(), "", "", cfg.GetImagePullPolicy(), cfg.GetVerbosity(), cfg.GetExtraEnv())
	Expect(err).ToNot(HaveOccurred())

	controllerEnv := controllerDeployment.Spec.Template.Spec.Containers[0].Env
	Expect(controllerEnv).To(ContainElement(k8sv1.EnvVar{Name: key, Value: value}))
}, 15)
// Verifies that PassthroughEnvVars propagate into the virt-handler
// daemonset's container environment.
It("should create a handler daemonset with passthrough env vars, if provided in config", func(done Done) {
	defer close(done)

	cfg := getConfig("registry", "v1.1.1")
	key, value := rand.String(10), rand.String(10)
	cfg.PassthroughEnvVars = map[string]string{key: value}

	handlerDaemonset, err := components.NewHandlerDaemonSet(NAMESPACE, cfg.GetImageRegistry(), cfg.GetImagePrefix(), cfg.GetHandlerVersion(), "", "", cfg.GetLauncherVersion(), cfg.GetImagePullPolicy(), cfg.GetVerbosity(), cfg.GetExtraEnv())
	Expect(err).ToNot(HaveOccurred())

	handlerEnv := handlerDaemonset.Spec.Template.Spec.Containers[0].Env
	Expect(handlerEnv).To(ContainElement(k8sv1.EnvVar{Name: key, Value: value}))
}, 15)
// A brand new KubeVirt with no install strategy present must trigger creation
// of the install-strategy generation job before anything can be deployed.
It("should generate install strategy creation job if no install strategy exists", func(done Done) {
	defer close(done)
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Status: v1.KubeVirtStatus{},
	}

	// create all resources which should already exist
	kubecontroller.SetLatestApiVersionAnnotation(kv)
	addKubeVirt(kv)

	shouldExpectKubeVirtUpdateStatus(1)
	// Only the strategy job creation is expected on this pass.
	shouldExpectJobCreation()
	controller.Execute()
}, 15)
// The generated install-strategy job's pod template must carry the
// virt-operator app label so the job can be identified later.
It("should label install strategy creation job", func(done Done) {
	defer close(done)

	testKv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Status: v1.KubeVirtStatus{},
	}

	strategyJob, err := controller.generateInstallStrategyJob(util.GetTargetConfigFromKV(testKv))
	Expect(err).ToNot(HaveOccurred())

	templateLabels := strategyJob.Spec.Template.ObjectMeta.Labels
	Expect(templateLabels).Should(HaveKeyWithValue(v1.AppLabel, virtOperatorJobAppLabel))
}, 15)
// A strategy job that completed/failed longer than the back-off window ago
// must be deleted so a fresh job can be created on a later reconcile.
It("should delete install strategy creation job if job has failed", func(done Done) {
	defer close(done)
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Status: v1.KubeVirtStatus{},
	}

	job, err := controller.generateInstallStrategyJob(util.GetTargetConfigFromKV(kv))
	Expect(err).ToNot(HaveOccurred())

	// will only create a new job after 10 seconds has passed.
	// this is just a simple mechanism to prevent spin loops
	// in the event that jobs are fast failing for some unknown reason.
	completionTime := time.Now().Add(time.Duration(-10) * time.Second)
	job.Status.CompletionTime = &metav1.Time{Time: completionTime}

	// create all resources which should already exist
	kubecontroller.SetLatestApiVersionAnnotation(kv)
	addKubeVirt(kv)
	addInstallStrategyJob(job)

	// The stale job is expected to be deleted during the reconcile.
	shouldExpectJobDeletion()
	shouldExpectKubeVirtUpdateStatus(1)

	controller.Execute()
}, 15)
// Counterpart of the previous test: a job that completed just now (inside the
// 10-second back-off window) must be left alone — no deletion is expected.
It("should not delete completed install strategy creation job if job has failed less that 10 seconds ago", func(done Done) {
	defer GinkgoRecover()
	defer close(done)
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Status: v1.KubeVirtStatus{},
	}

	job, err := controller.generateInstallStrategyJob(util.GetTargetConfigFromKV(kv))
	Expect(err).ToNot(HaveOccurred())

	// A CompletionTime of "now" keeps the job inside the back-off window.
	job.Status.CompletionTime = now()

	// create all resources which should already exist
	kubecontroller.SetLatestApiVersionAnnotation(kv)
	addKubeVirt(kv)
	addInstallStrategyJob(job)

	// Note: no job deletion is expected here, only the status update.
	shouldExpectKubeVirtUpdateStatus(1)

	controller.Execute()
}, 15)
// Full first-pass deployment: with an install strategy loaded, the controller
// should create the initial resources, garbage collect the completed strategy
// job, and report the Deploying phase. The assertions pin the expected
// per-cache resource counts.
It("should add resources on create", func() {
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-install",
			Namespace: NAMESPACE,
		},
	}
	kubecontroller.SetLatestApiVersionAnnotation(kv)
	addKubeVirt(kv)
	addInstallStrategy(defaultConfig)

	job, err := controller.generateInstallStrategyJob(util.GetTargetConfigFromKV(kv))
	Expect(err).ToNot(HaveOccurred())

	job.Status.CompletionTime = now()
	addInstallStrategyJob(job)

	// ensure completed jobs are garbage collected once install strategy
	// is loaded
	deleteFromCache = false
	shouldExpectJobDeletion()
	shouldExpectKubeVirtUpdate(1)
	shouldExpectKubeVirtUpdateStatus(1)
	shouldExpectCreations()

	controller.Execute()

	kv = getLatestKubeVirt(kv)
	// The first pass only starts the rollout: phase Deploying, finalizer set.
	Expect(kv.Status.Phase).To(Equal(v1.KubeVirtPhaseDeploying))
	Expect(len(kv.Status.Conditions)).To(Equal(3))
	Expect(len(kv.ObjectMeta.Finalizers)).To(Equal(1))
	shouldExpectHCOConditions(kv, k8sv1.ConditionFalse, k8sv1.ConditionTrue, k8sv1.ConditionFalse)

	// 3 in total are yet missing at this point
	// because waiting on controller, controller's PDB and virt-handler daemonset until API server deploys successfully
	expectedUncreatedResources := 3

	// 1 because a temporary validation webhook is created to block new CRDs until api server is deployed
	expectedTemporaryResources := 1

	Expect(totalAdds).To(Equal(resourceCount - expectedUncreatedResources + expectedTemporaryResources))

	// Per-cache counts of everything the first pass should have created.
	Expect(len(controller.stores.ServiceAccountCache.List())).To(Equal(3))
	Expect(len(controller.stores.ClusterRoleCache.List())).To(Equal(7))
	Expect(len(controller.stores.ClusterRoleBindingCache.List())).To(Equal(5))
	Expect(len(controller.stores.RoleCache.List())).To(Equal(3))
	Expect(len(controller.stores.RoleBindingCache.List())).To(Equal(3))
	Expect(len(controller.stores.CrdCache.List())).To(Equal(8))
	Expect(len(controller.stores.ServiceCache.List())).To(Equal(3))
	Expect(len(controller.stores.DeploymentCache.List())).To(Equal(1))
	Expect(len(controller.stores.DaemonSetCache.List())).To(Equal(0))
	Expect(len(controller.stores.ValidationWebhookCache.List())).To(Equal(3))
	Expect(len(controller.stores.PodDisruptionBudgetCache.List())).To(Equal(1))
	Expect(len(controller.stores.SCCCache.List())).To(Equal(3))
	Expect(len(controller.stores.ServiceMonitorCache.List())).To(Equal(1))
	Expect(len(controller.stores.PrometheusRuleCache.List())).To(Equal(1))

	Expect(resourceChanges["poddisruptionbudgets"][Added]).To(Equal(1))
}, 15)
// When the monitoring namespace is absent, the monitoring integration
// resources (the ServiceMonitor plus one Role/RoleBinding pair) must be
// skipped during deployment.
Context("when the monitor namespace does not exist", func() {
	It("should not create ServiceMonitor resources", func() {
		kv := &v1.KubeVirt{
			ObjectMeta: metav1.ObjectMeta{
				Name:       "test-install",
				Namespace:  NAMESPACE,
				Finalizers: []string{util.KubeVirtFinalizer},
			},
		}
		kubecontroller.SetLatestApiVersionAnnotation(kv)
		addKubeVirt(kv)

		// install strategy config
		resource, _ := install.NewInstallStrategyConfigMap(defaultConfig, false, NAMESPACE)

		resource.Name = fmt.Sprintf("%s-%s", resource.Name, rand.String(10))

		addResource(resource, defaultConfig, nil)

		job, err := controller.generateInstallStrategyJob(util.GetTargetConfigFromKV(kv))
		Expect(err).ToNot(HaveOccurred())

		job.Status.CompletionTime = now()
		addInstallStrategyJob(job)

		// ensure completed jobs are garbage collected once install strategy
		// is loaded
		deleteFromCache = false
		shouldExpectJobDeletion()
		shouldExpectKubeVirtUpdateStatus(1)
		shouldExpectCreations()

		controller.Execute()

		// One Role/RoleBinding fewer than in the default create test (3 each),
		// and no ServiceMonitor at all.
		Expect(len(controller.stores.RoleCache.List())).To(Equal(2))
		Expect(len(controller.stores.RoleBindingCache.List())).To(Equal(2))
		Expect(len(controller.stores.ServiceMonitorCache.List())).To(Equal(0))
	}, 15)
})
// During a rollback the api server must be rolled over first; until that
// happens the virt-controller resources and the namespace patch are held back.
It("should pause rollback until api server is rolled over.", func(done Done) {
	defer close(done)
	defer GinkgoRecover()

	// Target an older version/registry than the currently deployed one.
	rollbackConfig := getConfig("otherregistry", "9.9.7")

	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Spec: v1.KubeVirtSpec{
			ImageTag:      rollbackConfig.GetKubeVirtVersion(),
			ImageRegistry: rollbackConfig.GetImageRegistry(),
		},
		Status: v1.KubeVirtStatus{
			Phase:           v1.KubeVirtPhaseDeployed,
			OperatorVersion: version.Get().String(),
		},
	}
	defaultConfig.SetTargetDeploymentConfig(kv)
	defaultConfig.SetObservedDeploymentConfig(kv)
	util.UpdateConditionsCreated(kv)
	util.UpdateConditionsAvailable(kv)

	// create all resources which should already exist
	kubecontroller.SetLatestApiVersionAnnotation(kv)
	addKubeVirt(kv)
	addInstallStrategy(defaultConfig)
	addInstallStrategy(rollbackConfig)
	addAll(defaultConfig, kv)
	addPodsAndPodDisruptionBudgets(defaultConfig, kv)

	makeApiAndControllerReady()
	makeHandlerReady()

	addToCache = false
	shouldExpectRbacBackupCreations()
	shouldExpectPatchesAndUpdates()
	shouldExpectKubeVirtUpdateStatus(1)

	controller.Execute()

	kv = getLatestKubeVirt(kv)

	// conditions should reflect an ongoing update
	shouldExpectHCOConditions(kv, k8sv1.ConditionTrue, k8sv1.ConditionTrue, k8sv1.ConditionTrue)

	// on rollback or create, api server must be online first before controllers and daemonset.
	// On rollback this prevents someone from posting invalid specs to
	// the cluster from newer versions when an older version is being deployed.
	// On create this prevents invalid specs from entering the cluster
	// while controllers are available to process them.

	// 4 because 2 for virt-controller service and deployment,
	// 1 because of the pdb of virt-controller
	// and another 1 because of the namespace was not patched yet.
	Expect(totalPatches).To(Equal(patchCount - 4))

	// 2 for virt-controller and pdb
	Expect(totalUpdates).To(Equal(updateCount))

	Expect(resourceChanges["poddisruptionbudgets"][Patched]).To(Equal(1))
}, 15)
// During an update, daemonsets roll over first; the update must then pause
// (controllers, api server, PDBs and the namespace stay unpatched) until the
// new pods become ready.
It("should pause update after daemonsets are rolled over", func(done Done) {
	defer close(done)

	updatedConfig := getConfig("otherregistry", "9.9.10")

	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Spec: v1.KubeVirtSpec{
			ImageTag:      updatedConfig.GetKubeVirtVersion(),
			ImageRegistry: updatedConfig.GetImageRegistry(),
		},
		Status: v1.KubeVirtStatus{
			Phase:           v1.KubeVirtPhaseDeployed,
			OperatorVersion: version.Get().String(),
		},
	}
	defaultConfig.SetTargetDeploymentConfig(kv)
	defaultConfig.SetObservedDeploymentConfig(kv)
	util.UpdateConditionsCreated(kv)
	util.UpdateConditionsAvailable(kv)

	// create all resources which should already exist
	kubecontroller.SetLatestApiVersionAnnotation(kv)
	addKubeVirt(kv)
	addInstallStrategy(defaultConfig)
	addInstallStrategy(updatedConfig)
	addAll(defaultConfig, kv)
	addPodsAndPodDisruptionBudgets(defaultConfig, kv)

	makeApiAndControllerReady()
	makeHandlerReady()

	addToCache = false
	shouldExpectRbacBackupCreations()
	shouldExpectPatchesAndUpdates()
	shouldExpectKubeVirtUpdateStatus(1)

	controller.Execute()

	kv = getLatestKubeVirt(kv)

	// conditions should reflect an ongoing update
	shouldExpectHCOConditions(kv, k8sv1.ConditionTrue, k8sv1.ConditionTrue, k8sv1.ConditionTrue)

	Expect(totalUpdates).To(Equal(updateCount))

	// daemonset, controller and apiserver pods are updated in this order.
	// this prevents the new API from coming online until the controllers can manage it.
	// The PDBs will prevent updated pods from getting "ready", so update should pause after
	// daemonsets and before controller and namespace

	// 5 because virt-controller, virt-api, PDBs and the namespace are not patched
	Expect(totalPatches).To(Equal(patchCount - 5))

	// Make sure the 5 unpatched are as expected
	Expect(resourceChanges["deployments"][Patched]).To(Equal(0))          // virt-controller and virt-api unpatched
	Expect(resourceChanges["poddisruptionbudgets"][Patched]).To(Equal(0)) // PDBs unpatched
	Expect(resourceChanges["namespace"][Patched]).To(Equal(0))            // namespace unpatched
}, 15)
// By pretending virt-handler already runs the target version, this test lets
// the update proceed past the daemonset stage and verifies it then pauses
// after the controllers are rolled over (virt-api still unpatched).
It("should pause update after controllers are rolled over", func(done Done) {
	defer close(done)

	updatedConfig := getConfig("otherregistry", "9.9.10")

	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Spec: v1.KubeVirtSpec{
			ImageTag:      updatedConfig.GetKubeVirtVersion(),
			ImageRegistry: updatedConfig.GetImageRegistry(),
		},
		Status: v1.KubeVirtStatus{
			Phase:           v1.KubeVirtPhaseDeployed,
			OperatorVersion: version.Get().String(),
		},
	}
	defaultConfig.SetTargetDeploymentConfig(kv)
	defaultConfig.SetObservedDeploymentConfig(kv)
	util.UpdateConditionsCreated(kv)
	util.UpdateConditionsAvailable(kv)

	// create all resources which should already exist
	kubecontroller.SetLatestApiVersionAnnotation(kv)
	addKubeVirt(kv)
	addInstallStrategy(defaultConfig)
	addInstallStrategy(updatedConfig)
	addAll(defaultConfig, kv)

	// Create virt-api and virt-controller under defaultConfig,
	// but use updatedConfig for virt-handler (hack) to avoid pausing after daemonsets
	addPodsWithIndividualConfigs(defaultConfig, defaultConfig, updatedConfig, true, kv)

	makeApiAndControllerReady()
	makeHandlerReady()

	addToCache = false
	shouldExpectRbacBackupCreations()
	shouldExpectPatchesAndUpdates()
	shouldExpectKubeVirtUpdateStatus(1)

	controller.Execute()

	kv = getLatestKubeVirt(kv)

	// conditions should reflect an ongoing update
	shouldExpectHCOConditions(kv, k8sv1.ConditionTrue, k8sv1.ConditionTrue, k8sv1.ConditionTrue)

	Expect(totalUpdates).To(Equal(updateCount))

	// The update was hacked to avoid pausing after rolling out the daemonsets (virt-handler)
	// That will allow both daemonset and controller pods to get patched before the pause.

	// 3 because virt-api, PDB and the namespace should not be patched
	Expect(totalPatches).To(Equal(patchCount - 3))

	// Make sure the 3 unpatched are as expected
	Expect(resourceChanges["deployments"][Patched]).To(Equal(1))          // virt-operator patched, virt-api unpatched
	Expect(resourceChanges["poddisruptionbudgets"][Patched]).To(Equal(1)) // 1 of 2 PDBs patched
	Expect(resourceChanges["namespace"][Patched]).To(Equal(0))            // namespace unpatched
}, 15)
// When neither imageTag nor imageRegistry is set in the spec, the deployment
// target is derived from the operator's own image (OperatorImageEnvName), so
// changing the operator image must roll all managed resources.
It("should update kubevirt resources when Operator version changes if no imageTag and imageRegistry is explicitly set.", func() {
	os.Setenv(util.OperatorImageEnvName, fmt.Sprintf("%s/virt-operator:%s", "otherregistry", "1.1.1"))
	// Restore the environment afterwards: getConfig("", "") reads this
	// variable, so leaking it would silently change the default config seen
	// by subsequent tests (shared state between tests).
	defer os.Unsetenv(util.OperatorImageEnvName)

	updatedConfig := getConfig("", "")

	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Spec: v1.KubeVirtSpec{},
		Status: v1.KubeVirtStatus{
			Phase:           v1.KubeVirtPhaseDeployed,
			OperatorVersion: version.Get().String(),
		},
	}
	defaultConfig.SetTargetDeploymentConfig(kv)
	defaultConfig.SetObservedDeploymentConfig(kv)
	util.UpdateConditionsCreated(kv)
	util.UpdateConditionsAvailable(kv)

	// create all resources which should already exist
	kubecontroller.SetLatestApiVersionAnnotation(kv)
	addKubeVirt(kv)
	addInstallStrategy(defaultConfig)
	addInstallStrategy(updatedConfig)
	addAll(defaultConfig, kv)
	addPodsAndPodDisruptionBudgets(defaultConfig, kv)

	// pods for the new version are added so this test won't
	// wait for daemonsets to rollover before updating/patching
	// all resources.
	addPodsWithOptionalPodDisruptionBudgets(updatedConfig, false, kv)

	makeApiAndControllerReady()
	makeHandlerReady()

	shouldExpectPatchesAndUpdates()
	shouldExpectKubeVirtUpdateStatus(1)
	fakeNamespaceModificationEvent()
	shouldExpectNamespacePatch()

	controller.Execute()

	kv = getLatestKubeVirt(kv)

	// conditions should reflect a successful update
	shouldExpectHCOConditions(kv, k8sv1.ConditionTrue, k8sv1.ConditionFalse, k8sv1.ConditionFalse)

	Expect(totalPatches).To(Equal(patchCount))
	Expect(totalUpdates).To(Equal(updateCount))

	// ensure every resource is either patched or updated
	// + 1 is for the namespace patch which we don't consider as a resource we own.
	Expect(totalUpdates + totalPatches).To(Equal(resourceCount + 1))

	Expect(resourceChanges["poddisruptionbudgets"][Patched]).To(Equal(2))
}, 15)
// With the new-version pods already present (so the daemonset-rollover pause
// does not apply), changing the KubeVirt version must patch/update every
// managed resource in one pass.
It("should update resources when changing KubeVirt version.", func() {
	updatedConfig := getConfig("otherregistry", "1.1.1")

	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Spec: v1.KubeVirtSpec{
			ImageTag:      updatedConfig.GetKubeVirtVersion(),
			ImageRegistry: updatedConfig.GetImageRegistry(),
		},
		Status: v1.KubeVirtStatus{
			Phase:           v1.KubeVirtPhaseDeployed,
			OperatorVersion: version.Get().String(),
		},
	}
	defaultConfig.SetTargetDeploymentConfig(kv)
	defaultConfig.SetObservedDeploymentConfig(kv)
	util.UpdateConditionsCreated(kv)
	util.UpdateConditionsAvailable(kv)

	// create all resources which should already exist
	kubecontroller.SetLatestApiVersionAnnotation(kv)
	addKubeVirt(kv)
	addInstallStrategy(defaultConfig)
	addInstallStrategy(updatedConfig)
	addAll(defaultConfig, kv)
	addPodsAndPodDisruptionBudgets(defaultConfig, kv)

	// pods for the new version are added so this test won't
	// wait for daemonsets to rollover before updating/patching
	// all resources.
	addPodsWithOptionalPodDisruptionBudgets(updatedConfig, false, kv)

	makeApiAndControllerReady()
	makeHandlerReady()

	shouldExpectPatchesAndUpdates()
	shouldExpectKubeVirtUpdateStatus(1)
	fakeNamespaceModificationEvent()
	shouldExpectNamespacePatch()

	controller.Execute()

	kv = getLatestKubeVirt(kv)

	// conditions should reflect a successful update
	shouldExpectHCOConditions(kv, k8sv1.ConditionTrue, k8sv1.ConditionFalse, k8sv1.ConditionFalse)

	Expect(totalPatches).To(Equal(patchCount))
	Expect(totalUpdates).To(Equal(updateCount))

	// ensure every resource is either patched or updated
	// + 1 is for the namespace patch which we don't consider as a resource we own.
	Expect(totalUpdates + totalPatches).To(Equal(resourceCount + 1))
}, 15)
// Focused variant of the version-change test: verifies specifically that both
// pod disruption budgets get patched during the update. The KubeVirt starts
// with explicit Created/Available conditions instead of the util helpers.
It("should patch poddisruptionbudgets when changing KubeVirt version.", func() {
	updatedConfig := getConfig("otherregistry", "1.1.1")

	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Spec: v1.KubeVirtSpec{
			ImageTag:      updatedConfig.GetKubeVirtVersion(),
			ImageRegistry: updatedConfig.GetImageRegistry(),
		},
		Status: v1.KubeVirtStatus{
			Phase: v1.KubeVirtPhaseDeployed,
			Conditions: []v1.KubeVirtCondition{
				{
					Type:    v1.KubeVirtConditionCreated,
					Status:  k8sv1.ConditionTrue,
					Reason:  util.ConditionReasonDeploymentCreated,
					Message: "All resources were created.",
				},
				{
					Type:    v1.KubeVirtConditionAvailable,
					Status:  k8sv1.ConditionTrue,
					Reason:  util.ConditionReasonDeploymentReady,
					Message: "All components are ready.",
				},
			},
			OperatorVersion: version.Get().String(),
		},
	}
	defaultConfig.SetTargetDeploymentConfig(kv)
	defaultConfig.SetObservedDeploymentConfig(kv)

	// create all resources which should already exist
	kubecontroller.SetLatestApiVersionAnnotation(kv)
	addKubeVirt(kv)
	addInstallStrategy(defaultConfig)
	addInstallStrategy(updatedConfig)
	addAll(defaultConfig, kv)
	addPodsAndPodDisruptionBudgets(defaultConfig, kv)

	// pods for the new version are added so this test won't
	// wait for daemonsets to rollover before updating/patching
	// all resources.
	addPodsWithOptionalPodDisruptionBudgets(updatedConfig, false, kv)

	makeApiAndControllerReady()
	makeHandlerReady()

	shouldExpectPatchesAndUpdates()
	shouldExpectKubeVirtUpdateStatus(1)

	controller.Execute()

	// Both PDBs (virt-api and virt-controller) must have been patched.
	Expect(resourceChanges["poddisruptionbudgets"][Patched]).To(Equal(2))
}, 15)
// Deleting the KubeVirt CR (DeletionTimestamp set) must tear down every
// deployed resource plus the install strategy, and move the CR to the
// Deleted phase.
It("should remove resources on deletion", func() {
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-install",
			Namespace: NAMESPACE,
		},
	}
	// Setting DeletionTimestamp marks the CR as being deleted.
	kv.DeletionTimestamp = now()
	kubecontroller.SetLatestApiVersionAnnotation(kv)
	addKubeVirt(kv)

	// create all resources which should be deleted
	addInstallStrategy(defaultConfig)
	addAll(defaultConfig, kv)

	shouldExpectKubeVirtUpdateStatus(1)
	shouldExpectDeletions()
	shouldExpectInstallStrategyDeletion()

	controller.Execute()

	// Note: in real life during the first execution loop very probably only CRDs are deleted,
	// because that takes some time (see the check that the crd store is empty before going on with deletions)
	// But in this test the deletion succeeds immediately, so everything is deleted on first try
	Expect(totalDeletions).To(Equal(resourceCount))

	kv = getLatestKubeVirt(kv)
	Expect(kv.Status.Phase).To(Equal(v1.KubeVirtPhaseDeleted))
	Expect(len(kv.Status.Conditions)).To(Equal(3))
	shouldExpectHCOConditions(kv, k8sv1.ConditionFalse, k8sv1.ConditionFalse, k8sv1.ConditionTrue)
}, 15)
// Focused variant of the deletion test: verifies that both pod disruption
// budgets are among the deleted resources.
It("should remove poddisruptionbudgets on deletion", func() {
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-install",
			Namespace: NAMESPACE,
		},
	}
	// Setting DeletionTimestamp marks the CR as being deleted.
	kv.DeletionTimestamp = now()
	kubecontroller.SetLatestApiVersionAnnotation(kv)
	addKubeVirt(kv)

	// create all resources which should be deleted
	addInstallStrategy(defaultConfig)
	addAll(defaultConfig, kv)

	shouldExpectKubeVirtUpdateStatus(1)
	shouldExpectDeletions()
	shouldExpectInstallStrategyDeletion()

	controller.Execute()

	Expect(resourceChanges["poddisruptionbudgets"][Deleted]).To(Equal(2))
}, 15)
})
// Verifies that DumpInstallStrategyToConfigMap posts a config map named
// kubevirt-install-strategy-* whose annotations identify the version,
// registry and deployment id of the running config, and whose data carries
// the rendered manifests.
Context("On install strategy dump", func() {
	It("should generate latest install strategy and post as config map", func(done Done) {
		defer close(done)

		config, err := util.GetConfigFromEnv()
		Expect(err).ToNot(HaveOccurred())

		// Intercept the config map creation and validate its metadata/payload.
		kubeClient.Fake.PrependReactor("create", "configmaps", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
			create, ok := action.(testing.CreateAction)
			Expect(ok).To(BeTrue())

			configMap := create.GetObject().(*k8sv1.ConfigMap)
			Expect(configMap.GenerateName).To(Equal("kubevirt-install-strategy-"))

			version, ok := configMap.ObjectMeta.Annotations[v1.InstallStrategyVersionAnnotation]
			Expect(ok).To(BeTrue())
			Expect(version).To(Equal(config.GetKubeVirtVersion()))

			registry, ok := configMap.ObjectMeta.Annotations[v1.InstallStrategyRegistryAnnotation]
			Expect(ok).To(BeTrue())
			Expect(registry).To(Equal(config.GetImageRegistry()))

			id, ok := configMap.ObjectMeta.Annotations[v1.InstallStrategyIdentifierAnnotation]
			Expect(ok).To(BeTrue())
			Expect(id).To(Equal(config.GetDeploymentID()))

			_, ok = configMap.Data["manifests"]
			Expect(ok).To(BeTrue())

			return true, create.GetObject(), nil
		})

		// This generates and posts the install strategy config map.
		// The returned error was previously dropped, which would let a failed
		// dump pass the test without the reactor above ever firing.
		err = install.DumpInstallStrategyToConfigMap(virtClient, NAMESPACE)
		Expect(err).ToNot(HaveOccurred())
	}, 15)
})
})
// now returns the current wall-clock time wrapped as a *metav1.Time, which is
// convenient for assigning to pointer-typed timestamp fields (e.g.
// DeletionTimestamp, Job completion times) in these tests.
func now() *metav1.Time {
	t := metav1.Now()
	return &t
}
Create test state struct to decouple tests for parallel run
Create a test data struct in order to separate the required test state
(mocks, recording structures, etc.) from the actual test run, so that we no
longer share state between tests. This eliminates the data races.
Also move the test behavior onto the struct so that it uses the per-test
state rather than the shared package-level state.
Increase the timeouts for tests that regularly fail during parallel execution.
Signed-off-by: Daniel Hiller <7d48ffa1ba2c479d4ee671710c0799f4b0a3eb8f@redhat.com>
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2017 Red Hat, Inc.
*
*/
package virt_operator
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/golang/mock/gomock"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"kubevirt.io/kubevirt/pkg/certificates/triple/cert"
promv1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1"
secv1 "github.com/openshift/api/security/v1"
secv1fake "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
k8sv1 "k8s.io/api/core/v1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
rbacv1 "k8s.io/api/rbac/v1"
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
extclientfake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
framework "k8s.io/client-go/tools/cache/testing"
"k8s.io/client-go/tools/record"
apiregv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
v1 "kubevirt.io/client-go/api/v1"
promclientfake "kubevirt.io/client-go/generated/prometheus-operator/clientset/versioned/fake"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/version"
kubecontroller "kubevirt.io/kubevirt/pkg/controller"
"kubevirt.io/kubevirt/pkg/testutils"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/apply"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/components"
install "kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/install"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/rbac"
"kubevirt.io/kubevirt/pkg/virt-operator/util"
)
const (
	// Change-kind keys used in the resourceChanges bookkeeping maps.
	Added   = "added"
	Updated = "updated"
	Patched = "patched"
	Deleted = "deleted"

	// NAMESPACE is the installation namespace used throughout these tests.
	NAMESPACE = "kubevirt-test"

	// Expected totals for a full deployment, asserted against the counters
	// recorded during controller.Execute(). These must be kept in sync with
	// the resources contained in the generated install strategy.
	resourceCount = 53
	patchCount    = 34
	updateCount   = 20
)
// KubeVirtTestData bundles everything a single test needs: gomock controllers,
// fake clients, informer sources, the controller under test and the counters
// used to record resource changes. Keeping this state per test (rather than
// in shared package-level variables) decouples tests from one another and
// eliminates data races during parallel runs.
type KubeVirtTestData struct {
	// Mocking infrastructure for the KubeVirt CR and the APIService client.
	ctrl             *gomock.Controller
	kvInterface      *kubecli.MockKubeVirtInterface
	kvSource         *framework.FakeControllerSource
	kvInformer       cache.SharedIndexInformer
	apiServiceClient *install.MockAPIServiceInterface

	// Fake controller sources feeding the informers, one per watched resource type.
	serviceAccountSource           *framework.FakeControllerSource
	clusterRoleSource              *framework.FakeControllerSource
	clusterRoleBindingSource       *framework.FakeControllerSource
	roleSource                     *framework.FakeControllerSource
	roleBindingSource              *framework.FakeControllerSource
	crdSource                      *framework.FakeControllerSource
	serviceSource                  *framework.FakeControllerSource
	deploymentSource               *framework.FakeControllerSource
	daemonSetSource                *framework.FakeControllerSource
	validatingWebhookSource        *framework.FakeControllerSource
	mutatingWebhookSource          *framework.FakeControllerSource
	apiserviceSource               *framework.FakeControllerSource
	sccSource                      *framework.FakeControllerSource
	installStrategyConfigMapSource *framework.FakeControllerSource
	installStrategyJobSource       *framework.FakeControllerSource
	infrastructurePodSource        *framework.FakeControllerSource
	podDisruptionBudgetSource      *framework.FakeControllerSource
	serviceMonitorSource           *framework.FakeControllerSource
	namespaceSource                *framework.FakeControllerSource
	prometheusRuleSource           *framework.FakeControllerSource
	secretsSource                  *framework.FakeControllerSource
	configMapSource                *framework.FakeControllerSource

	// stop closes the informer/controller run loops at the end of a test.
	stop       chan struct{}
	controller *KubeVirtController

	// Event recorder and a wrapped work queue used to detect when processing
	// of updates has finished.
	recorder  *record.FakeRecorder
	mockQueue *testutils.MockWorkQueue

	// Fake clients backing the mocked KubevirtClient.
	virtClient *kubecli.MockKubevirtClient
	kubeClient *fake.Clientset
	secClient  *secv1fake.FakeSecurityV1
	extClient  *extclientfake.Clientset
	promClient *promclientfake.Clientset

	// Informers and their stores handed to the controller under test.
	informers util.Informers
	stores    util.Stores

	// Counters and per-resource change records populated by the fake client
	// reactors; tests assert against these.
	totalAdds       int
	totalUpdates    int
	totalPatches    int
	totalDeletions  int
	resourceChanges map[string]map[string]int

	// Flags controlling whether reactor callbacks mirror changes into the caches.
	deleteFromCache bool
	addToCache      bool

	// defaultConfig is the deployment config derived from the environment.
	defaultConfig *util.KubeVirtDeploymentConfig
}
func (k *KubeVirtTestData) BeforeTest() {
k.defaultConfig = getConfig("", "")
k.totalAdds = 0
k.totalUpdates = 0
k.totalPatches = 0
k.totalDeletions = 0
k.resourceChanges = make(map[string]map[string]int)
k.deleteFromCache = true
k.addToCache = true
k.stop = make(chan struct{})
k.ctrl = gomock.NewController(GinkgoT())
k.virtClient = kubecli.NewMockKubevirtClient(k.ctrl)
k.kvInterface = kubecli.NewMockKubeVirtInterface(k.ctrl)
k.apiServiceClient = install.NewMockAPIServiceInterface(k.ctrl)
k.kvInformer, k.kvSource = testutils.NewFakeInformerFor(&v1.KubeVirt{})
k.recorder = record.NewFakeRecorder(100)
k.informers.ServiceAccount, k.serviceAccountSource = testutils.NewFakeInformerFor(&k8sv1.ServiceAccount{})
k.stores.ServiceAccountCache = k.informers.ServiceAccount.GetStore()
k.informers.ClusterRole, k.clusterRoleSource = testutils.NewFakeInformerFor(&rbacv1.ClusterRole{})
k.stores.ClusterRoleCache = k.informers.ClusterRole.GetStore()
k.informers.ClusterRoleBinding, k.clusterRoleBindingSource = testutils.NewFakeInformerFor(&rbacv1.ClusterRoleBinding{})
k.stores.ClusterRoleBindingCache = k.informers.ClusterRoleBinding.GetStore()
k.informers.Role, k.roleSource = testutils.NewFakeInformerFor(&rbacv1.Role{})
k.stores.RoleCache = k.informers.Role.GetStore()
k.informers.RoleBinding, k.roleBindingSource = testutils.NewFakeInformerFor(&rbacv1.RoleBinding{})
k.stores.RoleBindingCache = k.informers.RoleBinding.GetStore()
k.informers.Crd, k.crdSource = testutils.NewFakeInformerFor(&extv1.CustomResourceDefinition{})
k.stores.CrdCache = k.informers.Crd.GetStore()
k.informers.Service, k.serviceSource = testutils.NewFakeInformerFor(&k8sv1.Service{})
k.stores.ServiceCache = k.informers.Service.GetStore()
k.informers.Deployment, k.deploymentSource = testutils.NewFakeInformerFor(&appsv1.Deployment{})
k.stores.DeploymentCache = k.informers.Deployment.GetStore()
k.informers.DaemonSet, k.daemonSetSource = testutils.NewFakeInformerFor(&appsv1.DaemonSet{})
k.stores.DaemonSetCache = k.informers.DaemonSet.GetStore()
k.informers.ValidationWebhook, k.validatingWebhookSource = testutils.NewFakeInformerFor(&admissionregistrationv1.ValidatingWebhookConfiguration{})
k.stores.ValidationWebhookCache = k.informers.ValidationWebhook.GetStore()
k.informers.MutatingWebhook, k.mutatingWebhookSource = testutils.NewFakeInformerFor(&admissionregistrationv1.MutatingWebhookConfiguration{})
k.stores.MutatingWebhookCache = k.informers.MutatingWebhook.GetStore()
k.informers.APIService, k.apiserviceSource = testutils.NewFakeInformerFor(&apiregv1.APIService{})
k.stores.APIServiceCache = k.informers.APIService.GetStore()
k.informers.SCC, k.sccSource = testutils.NewFakeInformerFor(&secv1.SecurityContextConstraints{})
k.stores.SCCCache = k.informers.SCC.GetStore()
k.informers.InstallStrategyConfigMap, k.installStrategyConfigMapSource = testutils.NewFakeInformerFor(&k8sv1.ConfigMap{})
k.stores.InstallStrategyConfigMapCache = k.informers.InstallStrategyConfigMap.GetStore()
k.informers.InstallStrategyJob, k.installStrategyJobSource = testutils.NewFakeInformerFor(&batchv1.Job{})
k.stores.InstallStrategyJobCache = k.informers.InstallStrategyJob.GetStore()
k.informers.InfrastructurePod, k.infrastructurePodSource = testutils.NewFakeInformerFor(&k8sv1.Pod{})
k.stores.InfrastructurePodCache = k.informers.InfrastructurePod.GetStore()
k.informers.PodDisruptionBudget, k.podDisruptionBudgetSource = testutils.NewFakeInformerFor(&policyv1beta1.PodDisruptionBudget{})
k.stores.PodDisruptionBudgetCache = k.informers.PodDisruptionBudget.GetStore()
k.informers.Namespace, k.namespaceSource = testutils.NewFakeInformerWithIndexersFor(
&k8sv1.Namespace{}, cache.Indexers{
"namespace_name": func(obj interface{}) ([]string, error) {
return []string{obj.(*k8sv1.Namespace).GetName()}, nil
},
})
k.stores.NamespaceCache = k.informers.Namespace.GetStore()
// test OpenShift components
k.stores.IsOnOpenshift = true
k.informers.ServiceMonitor, k.serviceMonitorSource = testutils.NewFakeInformerFor(&promv1.ServiceMonitor{Spec: promv1.ServiceMonitorSpec{}})
k.stores.ServiceMonitorCache = k.informers.ServiceMonitor.GetStore()
k.stores.ServiceMonitorEnabled = true
k.informers.PrometheusRule, k.prometheusRuleSource = testutils.NewFakeInformerFor(&promv1.PrometheusRule{Spec: promv1.PrometheusRuleSpec{}})
k.stores.PrometheusRuleCache = k.informers.PrometheusRule.GetStore()
k.stores.PrometheusRulesEnabled = true
k.informers.Secrets, k.secretsSource = testutils.NewFakeInformerFor(&k8sv1.Secret{})
k.stores.SecretCache = k.informers.Secrets.GetStore()
k.informers.ConfigMap, k.configMapSource = testutils.NewFakeInformerFor(&k8sv1.ConfigMap{})
k.stores.ConfigMapCache = k.informers.ConfigMap.GetStore()
k.controller = NewKubeVirtController(k.virtClient, k.apiServiceClient, k.kvInformer, k.recorder, k.stores, k.informers, NAMESPACE)
// Wrap our workqueue to have a way to detect when we are done processing updates
k.mockQueue = testutils.NewMockWorkQueue(k.controller.queue)
k.controller.queue = k.mockQueue
// Set up mock client
k.virtClient.EXPECT().KubeVirt(NAMESPACE).Return(k.kvInterface).AnyTimes()
k.kubeClient = fake.NewSimpleClientset()
k.secClient = &secv1fake.FakeSecurityV1{
Fake: &fake.NewSimpleClientset().Fake,
}
k.extClient = extclientfake.NewSimpleClientset()
k.promClient = promclientfake.NewSimpleClientset()
k.virtClient.EXPECT().AdmissionregistrationV1().Return(k.kubeClient.AdmissionregistrationV1()).AnyTimes()
k.virtClient.EXPECT().CoreV1().Return(k.kubeClient.CoreV1()).AnyTimes()
k.virtClient.EXPECT().BatchV1().Return(k.kubeClient.BatchV1()).AnyTimes()
k.virtClient.EXPECT().RbacV1().Return(k.kubeClient.RbacV1()).AnyTimes()
k.virtClient.EXPECT().AppsV1().Return(k.kubeClient.AppsV1()).AnyTimes()
k.virtClient.EXPECT().SecClient().Return(k.secClient).AnyTimes()
k.virtClient.EXPECT().ExtensionsClient().Return(k.extClient).AnyTimes()
k.virtClient.EXPECT().PolicyV1beta1().Return(k.kubeClient.PolicyV1beta1()).AnyTimes()
k.virtClient.EXPECT().PrometheusClient().Return(k.promClient).AnyTimes()
// Make sure that all unexpected calls to kubeClient will fail
k.kubeClient.Fake.PrependReactor("*", "*", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
if action.GetVerb() == "get" && action.GetResource().Resource == "secrets" {
return true, nil, errors.NewNotFound(schema.GroupResource{Group: "", Resource: "secrets"}, "whatever")
}
if action.GetVerb() == "get" && action.GetResource().Resource == "validatingwebhookconfigurations" {
return true, nil, errors.NewNotFound(schema.GroupResource{Group: "", Resource: "validatingwebhookconfigurations"}, "whatever")
}
if action.GetVerb() == "get" && action.GetResource().Resource == "mutatingwebhookconfigurations" {
return true, nil, errors.NewNotFound(schema.GroupResource{Group: "", Resource: "mutatingwebhookconfigurations"}, "whatever")
}
if action.GetVerb() != "get" || action.GetResource().Resource != "namespaces" {
Expect(action).To(BeNil())
}
return true, nil, nil
})
k.apiServiceClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil, errors.NewNotFound(schema.GroupResource{Group: "", Resource: "apiservices"}, "whatever"))
k.secClient.Fake.PrependReactor("*", "*", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
Expect(action).To(BeNil())
return true, nil, nil
})
k.extClient.Fake.PrependReactor("*", "*", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
Expect(action).To(BeNil())
return true, nil, nil
})
k.promClient.Fake.PrependReactor("*", "*", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
Expect(action).To(BeNil())
return true, nil, nil
})
syncCaches(k.stop, k.kvInformer, k.informers)
// add the privileged SCC without KubeVirt accounts
scc := getSCC()
k.sccSource.Add(&scc)
k.deleteFromCache = true
k.addToCache = true
}
// AfterTest tears down the fixture after each spec: it stops the informer
// goroutines, asserts that every expected event was consumed by the test,
// and verifies all gomock expectations.
func (k *KubeVirtTestData) AfterTest() {
	close(k.stop)
	// Ensure that we add checks for expected events to every test
	Expect(k.recorder.Events).To(BeEmpty())
	k.ctrl.Finish()
}
// shouldExpectKubeVirtUpdate expects exactly `times` Update calls on the mocked
// KubeVirt client. Each update is mirrored into the informer store so the
// controller observes its own write, and the updated object is echoed back as
// the mock's return value.
func (k *KubeVirtTestData) shouldExpectKubeVirtUpdate(times int) {
	update := k.kvInterface.EXPECT().Update(gomock.Any())
	update.Do(func(kv *v1.KubeVirt) {
		k.kvInformer.GetStore().Update(kv)
		// Return the object that was passed in, mimicking a successful server update.
		update.Return(kv, nil)
	}).Times(times)
}
// shouldExpectKubeVirtUpdateStatus expects exactly `times` UpdateStatus calls
// on the mocked KubeVirt client, mirroring each status update into the
// informer store and echoing the object back as the mock result.
func (k *KubeVirtTestData) shouldExpectKubeVirtUpdateStatus(times int) {
	update := k.kvInterface.EXPECT().UpdateStatus(gomock.Any())
	update.Do(func(kv *v1.KubeVirt) {
		k.kvInformer.GetStore().Update(kv)
		update.Return(kv, nil)
	}).Times(times)
}
// shouldExpectKubeVirtUpdateStatusVersion expects `times` UpdateStatus calls
// and additionally asserts that both the target and observed KubeVirt versions
// in the status match the version from the given deployment config.
func (k *KubeVirtTestData) shouldExpectKubeVirtUpdateStatusVersion(times int, config *util.KubeVirtDeploymentConfig) {
	update := k.kvInterface.EXPECT().UpdateStatus(gomock.Any())
	update.Do(func(kv *v1.KubeVirt) {
		Expect(kv.Status.TargetKubeVirtVersion).To(Equal(config.GetKubeVirtVersion()))
		Expect(kv.Status.ObservedKubeVirtVersion).To(Equal(config.GetKubeVirtVersion()))
		k.kvInformer.GetStore().Update(kv)
		update.Return(kv, nil)
	}).Times(times)
}
// shouldExpectKubeVirtUpdateStatusFailureCondition expects a single
// UpdateStatus call whose status carries exactly one condition with the given
// failure reason.
func (k *KubeVirtTestData) shouldExpectKubeVirtUpdateStatusFailureCondition(reason string) {
	update := k.kvInterface.EXPECT().UpdateStatus(gomock.Any())
	update.Do(func(kv *v1.KubeVirt) {
		Expect(len(kv.Status.Conditions)).To(Equal(1))
		Expect(kv.Status.Conditions[0].Reason).To(Equal(reason))
		k.kvInformer.GetStore().Update(kv)
		update.Return(kv, nil)
	}).Times(1)
}
// addKubeVirt publishes a KubeVirt CR through the fake informer source and
// blocks until the controller's work queue has registered the resulting add.
func (k *KubeVirtTestData) addKubeVirt(kv *v1.KubeVirt) {
	k.mockQueue.ExpectAdds(1)
	k.kvSource.Add(kv)
	k.mockQueue.Wait()
}
// getLatestKubeVirt returns the current copy of the given KubeVirt object from
// the informer store, or nil when it is absent or not a *v1.KubeVirt.
func (k *KubeVirtTestData) getLatestKubeVirt(kv *v1.KubeVirt) *v1.KubeVirt {
	key := kv.GetNamespace() + "/" + kv.GetName()
	obj, exists, _ := k.kvInformer.GetStore().GetByKey(key)
	if !exists {
		return nil
	}
	latest, ok := obj.(*v1.KubeVirt)
	if !ok {
		return nil
	}
	return latest
}
// shouldExpectDeletions registers delete reactors for every managed resource
// kind on all fake clients, plus a delete expectation on the mocked APIService
// client, all funneling into the shared counting delete handler.
func (k *KubeVirtTestData) shouldExpectDeletions() {
	genericDeleteFunc := k.genericDeleteFunc()

	// Resources served by the core fake clientset.
	kubeResources := []string{
		"serviceaccounts", "clusterroles", "clusterrolebindings", "roles", "rolebindings",
		"services", "deployments", "daemonsets",
		"validatingwebhookconfigurations", "mutatingwebhookconfigurations",
		"secrets", "configmaps", "poddisruptionbudgets",
	}
	for _, resource := range kubeResources {
		k.kubeClient.Fake.PrependReactor("delete", resource, genericDeleteFunc)
	}
	k.extClient.Fake.PrependReactor("delete", "customresourcedefinitions", genericDeleteFunc)
	k.secClient.Fake.PrependReactor("delete", "securitycontextconstraints", genericDeleteFunc)
	for _, resource := range []string{"servicemonitors", "prometheusrules"} {
		k.promClient.Fake.PrependReactor("delete", resource, genericDeleteFunc)
	}
	// APIServices go through a gomock client rather than a fake clientset, so
	// translate the mocked Delete into a synthetic DeleteAction for counting.
	k.apiServiceClient.EXPECT().Delete(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Do(func(ctx context.Context, name string, options interface{}) {
		genericDeleteFunc(&testing.DeleteActionImpl{ActionImpl: testing.ActionImpl{Resource: schema.GroupVersionResource{Resource: "apiservices"}}, Name: name})
	})
}
// genericDeleteFunc builds a reactor that counts every delete, derives the
// store key (namespace/name or just name for cluster-scoped objects), and
// optionally removes the object from the corresponding informer cache.
func (k *KubeVirtTestData) genericDeleteFunc() func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	return func(action testing.Action) (handled bool, obj runtime.Object, err error) {
		deleteAction, ok := action.(testing.DeleteAction)
		Expect(ok).To(BeTrue())
		k.totalDeletions++

		key := deleteAction.GetName()
		if ns := deleteAction.GetNamespace(); len(ns) > 0 {
			key = ns + "/" + key
		}
		if k.deleteFromCache {
			k.deleteResource(deleteAction.GetResource().Resource, key)
		}
		return true, nil, nil
	}
}
// deleteResource dispatches a cache deletion to the helper matching the
// resource's plural name and records the deletion in k.resourceChanges.
// Unknown resource names fail the spec.
func (k *KubeVirtTestData) deleteResource(resource string, key string) {
	handlers := map[string]func(string){
		"serviceaccounts":                 k.deleteServiceAccount,
		"clusterroles":                    k.deleteClusterRole,
		"clusterrolebindings":             k.deleteClusterRoleBinding,
		"roles":                           k.deleteRole,
		"rolebindings":                    k.deleteRoleBinding,
		"customresourcedefinitions":       k.deleteCrd,
		"services":                        k.deleteService,
		"deployments":                     k.deleteDeployment,
		"daemonsets":                      k.deleteDaemonset,
		"validatingwebhookconfigurations": k.deleteValidationWebhook,
		"mutatingwebhookconfigurations":   k.deleteMutatingWebhook,
		"apiservices":                     k.deleteAPIService,
		"jobs":                            k.deleteInstallStrategyJob,
		"configmaps":                      k.deleteConfigMap,
		"poddisruptionbudgets":            k.deletePodDisruptionBudget,
		"secrets":                         k.deleteSecret,
		"securitycontextconstraints":      k.deleteSCC,
		"servicemonitors":                 k.deleteServiceMonitor,
		"prometheusrules":                 k.deletePrometheusRule,
	}

	handler, known := handlers[resource]
	if !known {
		Fail(fmt.Sprintf("unknown resource type %+v", resource))
	}
	handler(key)

	if _, ok := k.resourceChanges[resource]; !ok {
		k.resourceChanges[resource] = make(map[string]int)
	}
	k.resourceChanges[resource][Deleted]++
}
// deleteServiceAccount removes the ServiceAccount with the given store key from
// the fake informer source and waits for the controller queue to see the event.
func (k *KubeVirtTestData) deleteServiceAccount(key string) {
	k.mockQueue.ExpectAdds(1)
	if obj, exists, _ := k.informers.ServiceAccount.GetStore().GetByKey(key); exists {
		k.serviceAccountSource.Delete(obj.(runtime.Object))
	}
	// NOTE(review): if the key is absent no event fires and Wait may block;
	// callers are expected to only delete keys known to be cached.
	k.mockQueue.Wait()
}
// deleteClusterRole removes the ClusterRole with the given store key from the
// fake informer source and waits for the queue to observe the event.
func (k *KubeVirtTestData) deleteClusterRole(key string) {
	k.mockQueue.ExpectAdds(1)
	if obj, exists, _ := k.informers.ClusterRole.GetStore().GetByKey(key); exists {
		k.clusterRoleSource.Delete(obj.(runtime.Object))
	}
	k.mockQueue.Wait()
}
// deleteClusterRoleBinding removes the ClusterRoleBinding with the given store
// key from the fake informer source and waits for the queue event.
func (k *KubeVirtTestData) deleteClusterRoleBinding(key string) {
	k.mockQueue.ExpectAdds(1)
	if obj, exists, _ := k.informers.ClusterRoleBinding.GetStore().GetByKey(key); exists {
		k.clusterRoleBindingSource.Delete(obj.(runtime.Object))
	}
	k.mockQueue.Wait()
}
// deleteRole removes the Role with the given store key from the fake informer
// source and waits for the queue event.
func (k *KubeVirtTestData) deleteRole(key string) {
	k.mockQueue.ExpectAdds(1)
	if obj, exists, _ := k.informers.Role.GetStore().GetByKey(key); exists {
		k.roleSource.Delete(obj.(runtime.Object))
	}
	k.mockQueue.Wait()
}
// deleteRoleBinding removes the RoleBinding with the given store key from the
// fake informer source and waits for the queue event.
func (k *KubeVirtTestData) deleteRoleBinding(key string) {
	k.mockQueue.ExpectAdds(1)
	if obj, exists, _ := k.informers.RoleBinding.GetStore().GetByKey(key); exists {
		k.roleBindingSource.Delete(obj.(runtime.Object))
	}
	k.mockQueue.Wait()
}
// deleteCrd removes the CustomResourceDefinition with the given store key from
// the fake informer source and waits for the queue event.
func (k *KubeVirtTestData) deleteCrd(key string) {
	k.mockQueue.ExpectAdds(1)
	if obj, exists, _ := k.informers.Crd.GetStore().GetByKey(key); exists {
		k.crdSource.Delete(obj.(runtime.Object))
	}
	k.mockQueue.Wait()
}
// deleteService removes the Service with the given store key from the fake
// informer source and waits for the queue event.
func (k *KubeVirtTestData) deleteService(key string) {
	k.mockQueue.ExpectAdds(1)
	if obj, exists, _ := k.informers.Service.GetStore().GetByKey(key); exists {
		k.serviceSource.Delete(obj.(runtime.Object))
	}
	k.mockQueue.Wait()
}
// deleteDeployment removes the Deployment with the given store key from the
// fake informer source and waits for the queue event.
func (k *KubeVirtTestData) deleteDeployment(key string) {
	k.mockQueue.ExpectAdds(1)
	if obj, exists, _ := k.informers.Deployment.GetStore().GetByKey(key); exists {
		k.deploymentSource.Delete(obj.(runtime.Object))
	}
	k.mockQueue.Wait()
}
// deleteDaemonset removes the DaemonSet with the given store key from the fake
// informer source and waits for the queue event.
func (k *KubeVirtTestData) deleteDaemonset(key string) {
	k.mockQueue.ExpectAdds(1)
	if obj, exists, _ := k.informers.DaemonSet.GetStore().GetByKey(key); exists {
		k.daemonSetSource.Delete(obj.(runtime.Object))
	}
	k.mockQueue.Wait()
}
// deleteValidationWebhook removes the ValidatingWebhookConfiguration with the
// given store key from the fake informer source and waits for the queue event.
func (k *KubeVirtTestData) deleteValidationWebhook(key string) {
	k.mockQueue.ExpectAdds(1)
	if obj, exists, _ := k.informers.ValidationWebhook.GetStore().GetByKey(key); exists {
		k.validatingWebhookSource.Delete(obj.(runtime.Object))
	}
	k.mockQueue.Wait()
}
// deleteMutatingWebhook removes the MutatingWebhookConfiguration with the given
// store key from the fake informer source and waits for the queue event.
func (k *KubeVirtTestData) deleteMutatingWebhook(key string) {
	k.mockQueue.ExpectAdds(1)
	if obj, exists, _ := k.informers.MutatingWebhook.GetStore().GetByKey(key); exists {
		k.mutatingWebhookSource.Delete(obj.(runtime.Object))
	}
	k.mockQueue.Wait()
}
// deleteAPIService removes the APIService with the given store key from the
// fake informer source and waits for the queue event.
func (k *KubeVirtTestData) deleteAPIService(key string) {
	k.mockQueue.ExpectAdds(1)
	if obj, exists, _ := k.informers.APIService.GetStore().GetByKey(key); exists {
		k.apiserviceSource.Delete(obj.(runtime.Object))
	}
	k.mockQueue.Wait()
}
// deleteInstallStrategyJob removes the install-strategy Job with the given
// store key from the fake informer source and waits for the queue event.
func (k *KubeVirtTestData) deleteInstallStrategyJob(key string) {
	k.mockQueue.ExpectAdds(1)
	if obj, exists, _ := k.informers.InstallStrategyJob.GetStore().GetByKey(key); exists {
		k.installStrategyJobSource.Delete(obj.(runtime.Object))
	}
	k.mockQueue.Wait()
}
// deletePodDisruptionBudget removes the PodDisruptionBudget with the given
// store key from the fake informer source and waits for the queue event.
func (k *KubeVirtTestData) deletePodDisruptionBudget(key string) {
	k.mockQueue.ExpectAdds(1)
	if obj, exists, _ := k.informers.PodDisruptionBudget.GetStore().GetByKey(key); exists {
		k.podDisruptionBudgetSource.Delete(obj.(runtime.Object))
	}
	k.mockQueue.Wait()
}
// deleteSecret removes the Secret with the given store key from the fake
// informer source and waits for the queue event.
func (k *KubeVirtTestData) deleteSecret(key string) {
	k.mockQueue.ExpectAdds(1)
	if obj, exists, _ := k.informers.Secrets.GetStore().GetByKey(key); exists {
		k.secretsSource.Delete(obj.(runtime.Object))
	}
	k.mockQueue.Wait()
}
// deleteConfigMap removes the ConfigMap with the given store key from whichever
// fake informer tracks it: the regular ConfigMap informer first, falling back
// to the install-strategy ConfigMap informer. ConfigMaps are split across two
// informers, so both stores must be consulted.
func (k *KubeVirtTestData) deleteConfigMap(key string) {
	k.mockQueue.ExpectAdds(1)
	if obj, exists, _ := k.informers.ConfigMap.GetStore().GetByKey(key); exists {
		configMap := obj.(*k8sv1.ConfigMap)
		k.configMapSource.Delete(configMap)
	} else if obj, exists, _ := k.informers.InstallStrategyConfigMap.GetStore().GetByKey(key); exists {
		configMap := obj.(*k8sv1.ConfigMap)
		k.installStrategyConfigMapSource.Delete(configMap)
	}
	k.mockQueue.Wait()
}
// deleteSCC removes the SecurityContextConstraints with the given store key
// from the fake informer source and waits for the queue event.
func (k *KubeVirtTestData) deleteSCC(key string) {
	k.mockQueue.ExpectAdds(1)
	if obj, exists, _ := k.informers.SCC.GetStore().GetByKey(key); exists {
		k.sccSource.Delete(obj.(runtime.Object))
	}
	k.mockQueue.Wait()
}
// deleteServiceMonitor removes the ServiceMonitor with the given store key from
// the fake informer source and waits for the queue event.
func (k *KubeVirtTestData) deleteServiceMonitor(key string) {
	k.mockQueue.ExpectAdds(1)
	if obj, exists, _ := k.informers.ServiceMonitor.GetStore().GetByKey(key); exists {
		k.serviceMonitorSource.Delete(obj.(runtime.Object))
	}
	k.mockQueue.Wait()
}
// deletePrometheusRule removes the PrometheusRule with the given store key from
// the fake informer source and waits for the queue event.
func (k *KubeVirtTestData) deletePrometheusRule(key string) {
	k.mockQueue.ExpectAdds(1)
	if obj, exists, _ := k.informers.PrometheusRule.GetStore().GetByKey(key); exists {
		k.prometheusRuleSource.Delete(obj.(runtime.Object))
	}
	k.mockQueue.Wait()
}
// shouldExpectPatchesAndUpdates wires patch/update reactors onto every fake
// client for the resource kinds the operator reconciles. Kinds whose apply
// logic inspects the patched object get a type-specific reactor (webhooks,
// daemonsets, deployments, PDBs, CRDs); the rest share the generic counters.
func (k *KubeVirtTestData) shouldExpectPatchesAndUpdates() {
	genericPatchFunc := k.genericPatchFunc()
	genericUpdateFunc := k.genericUpdateFunc()
	webhookValidationPatchFunc := k.webhookValidationPatchFunc()
	webhookMutatingPatchFunc := k.webhookMutatingPatchFunc()
	daemonsetPatchFunc := k.daemonsetPatchFunc()
	deploymentPatchFunc := k.deploymentPatchFunc()
	podDisruptionBudgetPatchFunc := k.podDisruptionBudgetPatchFunc()

	k.extClient.Fake.PrependReactor("patch", "customresourcedefinitions", k.crdPatchFunc())
	k.kubeClient.Fake.PrependReactor("patch", "serviceaccounts", genericPatchFunc)
	// RBAC objects are updated rather than patched by the operator.
	k.kubeClient.Fake.PrependReactor("update", "clusterroles", genericUpdateFunc)
	k.kubeClient.Fake.PrependReactor("update", "clusterrolebindings", genericUpdateFunc)
	k.kubeClient.Fake.PrependReactor("update", "roles", genericUpdateFunc)
	k.kubeClient.Fake.PrependReactor("update", "rolebindings", genericUpdateFunc)
	k.kubeClient.Fake.PrependReactor("patch", "validatingwebhookconfigurations", webhookValidationPatchFunc)
	k.kubeClient.Fake.PrependReactor("patch", "mutatingwebhookconfigurations", webhookMutatingPatchFunc)
	k.kubeClient.Fake.PrependReactor("patch", "secrets", genericPatchFunc)
	k.kubeClient.Fake.PrependReactor("patch", "configmaps", genericPatchFunc)
	k.kubeClient.Fake.PrependReactor("patch", "services", genericPatchFunc)
	k.kubeClient.Fake.PrependReactor("patch", "daemonsets", daemonsetPatchFunc)
	k.kubeClient.Fake.PrependReactor("patch", "deployments", deploymentPatchFunc)
	k.kubeClient.Fake.PrependReactor("patch", "poddisruptionbudgets", podDisruptionBudgetPatchFunc)
	k.secClient.Fake.PrependReactor("update", "securitycontextconstraints", genericUpdateFunc)
	k.promClient.Fake.PrependReactor("patch", "servicemonitors", genericPatchFunc)
	k.promClient.Fake.PrependReactor("patch", "prometheusrules", genericPatchFunc)
	// APIServices use a gomock client; translate Patch into a synthetic action.
	k.apiServiceClient.EXPECT().Patch(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Do(func(args ...interface{}) {
		genericPatchFunc(&testing.PatchActionImpl{ActionImpl: testing.ActionImpl{Resource: schema.GroupVersionResource{Resource: "apiservices"}}})
	})
}
// genericPatchFunc builds a reactor that asserts the action is a patch,
// increments the global patch counter, and records a per-resource Patched
// entry in k.resourceChanges.
func (k *KubeVirtTestData) genericPatchFunc() func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	return func(action testing.Action) (handled bool, obj runtime.Object, err error) {
		_, isPatch := action.(testing.PatchAction)
		Expect(isPatch).To(BeTrue())
		k.totalPatches++

		name := action.GetResource().Resource
		if _, tracked := k.resourceChanges[name]; !tracked {
			k.resourceChanges[name] = make(map[string]int)
		}
		k.resourceChanges[name][Patched]++
		return true, nil, nil
	}
}
// genericUpdateFunc builds a reactor that asserts the action is an update,
// increments the global update counter, records a per-resource Updated entry,
// and hands the updated object back to the fake client.
func (k *KubeVirtTestData) genericUpdateFunc() func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	return func(action testing.Action) (handled bool, obj runtime.Object, err error) {
		updateAction, isUpdate := action.(testing.UpdateAction)
		Expect(isUpdate).To(BeTrue(), "genericUpdateFunction testing ok")
		k.totalUpdates++

		name := action.GetResource().Resource
		if _, tracked := k.resourceChanges[name]; !tracked {
			k.resourceChanges[name] = make(map[string]int)
		}
		k.resourceChanges[name][Updated]++
		return true, updateAction.GetObject(), nil
	}
}
// webhookValidationPatchFunc counts the patch via the generic patch reactor and
// returns an empty ValidatingWebhookConfiguration so callers that decode the
// patched object succeed.
func (k *KubeVirtTestData) webhookValidationPatchFunc() func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	// Build the counting reactor once instead of reconstructing the closure on
	// every invocation (the original called k.genericPatchFunc() per action).
	genericPatchFunc := k.genericPatchFunc()
	return func(action testing.Action) (handled bool, obj runtime.Object, err error) {
		genericPatchFunc(action)
		return true, &admissionregistrationv1.ValidatingWebhookConfiguration{}, nil
	}
}
// webhookMutatingPatchFunc counts the patch via the generic patch reactor and
// returns an empty MutatingWebhookConfiguration so callers that decode the
// patched object succeed.
func (k *KubeVirtTestData) webhookMutatingPatchFunc() func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	// Build the counting reactor once instead of reconstructing the closure on
	// every invocation.
	genericPatchFunc := k.genericPatchFunc()
	return func(action testing.Action) (handled bool, obj runtime.Object, err error) {
		genericPatchFunc(action)
		return true, &admissionregistrationv1.MutatingWebhookConfiguration{}, nil
	}
}
// deploymentPatchFunc counts the patch via the generic patch reactor and
// returns an empty Deployment so callers that decode the patched object succeed.
func (k *KubeVirtTestData) deploymentPatchFunc() func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	// Build the counting reactor once instead of reconstructing the closure on
	// every invocation.
	genericPatchFunc := k.genericPatchFunc()
	return func(action testing.Action) (handled bool, obj runtime.Object, err error) {
		genericPatchFunc(action)
		return true, &appsv1.Deployment{}, nil
	}
}
// daemonsetPatchFunc counts the patch via the generic patch reactor and returns
// an empty DaemonSet so callers that decode the patched object succeed.
func (k *KubeVirtTestData) daemonsetPatchFunc() func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	// Build the counting reactor once instead of reconstructing the closure on
	// every invocation.
	genericPatchFunc := k.genericPatchFunc()
	return func(action testing.Action) (handled bool, obj runtime.Object, err error) {
		genericPatchFunc(action)
		return true, &appsv1.DaemonSet{}, nil
	}
}
// podDisruptionBudgetPatchFunc counts the patch via the generic patch reactor
// and returns an empty PodDisruptionBudget so callers that decode the patched
// object succeed.
func (k *KubeVirtTestData) podDisruptionBudgetPatchFunc() func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	// Build the counting reactor once instead of reconstructing the closure on
	// every invocation.
	genericPatchFunc := k.genericPatchFunc()
	return func(action testing.Action) (handled bool, obj runtime.Object, err error) {
		genericPatchFunc(action)
		return true, &policyv1beta1.PodDisruptionBudget{}, nil
	}
}
// crdPatchFunc counts the patch via the generic patch reactor and returns an
// empty CustomResourceDefinition so callers that decode the patched object
// succeed.
func (k *KubeVirtTestData) crdPatchFunc() func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	// Build the counting reactor once instead of reconstructing the closure on
	// every invocation.
	genericPatchFunc := k.genericPatchFunc()
	return func(action testing.Action) (handled bool, obj runtime.Object, err error) {
		genericPatchFunc(action)
		return true, &extv1.CustomResourceDefinition{}, nil
	}
}
// shouldExpectCreations registers create reactors for every managed resource
// kind on all fake clients, plus a create expectation on the mocked APIService
// client, all funneling into the shared counting create handler.
func (k *KubeVirtTestData) shouldExpectCreations() {
	genericCreateFunc := k.genericCreateFunc()

	// Resources served by the core fake clientset.
	kubeResources := []string{
		"serviceaccounts", "clusterroles", "clusterrolebindings", "roles", "rolebindings",
		"services", "deployments", "daemonsets",
		"validatingwebhookconfigurations", "mutatingwebhookconfigurations",
		"secrets", "configmaps", "poddisruptionbudgets",
	}
	for _, resource := range kubeResources {
		k.kubeClient.Fake.PrependReactor("create", resource, genericCreateFunc)
	}
	k.extClient.Fake.PrependReactor("create", "customresourcedefinitions", genericCreateFunc)
	k.secClient.Fake.PrependReactor("create", "securitycontextconstraints", genericCreateFunc)
	for _, resource := range []string{"servicemonitors", "prometheusrules"} {
		k.promClient.Fake.PrependReactor("create", resource, genericCreateFunc)
	}
	// APIServices use a gomock client; translate Create into a synthetic action.
	k.apiServiceClient.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Do(func(ctx context.Context, obj runtime.Object, opts metav1.CreateOptions) {
		genericCreateFunc(&testing.CreateActionImpl{Object: obj})
	})
}
// genericCreateFunc builds a reactor that asserts the action is a create,
// increments the global add counter, optionally mirrors the object into the
// informer caches, and returns the created object to the fake client.
func (k *KubeVirtTestData) genericCreateFunc() func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	return func(action testing.Action) (handled bool, obj runtime.Object, err error) {
		createAction, isCreate := action.(testing.CreateAction)
		Expect(isCreate).To(BeTrue())
		k.totalAdds++

		created := createAction.GetObject()
		if k.addToCache {
			k.addResource(created, nil, nil)
		}
		return true, created, nil
	}
}
// addResource injects deployment metadata into obj, feeds it to the fake
// informer source matching its concrete type, and records the addition in
// k.resourceChanges. When kv is non-nil, generation-tracking helpers record
// the object in kv.Status.Generations for the kinds that track generations.
//
// The type switch already binds the typed value as `resource`; the original
// re-asserted obj.(*T) in every case, performing a redundant second dynamic
// type check. Using the switch binding directly is the idiomatic form.
func (k *KubeVirtTestData) addResource(obj runtime.Object, config *util.KubeVirtDeploymentConfig, kv *v1.KubeVirt) {
	switch resource := obj.(type) {
	case *k8sv1.ServiceAccount:
		injectMetadata(&resource.ObjectMeta, config)
		k.addServiceAccount(resource)
	case *rbacv1.ClusterRole:
		injectMetadata(&resource.ObjectMeta, config)
		k.addClusterRole(resource)
	case *rbacv1.ClusterRoleBinding:
		injectMetadata(&resource.ObjectMeta, config)
		k.addClusterRoleBinding(resource)
	case *rbacv1.Role:
		injectMetadata(&resource.ObjectMeta, config)
		k.addRole(resource)
	case *rbacv1.RoleBinding:
		injectMetadata(&resource.ObjectMeta, config)
		k.addRoleBinding(resource)
	case *extv1.CustomResourceDefinition:
		injectMetadata(&resource.ObjectMeta, config)
		k.addCrd(resource, kv)
	case *k8sv1.Service:
		injectMetadata(&resource.ObjectMeta, config)
		k.addService(resource)
	case *appsv1.Deployment:
		injectMetadata(&resource.ObjectMeta, config)
		k.addDeployment(resource, kv)
	case *appsv1.DaemonSet:
		injectMetadata(&resource.ObjectMeta, config)
		k.addDaemonset(resource, kv)
	case *admissionregistrationv1.ValidatingWebhookConfiguration:
		injectMetadata(&resource.ObjectMeta, config)
		k.addValidatingWebhook(resource, kv)
	case *admissionregistrationv1.MutatingWebhookConfiguration:
		injectMetadata(&resource.ObjectMeta, config)
		k.addMutatingWebhook(resource, kv)
	case *apiregv1.APIService:
		injectMetadata(&resource.ObjectMeta, config)
		k.addAPIService(resource)
	case *batchv1.Job:
		injectMetadata(&resource.ObjectMeta, config)
		k.addInstallStrategyJob(resource)
	case *k8sv1.ConfigMap:
		injectMetadata(&resource.ObjectMeta, config)
		k.addConfigMap(resource)
	case *k8sv1.Pod:
		injectMetadata(&resource.ObjectMeta, config)
		k.addPod(resource)
	case *policyv1beta1.PodDisruptionBudget:
		injectMetadata(&resource.ObjectMeta, config)
		k.addPodDisruptionBudget(resource, kv)
	case *k8sv1.Secret:
		injectMetadata(&resource.ObjectMeta, config)
		k.addSecret(resource)
	case *secv1.SecurityContextConstraints:
		injectMetadata(&resource.ObjectMeta, config)
		k.addSCC(resource)
	case *promv1.ServiceMonitor:
		injectMetadata(&resource.ObjectMeta, config)
		k.addServiceMonitor(resource)
	case *promv1.PrometheusRule:
		injectMetadata(&resource.ObjectMeta, config)
		k.addPrometheusRule(resource)
	default:
		Fail("unknown resource type")
	}

	// Derive the change-tracking key from the concrete type name,
	// e.g. "*appsv1.Deployment" -> "deployments".
	split := strings.Split(fmt.Sprintf("%T", obj), ".")
	resourceKey := strings.ToLower(split[len(split)-1]) + "s"
	if _, ok := k.resourceChanges[resourceKey]; !ok {
		k.resourceChanges[resourceKey] = make(map[string]int)
	}
	k.resourceChanges[resourceKey][Added]++
}
// addServiceAccount publishes sa through the fake informer source and waits for
// the controller queue to register the add.
func (k *KubeVirtTestData) addServiceAccount(sa *k8sv1.ServiceAccount) {
	k.mockQueue.ExpectAdds(1)
	k.serviceAccountSource.Add(sa)
	k.mockQueue.Wait()
}
// addClusterRole publishes cr through the fake informer source and waits for
// the queue to register the add.
func (k *KubeVirtTestData) addClusterRole(cr *rbacv1.ClusterRole) {
	k.mockQueue.ExpectAdds(1)
	k.clusterRoleSource.Add(cr)
	k.mockQueue.Wait()
}
// addClusterRoleBinding publishes crb through the fake informer source and
// waits for the queue to register the add.
func (k *KubeVirtTestData) addClusterRoleBinding(crb *rbacv1.ClusterRoleBinding) {
	k.mockQueue.ExpectAdds(1)
	k.clusterRoleBindingSource.Add(crb)
	k.mockQueue.Wait()
}
// addRole publishes role through the fake informer source and waits for the
// queue to register the add.
func (k *KubeVirtTestData) addRole(role *rbacv1.Role) {
	k.mockQueue.ExpectAdds(1)
	k.roleSource.Add(role)
	k.mockQueue.Wait()
}
// addRoleBinding publishes rb through the fake informer source and waits for
// the queue to register the add.
func (k *KubeVirtTestData) addRoleBinding(rb *rbacv1.RoleBinding) {
	k.mockQueue.ExpectAdds(1)
	k.roleBindingSource.Add(rb)
	k.mockQueue.Wait()
}
// addCrd publishes crd through the fake informer source and waits for the
// queue to register the add. When kv is non-nil, the CRD's generation is also
// recorded in kv.Status.Generations.
func (k *KubeVirtTestData) addCrd(crd *extv1.CustomResourceDefinition, kv *v1.KubeVirt) {
	k.mockQueue.ExpectAdds(1)
	if kv != nil {
		apply.SetGeneration(&kv.Status.Generations, crd)
	}
	k.crdSource.Add(crd)
	k.mockQueue.Wait()
}
// addService publishes svc through the fake informer source and waits for the
// queue to register the add.
func (k *KubeVirtTestData) addService(svc *k8sv1.Service) {
	k.mockQueue.ExpectAdds(1)
	k.serviceSource.Add(svc)
	k.mockQueue.Wait()
}
// addDeployment publishes depl through the fake informer source and waits for
// the queue to register the add, recording the generation in kv when given.
func (k *KubeVirtTestData) addDeployment(depl *appsv1.Deployment, kv *v1.KubeVirt) {
	k.mockQueue.ExpectAdds(1)
	if kv != nil {
		apply.SetGeneration(&kv.Status.Generations, depl)
	}
	k.deploymentSource.Add(depl)
	k.mockQueue.Wait()
}
// addDaemonset publishes ds through the fake informer source and waits for the
// queue to register the add, recording the generation in kv when given.
func (k *KubeVirtTestData) addDaemonset(ds *appsv1.DaemonSet, kv *v1.KubeVirt) {
	k.mockQueue.ExpectAdds(1)
	if kv != nil {
		apply.SetGeneration(&kv.Status.Generations, ds)
	}
	k.daemonSetSource.Add(ds)
	k.mockQueue.Wait()
}
// addMutatingWebhook publishes wh through the fake informer source and waits
// for the queue to register the add, recording the generation in kv when given.
func (k *KubeVirtTestData) addMutatingWebhook(wh *admissionregistrationv1.MutatingWebhookConfiguration, kv *v1.KubeVirt) {
	k.mockQueue.ExpectAdds(1)
	if kv != nil {
		apply.SetGeneration(&kv.Status.Generations, wh)
	}
	k.mutatingWebhookSource.Add(wh)
	k.mockQueue.Wait()
}
// addAPIService publishes wh through the fake informer source and waits for
// the queue to register the add.
func (k *KubeVirtTestData) addAPIService(wh *apiregv1.APIService) {
	k.mockQueue.ExpectAdds(1)
	k.apiserviceSource.Add(wh)
	k.mockQueue.Wait()
}
// addInstallStrategyJob publishes job through the fake informer source and
// waits for the queue to register the add.
func (k *KubeVirtTestData) addInstallStrategyJob(job *batchv1.Job) {
	k.mockQueue.ExpectAdds(1)
	k.installStrategyJobSource.Add(job)
	k.mockQueue.Wait()
}
// addPod publishes pod through the infrastructure-pod fake informer source and
// waits for the queue to register the add.
func (k *KubeVirtTestData) addPod(pod *k8sv1.Pod) {
	k.mockQueue.ExpectAdds(1)
	k.infrastructurePodSource.Add(pod)
	k.mockQueue.Wait()
}
// addPodDisruptionBudget publishes podDisruptionBudget through the fake
// informer source and waits for the queue to register the add, recording the
// generation in kv when given.
func (k *KubeVirtTestData) addPodDisruptionBudget(podDisruptionBudget *policyv1beta1.PodDisruptionBudget, kv *v1.KubeVirt) {
	k.mockQueue.ExpectAdds(1)
	if kv != nil {
		apply.SetGeneration(&kv.Status.Generations, podDisruptionBudget)
	}
	k.podDisruptionBudgetSource.Add(podDisruptionBudget)
	k.mockQueue.Wait()
}
// addSecret publishes secret through the fake informer source and waits for
// the queue to register the add.
func (k *KubeVirtTestData) addSecret(secret *k8sv1.Secret) {
	k.mockQueue.ExpectAdds(1)
	k.secretsSource.Add(secret)
	k.mockQueue.Wait()
}
// addConfigMap publishes configMap through the appropriate fake informer
// source: ConfigMaps carrying the install-strategy label go to the
// install-strategy informer, all others to the regular ConfigMap informer.
func (k *KubeVirtTestData) addConfigMap(configMap *k8sv1.ConfigMap) {
	k.mockQueue.ExpectAdds(1)
	if _, ok := configMap.Labels[v1.InstallStrategyLabel]; ok {
		k.installStrategyConfigMapSource.Add(configMap)
	} else {
		k.configMapSource.Add(configMap)
	}
	k.mockQueue.Wait()
}
// addSCC publishes scc through the fake informer source and waits for the
// queue to register the add.
func (k *KubeVirtTestData) addSCC(scc *secv1.SecurityContextConstraints) {
	k.mockQueue.ExpectAdds(1)
	k.sccSource.Add(scc)
	k.mockQueue.Wait()
}
// addServiceMonitor publishes serviceMonitor through the fake informer source
// and waits for the queue to register the add.
func (k *KubeVirtTestData) addServiceMonitor(serviceMonitor *promv1.ServiceMonitor) {
	k.mockQueue.ExpectAdds(1)
	k.serviceMonitorSource.Add(serviceMonitor)
	k.mockQueue.Wait()
}
// addPrometheusRule publishes prometheusRule through the fake informer source
// and waits for the queue to register the add.
func (k *KubeVirtTestData) addPrometheusRule(prometheusRule *promv1.PrometheusRule) {
	k.mockQueue.ExpectAdds(1)
	k.prometheusRuleSource.Add(prometheusRule)
	k.mockQueue.Wait()
}
// generateRandomResources seeds the informer caches with a set of randomly
// named objects (one per managed kind) under a random registry/version config,
// and returns how many objects were added.
func (k *KubeVirtTestData) generateRandomResources() int {
	randName := func() string {
		return fmt.Sprintf("rand-%s", rand.String(10))
	}

	version := randName()
	registry := randName()
	config := getConfig(registry, version)

	objects := []runtime.Object{
		&k8sv1.ServiceAccount{
			TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "ServiceAccount"},
			ObjectMeta: metav1.ObjectMeta{Name: randName()},
		},
		&rbacv1.ClusterRole{
			TypeMeta:   metav1.TypeMeta{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole"},
			ObjectMeta: metav1.ObjectMeta{Name: randName()},
		},
		&rbacv1.ClusterRoleBinding{
			TypeMeta:   metav1.TypeMeta{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRoleBinding"},
			ObjectMeta: metav1.ObjectMeta{Name: randName()},
		},
		&rbacv1.Role{
			TypeMeta:   metav1.TypeMeta{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "Role"},
			ObjectMeta: metav1.ObjectMeta{Name: randName()},
		},
		&rbacv1.RoleBinding{
			TypeMeta:   metav1.TypeMeta{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "RoleBinding"},
			ObjectMeta: metav1.ObjectMeta{Name: randName()},
		},
		&extv1.CustomResourceDefinition{
			TypeMeta:   metav1.TypeMeta{APIVersion: "apiextensions.k8s.io/v1", Kind: "CustomResourceDefinition"},
			ObjectMeta: metav1.ObjectMeta{Name: randName()},
		},
		&k8sv1.Service{
			TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "Service"},
			ObjectMeta: metav1.ObjectMeta{Name: randName()},
		},
		&appsv1.DaemonSet{
			TypeMeta:   metav1.TypeMeta{APIVersion: "apps/v1", Kind: "DaemonSet"},
			ObjectMeta: metav1.ObjectMeta{Name: randName()},
		},
		&appsv1.Deployment{
			TypeMeta:   metav1.TypeMeta{APIVersion: "apps/v1", Kind: "Deployment"},
			ObjectMeta: metav1.ObjectMeta{Name: randName()},
		},
		&secv1.SecurityContextConstraints{
			TypeMeta:   metav1.TypeMeta{APIVersion: "security.openshift.io/v1", Kind: "SecurityContextConstraints"},
			ObjectMeta: metav1.ObjectMeta{Name: randName()},
		},
	}

	for _, obj := range objects {
		k.addResource(obj, config, nil)
	}
	return len(objects)
}
// addAll seeds the informer caches with the complete set of objects the
// operator deploys for the given config and KubeVirt CR: RBAC, CRDs, SCCs,
// services, deployments/daemonset with PDBs, monitoring objects, the CA
// certificate chain, webhooks, and API services. Each object is stamped with
// the customize-components hash annotation before being added.
func (k *KubeVirtTestData) addAll(config *util.KubeVirtDeploymentConfig, kv *v1.KubeVirt) {
	c, _ := apply.NewCustomizer(kv.Spec.CustomizeComponents)
	all := make([]runtime.Object, 0)
	// rbac
	all = append(all, rbac.GetAllCluster()...)
	all = append(all, rbac.GetAllApiServer(NAMESPACE)...)
	all = append(all, rbac.GetAllHandler(NAMESPACE)...)
	all = append(all, rbac.GetAllController(NAMESPACE)...)
	// crds
	functions := []func() (*extv1.CustomResourceDefinition, error){
		components.NewVirtualMachineInstanceCrd, components.NewPresetCrd, components.NewReplicaSetCrd,
		components.NewVirtualMachineCrd, components.NewVirtualMachineInstanceMigrationCrd,
		components.NewVirtualMachineSnapshotCrd, components.NewVirtualMachineSnapshotContentCrd,
		components.NewVirtualMachineRestoreCrd,
	}
	for _, f := range functions {
		crd, err := f()
		if err != nil {
			panic(fmt.Errorf("This should not happen, %v", err))
		}
		all = append(all, crd)
	}
	// cr
	all = append(all, components.NewPrometheusRuleCR(config.GetNamespace(), config.WorkloadUpdatesEnabled()))
	// sccs
	all = append(all, components.NewKubeVirtControllerSCC(NAMESPACE))
	all = append(all, components.NewKubeVirtHandlerSCC(NAMESPACE))
	// services and deployments
	all = append(all, components.NewOperatorWebhookService(NAMESPACE))
	all = append(all, components.NewPrometheusService(NAMESPACE))
	all = append(all, components.NewApiServerService(NAMESPACE))
	apiDeployment, _ := components.NewApiServerDeployment(NAMESPACE, config.GetImageRegistry(), config.GetImagePrefix(), config.GetApiVersion(), "", "", config.GetImagePullPolicy(), config.GetVerbosity(), config.GetExtraEnv())
	apiDeploymentPdb := components.NewPodDisruptionBudgetForDeployment(apiDeployment)
	controller, _ := components.NewControllerDeployment(NAMESPACE, config.GetImageRegistry(), config.GetImagePrefix(), config.GetControllerVersion(), config.GetLauncherVersion(), "", "", config.GetImagePullPolicy(), config.GetVerbosity(), config.GetExtraEnv())
	controllerPdb := components.NewPodDisruptionBudgetForDeployment(controller)
	handler, _ := components.NewHandlerDaemonSet(NAMESPACE, config.GetImageRegistry(), config.GetImagePrefix(), config.GetHandlerVersion(), "", "", config.GetLauncherVersion(), config.GetImagePullPolicy(), config.GetVerbosity(), config.GetExtraEnv())
	all = append(all, apiDeployment, apiDeploymentPdb, controller, controllerPdb, handler)
	all = append(all, rbac.GetAllServiceMonitor(NAMESPACE, config.GetMonitorNamespace(), config.GetMonitorServiceAccount())...)
	all = append(all, components.NewServiceMonitorCR(NAMESPACE, config.GetMonitorNamespace(), true))
	// ca certificate
	caSecret := components.NewCACertSecret(NAMESPACE)
	components.PopulateSecretWithCertificate(caSecret, nil, &metav1.Duration{Duration: apply.Duration7d})
	caCert, _ := components.LoadCertificates(caSecret)
	caBundle := cert.EncodeCertPEM(caCert.Leaf)
	all = append(all, caSecret)
	caConfigMap := components.NewKubeVirtCAConfigMap(NAMESPACE)
	caConfigMap.Data = map[string]string{components.CABundleKey: string(caBundle)}
	all = append(all, caConfigMap)
	// webhooks and apiservice
	// Every webhook and API service gets the freshly generated CA bundle so
	// that TLS verification in the apply logic succeeds.
	validatingWebhook := components.NewVirtAPIValidatingWebhookConfiguration(config.GetNamespace())
	for i := range validatingWebhook.Webhooks {
		validatingWebhook.Webhooks[i].ClientConfig.CABundle = caBundle
	}
	all = append(all, validatingWebhook)
	mutatingWebhook := components.NewVirtAPIMutatingWebhookConfiguration(config.GetNamespace())
	for i := range mutatingWebhook.Webhooks {
		mutatingWebhook.Webhooks[i].ClientConfig.CABundle = caBundle
	}
	all = append(all, mutatingWebhook)
	apiServices := components.NewVirtAPIAPIServices(config.GetNamespace())
	for _, apiService := range apiServices {
		apiService.Spec.CABundle = caBundle
		all = append(all, apiService)
	}
	validatingWebhook = components.NewOpertorValidatingWebhookConfiguration(NAMESPACE)
	for i := range validatingWebhook.Webhooks {
		validatingWebhook.Webhooks[i].ClientConfig.CABundle = caBundle
	}
	all = append(all, validatingWebhook)
	// Component cert secrets are signed by the CA generated above.
	secrets := components.NewCertSecrets(NAMESPACE, config.GetNamespace())
	for _, secret := range secrets {
		components.PopulateSecretWithCertificate(secret, caCert, &metav1.Duration{Duration: apply.Duration1d})
		all = append(all, secret)
	}
	// Stamp each object with the customization hash and feed it to the caches.
	for _, obj := range all {
		m := obj.(metav1.Object)
		a := m.GetAnnotations()
		if len(a) == 0 {
			a = map[string]string{}
		}
		a[v1.KubeVirtCustomizeComponentAnnotationHash] = c.Hash()
		m.SetAnnotations(a)
		k.addResource(obj, config, kv)
	}
}
// shouldExpectJobCreation registers a reactor on the fake client so that
// "create" calls for batch Jobs are intercepted and recorded by the
// generic create handler (tracking totalAdds/resource changes).
func (k *KubeVirtTestData) shouldExpectJobCreation() {
	k.kubeClient.Fake.PrependReactor("create", "jobs", k.genericCreateFunc())
}
// shouldExpectRbacBackupCreations registers create reactors for every RBAC
// resource kind the operator backs up before an update, all sharing one
// generic create handler.
func (k *KubeVirtTestData) shouldExpectRbacBackupCreations() {
	createHandler := k.genericCreateFunc()
	for _, resource := range []string{"clusterroles", "clusterrolebindings", "roles", "rolebindings"} {
		k.kubeClient.Fake.PrependReactor("create", resource, createHandler)
	}
}
// shouldExpectJobDeletion registers a reactor on the fake client so that
// "delete" calls for batch Jobs are intercepted and recorded by the
// generic delete handler.
func (k *KubeVirtTestData) shouldExpectJobDeletion() {
	k.kubeClient.Fake.PrependReactor("delete", "jobs", k.genericDeleteFunc())
}
// shouldExpectInstallStrategyDeletion intercepts config map deletions on the
// fake client. Deletions of the "kubevirt-ca" config map are passed through
// unhandled; every other config map (i.e. the install-strategy config maps)
// is removed from the test caches and marked handled.
func (k *KubeVirtTestData) shouldExpectInstallStrategyDeletion() {
	k.kubeClient.Fake.PrependReactor("delete", "configmaps", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
		deleted, ok := action.(testing.DeleteAction)
		Expect(ok).To(BeTrue())
		// Let the CA config map deletion fall through to other reactors.
		if deleted.GetName() == "kubevirt-ca" {
			return false, nil, nil
		}
		// Build the "<namespace>/<name>" cache key (namespace may be empty).
		var key string
		if len(deleted.GetNamespace()) > 0 {
			key = deleted.GetNamespace() + "/"
		}
		key += deleted.GetName()
		k.deleteResource(deleted.GetResource().Resource, key)
		return true, nil, nil
	})
}
// makeApiAndControllerReady waits for the virt-api and virt-controller
// deployments to appear in the deployment cache, marks each as fully ready
// (status replicas == spec replicas), and then marks the corresponding pod
// disruption budgets as ready.
func (k *KubeVirtTestData) makeApiAndControllerReady() {
	// makeDeploymentReady publishes a modified copy of the deployment whose
	// status reports all desired replicas as ready.
	makeDeploymentReady := func(item interface{}) {
		depl, _ := item.(*appsv1.Deployment)
		deplNew := depl.DeepCopy()
		var replicas int32 = 1
		if depl.Spec.Replicas != nil {
			replicas = *depl.Spec.Replicas
		}
		deplNew.Status.Replicas = replicas
		deplNew.Status.ReadyReplicas = replicas
		k.deploymentSource.Modify(deplNew)
	}
	for _, name := range []string{"/virt-api", "/virt-controller"} {
		exists := false
		var obj interface{}
		// Poll until the deployment exists. Sleep only while it is still
		// missing: the previous version also slept one extra second after
		// the deployment was found, wasting time in every test;
		// makePodDisruptionBudgetsReady already follows this pattern.
		for !exists {
			obj, exists, _ = k.controller.stores.DeploymentCache.GetByKey(NAMESPACE + name)
			if exists {
				makeDeploymentReady(obj)
			} else {
				time.Sleep(time.Second)
			}
		}
	}
	k.makePodDisruptionBudgetsReady()
}
// makePodDisruptionBudgetsReady blocks until both the virt-api and
// virt-controller pod disruption budgets show up in the PDB cache.
func (k *KubeVirtTestData) makePodDisruptionBudgetsReady() {
	for _, pdbName := range []string{"/virt-api-pdb", "/virt-controller-pdb"} {
		// Poll once per second until the PDB appears in the cache.
		for {
			_, found, _ := k.stores.PodDisruptionBudgetCache.GetByKey(NAMESPACE + pdbName)
			if found {
				break
			}
			time.Sleep(time.Second)
		}
	}
}
// makeHandlerReady waits until the virt-handler daemon set appears in the
// daemon set cache, then publishes a copy whose status reports one scheduled
// and one ready pod.
func (k *KubeVirtTestData) makeHandlerReady() {
	exists := false
	var obj interface{}
	// Poll until the daemon set exists. Sleep only while it is still
	// missing: the previous version slept unconditionally, adding one
	// wasted second after the daemon set was already found.
	for !exists {
		obj, exists, _ = k.controller.stores.DaemonSetCache.GetByKey(NAMESPACE + "/virt-handler")
		if exists {
			handler, _ := obj.(*appsv1.DaemonSet)
			handlerNew := handler.DeepCopy()
			handlerNew.Status.DesiredNumberScheduled = 1
			handlerNew.Status.NumberReady = 1
			k.daemonSetSource.Modify(handlerNew)
		} else {
			time.Sleep(time.Second)
		}
	}
}
// addDummyValidationWebhook injects a temporary operator-owned validating
// webhook configuration (named like the one created while blocking new CRDs
// during deployment) into the test caches, tagged with a randomized
// version/registry config so it never matches the active deployment.
func (k *KubeVirtTestData) addDummyValidationWebhook() {
	randomVersion := fmt.Sprintf("rand-%s", rand.String(10))
	randomRegistry := fmt.Sprintf("rand-%s", rand.String(10))
	cfg := getConfig(randomRegistry, randomVersion)

	wh := &admissionregistrationv1.ValidatingWebhookConfiguration{}
	wh.ObjectMeta = metav1.ObjectMeta{Name: "virt-operator-tmp-webhook"}
	injectMetadata(&wh.ObjectMeta, cfg)

	k.addValidatingWebhook(wh, nil)
}
// addValidatingWebhook pushes a validating webhook configuration into the
// informer source and waits for the controller work queue to pick it up.
// If a KubeVirt CR is given, the webhook's generation is recorded in the
// CR's status generations first, so the controller considers the resource
// up to date.
func (k *KubeVirtTestData) addValidatingWebhook(wh *admissionregistrationv1.ValidatingWebhookConfiguration, kv *v1.KubeVirt) {
	// Expect the queue add before triggering it, then block until it lands.
	k.mockQueue.ExpectAdds(1)
	if kv != nil {
		apply.SetGeneration(&kv.Status.Generations, wh)
	}
	k.validatingWebhookSource.Add(wh)
	k.mockQueue.Wait()
}
// addInstallStrategy creates an install-strategy config map for the given
// deployment config, appends a random suffix to its name (so repeated calls
// do not collide), and adds it to the test caches.
func (k *KubeVirtTestData) addInstallStrategy(config *util.KubeVirtDeploymentConfig) {
	cm, _ := install.NewInstallStrategyConfigMap(config, true, NAMESPACE)
	cm.Name = cm.Name + "-" + rand.String(10)
	injectMetadata(&cm.ObjectMeta, config)
	k.addConfigMap(cm)
}
// addPodDisruptionBudgets registers the pod disruption budgets expected to
// accompany the virt-api and virt-controller deployments. Each PDB mirrors
// its deployment's namespace, labels and selector, is named
// "<deployment>-pdb", and requires at least one available pod.
func (k *KubeVirtTestData) addPodDisruptionBudgets(config *util.KubeVirtDeploymentConfig, apiDeployment *appsv1.Deployment, controller *appsv1.Deployment, kv *v1.KubeVirt) {
	minAvailable := intstr.FromInt(1) // was intstr.FromInt(int(1)): the int() conversion was redundant
	// newPdbFor builds the disruption budget for one deployment; factored
	// out of the previously duplicated api/controller construction.
	newPdbFor := func(depl *appsv1.Deployment) *policyv1beta1.PodDisruptionBudget {
		pdb := &policyv1beta1.PodDisruptionBudget{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: depl.Namespace,
				Name:      depl.Name + "-pdb",
				Labels:    depl.Labels,
			},
			Spec: policyv1beta1.PodDisruptionBudgetSpec{
				MinAvailable: &minAvailable,
				Selector:     depl.Spec.Selector,
			},
		}
		injectMetadata(&pdb.ObjectMeta, config)
		return pdb
	}
	k.addPodDisruptionBudget(newPdbFor(apiDeployment), kv)
	k.addPodDisruptionBudget(newPdbFor(controller), kv)
}
// fakeNamespaceModificationEvent simulates a modification of the KubeVirt
// namespace without the labels the operator expects, forcing the controller
// to reconcile (and patch) the namespace. It waits until the resulting work
// item lands on the queue.
func (k *KubeVirtTestData) fakeNamespaceModificationEvent() {
	ns := &k8sv1.Namespace{
		TypeMeta:   metav1.TypeMeta{Kind: "Namespace"},
		ObjectMeta: metav1.ObjectMeta{Name: NAMESPACE},
	}
	k.mockQueue.ExpectAdds(1)
	k.namespaceSource.Modify(ns)
	k.mockQueue.Wait()
}
// shouldExpectNamespacePatch registers a reactor on the fake client so that
// "patch" calls for namespaces are intercepted and recorded by the generic
// patch handler.
func (k *KubeVirtTestData) shouldExpectNamespacePatch() {
	k.kubeClient.Fake.PrependReactor("patch", "namespaces", k.genericPatchFunc())
}
// addPodsWithIndividualConfigs adds one running, ready pod each for
// virt-api, virt-controller and virt-handler, built from the pod templates
// of the respective deployments/daemon set, so the controller considers all
// components deployed. Each component may use its own deployment config.
// If shouldAddPodDisruptionBudgets is set, the matching PDBs are added too.
func (k *KubeVirtTestData) addPodsWithIndividualConfigs(config *util.KubeVirtDeploymentConfig,
	configController *util.KubeVirtDeploymentConfig,
	configHandler *util.KubeVirtDeploymentConfig,
	shouldAddPodDisruptionBudgets bool,
	kv *v1.KubeVirt) {
	// runningPodFrom turns a pod template into a running pod with a single
	// ready container, factored out of the previously triplicated literal.
	runningPodFrom := func(tmpl k8sv1.PodTemplateSpec) *k8sv1.Pod {
		return &k8sv1.Pod{
			ObjectMeta: tmpl.ObjectMeta,
			Spec:       tmpl.Spec,
			Status: k8sv1.PodStatus{
				Phase: k8sv1.PodRunning,
				ContainerStatuses: []k8sv1.ContainerStatus{
					{Ready: true, Name: "somecontainer"},
				},
			},
		}
	}
	// we need at least one active pod for virt-api, virt-controller and
	// virt-handler
	apiDeployment, _ := components.NewApiServerDeployment(NAMESPACE, config.GetImageRegistry(), config.GetImagePrefix(), config.GetApiVersion(), "", "", config.GetImagePullPolicy(), config.GetVerbosity(), config.GetExtraEnv())
	pod := runningPodFrom(apiDeployment.Spec.Template)
	injectMetadata(&pod.ObjectMeta, config)
	pod.Name = "virt-api-xxxx"
	k.addPod(pod)
	controller, _ := components.NewControllerDeployment(NAMESPACE, configController.GetImageRegistry(), configController.GetImagePrefix(), configController.GetControllerVersion(), configController.GetLauncherVersion(), "", "", configController.GetImagePullPolicy(), configController.GetVerbosity(), configController.GetExtraEnv())
	pod = runningPodFrom(controller.Spec.Template)
	pod.Name = "virt-controller-xxxx"
	injectMetadata(&pod.ObjectMeta, configController)
	k.addPod(pod)
	// BUGFIX: the launcher version previously came from
	// configController.GetLauncherVersion() (copy-paste); the handler daemon
	// set must be built entirely from configHandler.
	handler, _ := components.NewHandlerDaemonSet(NAMESPACE, configHandler.GetImageRegistry(), configHandler.GetImagePrefix(), configHandler.GetHandlerVersion(), "", "", configHandler.GetLauncherVersion(), configHandler.GetImagePullPolicy(), configHandler.GetVerbosity(), configHandler.GetExtraEnv())
	pod = runningPodFrom(handler.Spec.Template)
	injectMetadata(&pod.ObjectMeta, configHandler)
	pod.Name = "virt-handler-xxxx"
	k.addPod(pod)
	if shouldAddPodDisruptionBudgets {
		k.addPodDisruptionBudgets(config, apiDeployment, controller, kv)
	}
}
// addPodsWithOptionalPodDisruptionBudgets adds the virt-api, virt-controller
// and virt-handler pods using the same deployment config for all three,
// optionally together with their pod disruption budgets.
func (k *KubeVirtTestData) addPodsWithOptionalPodDisruptionBudgets(config *util.KubeVirtDeploymentConfig, shouldAddPodDisruptionBudgets bool, kv *v1.KubeVirt) {
	k.addPodsWithIndividualConfigs(config, config, config, shouldAddPodDisruptionBudgets, kv)
}
// addPodsAndPodDisruptionBudgets adds the virt-api, virt-controller and
// virt-handler pods plus their pod disruption budgets, all built from the
// given deployment config.
func (k *KubeVirtTestData) addPodsAndPodDisruptionBudgets(config *util.KubeVirtDeploymentConfig, kv *v1.KubeVirt) {
	k.addPodsWithOptionalPodDisruptionBudgets(config, true, kv)
}
var _ = Describe("KubeVirt Operator", func() {
BeforeEach(func() {
err := os.Setenv(util.OperatorImageEnvName, fmt.Sprintf("%s/virt-operator:%s", "someregistry", "v9.9.9"))
Expect(err).NotTo(HaveOccurred())
})
Context("On valid KubeVirt object", func() {
It("Should not patch kubevirt namespace when labels are already defined", func(done Done) {
defer close(done)
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
// Add fake namespace with labels predefined
err := kvTestData.informers.Namespace.GetStore().Add(&k8sv1.Namespace{
TypeMeta: metav1.TypeMeta{
Kind: "Namespace",
},
ObjectMeta: metav1.ObjectMeta{
Name: NAMESPACE,
Labels: map[string]string{
"openshift.io/cluster-monitoring": "true",
},
},
})
Expect(err).To(Not(HaveOccurred()), "could not add fake namespace to the store")
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Generation: int64(1),
Finalizers: []string{util.KubeVirtFinalizer},
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeleted,
},
}
// Add kubevirt deployment and mark everything as ready
kvTestData.addKubeVirt(kv)
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.shouldExpectKubeVirtUpdateStatus(1)
kvTestData.shouldExpectCreations()
kvTestData.addInstallStrategy(kvTestData.defaultConfig)
kvTestData.addAll(kvTestData.defaultConfig, kv)
kvTestData.addPodsAndPodDisruptionBudgets(kvTestData.defaultConfig, kv)
kvTestData.makeHandlerReady()
kvTestData.makeApiAndControllerReady()
kvTestData.makeHandlerReady()
kvTestData.shouldExpectPatchesAndUpdates()
// Now when the controller runs, if the namespace will be patched, the test will fail
// because the patch is not expected here.
kvTestData.controller.Execute()
}, 30)
It("should delete install strategy configmap once kubevirt install is deleted", func(done Done) {
defer close(done)
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeleted,
},
}
kv.DeletionTimestamp = now()
util.UpdateConditionsDeleting(kv)
kvTestData.shouldExpectInstallStrategyDeletion()
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.addKubeVirt(kv)
kvTestData.addInstallStrategy(kvTestData.defaultConfig)
kvTestData.shouldExpectKubeVirtUpdate(1)
kvTestData.controller.Execute()
kv = kvTestData.getLatestKubeVirt(kv)
Expect(len(kv.ObjectMeta.Finalizers)).To(Equal(0))
}, 30)
It("should observe custom image tag in status during deploy", func(done Done) {
defer close(done)
defer GinkgoRecover()
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
Generation: int64(1),
},
Spec: v1.KubeVirtSpec{
ImageTag: "custom.tag",
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
OperatorVersion: version.Get().String(),
},
}
// create all resources which should already exist
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.addKubeVirt(kv)
customConfig := getConfig(kvTestData.defaultConfig.GetImageRegistry(), "custom.tag")
kvTestData.fakeNamespaceModificationEvent()
kvTestData.shouldExpectNamespacePatch()
kvTestData.shouldExpectPatchesAndUpdates()
kvTestData.addAll(customConfig, kv)
// install strategy config
kvTestData.addInstallStrategy(customConfig)
kvTestData.addPodsAndPodDisruptionBudgets(customConfig, kv)
kvTestData.makeApiAndControllerReady()
kvTestData.makeHandlerReady()
kvTestData.shouldExpectKubeVirtUpdateStatusVersion(1, customConfig)
kvTestData.controller.Execute()
kv = kvTestData.getLatestKubeVirt(kv)
shouldExpectHCOConditions(kv, k8sv1.ConditionTrue, k8sv1.ConditionFalse, k8sv1.ConditionFalse)
}, 30)
It("delete temporary validation webhook once virt-api is deployed", func(done Done) {
defer close(done)
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
Generation: int64(1),
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
OperatorVersion: version.Get().String(),
},
}
kvTestData.defaultConfig.SetTargetDeploymentConfig(kv)
kvTestData.defaultConfig.SetObservedDeploymentConfig(kv)
util.UpdateConditionsCreated(kv)
util.UpdateConditionsAvailable(kv)
kvTestData.deleteFromCache = false
// create all resources which should already exist
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.addKubeVirt(kv)
kvTestData.addDummyValidationWebhook()
kvTestData.addInstallStrategy(kvTestData.defaultConfig)
kvTestData.addAll(kvTestData.defaultConfig, kv)
kvTestData.addPodsAndPodDisruptionBudgets(kvTestData.defaultConfig, kv)
kvTestData.makeApiAndControllerReady()
kvTestData.makeHandlerReady()
kvTestData.shouldExpectDeletions()
kvTestData.fakeNamespaceModificationEvent()
kvTestData.shouldExpectNamespacePatch()
kvTestData.shouldExpectPatchesAndUpdates()
kvTestData.shouldExpectKubeVirtUpdateStatus(1)
kvTestData.controller.Execute()
Expect(kvTestData.totalDeletions).To(Equal(1))
}, 30)
It("should do nothing if KubeVirt object is deployed", func(done Done) {
defer close(done)
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
Generation: int64(1),
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
OperatorVersion: version.Get().String(),
},
}
kvTestData.defaultConfig.SetTargetDeploymentConfig(kv)
kvTestData.defaultConfig.SetObservedDeploymentConfig(kv)
util.UpdateConditionsCreated(kv)
util.UpdateConditionsAvailable(kv)
// create all resources which should already exist
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.addKubeVirt(kv)
kvTestData.addInstallStrategy(kvTestData.defaultConfig)
kvTestData.addAll(kvTestData.defaultConfig, kv)
kvTestData.addPodsAndPodDisruptionBudgets(kvTestData.defaultConfig, kv)
kvTestData.makeApiAndControllerReady()
kvTestData.makeHandlerReady()
kvTestData.fakeNamespaceModificationEvent()
kvTestData.shouldExpectNamespacePatch()
kvTestData.shouldExpectPatchesAndUpdates()
kvTestData.shouldExpectKubeVirtUpdateStatus(1)
kvTestData.controller.Execute()
}, 30)
It("should update KubeVirt object if generation IDs do not match", func(done Done) {
defer close(done)
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
Generation: int64(1),
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
OperatorVersion: version.Get().String(),
},
}
kvTestData.defaultConfig.SetTargetDeploymentConfig(kv)
kvTestData.defaultConfig.SetObservedDeploymentConfig(kv)
util.UpdateConditionsCreated(kv)
util.UpdateConditionsAvailable(kv)
kvTestData.defaultConfig.SetTargetDeploymentConfig(kv)
kvTestData.defaultConfig.SetObservedDeploymentConfig(kv)
util.UpdateConditionsCreated(kv)
util.UpdateConditionsAvailable(kv)
// create all resources which should already exist
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.addKubeVirt(kv)
kvTestData.addInstallStrategy(kvTestData.defaultConfig)
kvTestData.addAll(kvTestData.defaultConfig, kv)
kvTestData.addPodsAndPodDisruptionBudgets(kvTestData.defaultConfig, kv)
kvTestData.makeApiAndControllerReady()
kvTestData.makeHandlerReady()
kvTestData.fakeNamespaceModificationEvent()
kvTestData.shouldExpectNamespacePatch()
kvTestData.shouldExpectPatchesAndUpdates()
kvTestData.shouldExpectKubeVirtUpdateStatus(1)
// invalidate all lastGeneration versions
numGenerations := len(kv.Status.Generations)
for i := range kv.Status.Generations {
kv.Status.Generations[i].LastGeneration = -1
}
kvTestData.controller.Execute()
// add one for the namespace
Expect(kvTestData.totalPatches).To(Equal(numGenerations + 1))
// all these resources should be tracked by there generation so everyone that has been added should now be patched
// since they where the `lastGeneration` was set to -1 on the KubeVirt CR
Expect(kvTestData.resourceChanges["mutatingwebhookconfigurations"][Patched]).To(Equal(kvTestData.resourceChanges["mutatingwebhookconfigurations"][Added]))
Expect(kvTestData.resourceChanges["validatingwebhookconfigurations"][Patched]).To(Equal(kvTestData.resourceChanges["validatingwebhookconfigurations"][Added]))
Expect(kvTestData.resourceChanges["deployements"][Patched]).To(Equal(kvTestData.resourceChanges["deployements"][Added]))
Expect(kvTestData.resourceChanges["daemonsets"][Patched]).To(Equal(kvTestData.resourceChanges["daemonsets"][Added]))
}, 30)
It("should delete operator managed resources not in the deployed installstrategy", func() {
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
defer GinkgoRecover()
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
Generation: int64(1),
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
OperatorVersion: version.Get().String(),
},
}
kvTestData.defaultConfig.SetTargetDeploymentConfig(kv)
kvTestData.defaultConfig.SetObservedDeploymentConfig(kv)
util.UpdateConditionsDeploying(kv)
util.UpdateConditionsCreated(kv)
kvTestData.deleteFromCache = false
// create all resources which should already exist
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.addKubeVirt(kv)
kvTestData.addInstallStrategy(kvTestData.defaultConfig)
kvTestData.addAll(kvTestData.defaultConfig, kv)
numResources := kvTestData.generateRandomResources()
kvTestData.addPodsAndPodDisruptionBudgets(kvTestData.defaultConfig, kv)
kvTestData.makeApiAndControllerReady()
kvTestData.makeHandlerReady()
kvTestData.shouldExpectDeletions()
kvTestData.fakeNamespaceModificationEvent()
kvTestData.shouldExpectNamespacePatch()
kvTestData.shouldExpectPatchesAndUpdates()
kvTestData.shouldExpectKubeVirtUpdateStatus(1)
kvTestData.controller.Execute()
Expect(kvTestData.totalDeletions).To(Equal(numResources))
}, 30)
It("should fail if KubeVirt object already exists", func() {
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
kv1 := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install-1",
Namespace: NAMESPACE,
UID: "11111111111",
Finalizers: []string{util.KubeVirtFinalizer},
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
OperatorVersion: "v0.0.0-master+$Format:%h$",
},
}
kv2 := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install-2",
Namespace: NAMESPACE,
UID: "123123123",
},
Status: v1.KubeVirtStatus{},
}
kubecontroller.SetLatestApiVersionAnnotation(kv1)
util.UpdateConditionsCreated(kv1)
util.UpdateConditionsAvailable(kv1)
kvTestData.addKubeVirt(kv1)
kubecontroller.SetLatestApiVersionAnnotation(kv2)
kvTestData.addKubeVirt(kv2)
kvTestData.shouldExpectKubeVirtUpdateStatusFailureCondition(util.ConditionReasonDeploymentFailedExisting)
kvTestData.controller.execute(fmt.Sprintf("%s/%s", kv2.Namespace, kv2.Name))
}, 30)
It("should generate install strategy creation job for update version", func(done Done) {
defer close(done)
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
updatedVersion := "1.1.1"
updatedRegistry := "otherregistry"
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Spec: v1.KubeVirtSpec{
ImageTag: updatedVersion,
ImageRegistry: updatedRegistry,
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
OperatorVersion: version.Get().String(),
},
}
kvTestData.defaultConfig.SetTargetDeploymentConfig(kv)
kvTestData.defaultConfig.SetObservedDeploymentConfig(kv)
util.UpdateConditionsCreated(kv)
util.UpdateConditionsAvailable(kv)
// create all resources which should already exist
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.addKubeVirt(kv)
kvTestData.addInstallStrategy(kvTestData.defaultConfig)
kvTestData.shouldExpectKubeVirtUpdateStatus(1)
kvTestData.shouldExpectJobCreation()
kvTestData.controller.Execute()
}, 30)
It("should create an install strategy creation job with passthrough env vars, if provided in config", func(done Done) {
defer close(done)
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
config := getConfig("registry", "v1.1.1")
envKey := rand.String(10)
envVal := rand.String(10)
config.PassthroughEnvVars = map[string]string{envKey: envVal}
job, err := kvTestData.controller.generateInstallStrategyJob(config)
Expect(err).ToNot(HaveOccurred())
Expect(job.Spec.Template.Spec.Containers[0].Env).To(ContainElement(k8sv1.EnvVar{Name: envKey, Value: envVal}))
}, 30)
It("should create an api server deployment with passthrough env vars, if provided in config", func(done Done) {
defer close(done)
config := getConfig("registry", "v1.1.1")
envKey := rand.String(10)
envVal := rand.String(10)
config.PassthroughEnvVars = map[string]string{envKey: envVal}
apiDeployment, err := components.NewApiServerDeployment(NAMESPACE, config.GetImageRegistry(), config.GetImagePrefix(), config.GetApiVersion(), "", "", config.GetImagePullPolicy(), config.GetVerbosity(), config.GetExtraEnv())
Expect(err).ToNot(HaveOccurred())
Expect(apiDeployment.Spec.Template.Spec.Containers[0].Env).To(ContainElement(k8sv1.EnvVar{Name: envKey, Value: envVal}))
}, 30)
It("should create a controller deployment with passthrough env vars, if provided in config", func(done Done) {
defer close(done)
config := getConfig("registry", "v1.1.1")
envKey := rand.String(10)
envVal := rand.String(10)
config.PassthroughEnvVars = map[string]string{envKey: envVal}
controllerDeployment, err := components.NewControllerDeployment(NAMESPACE, config.GetImageRegistry(), config.GetImagePrefix(), config.GetControllerVersion(), config.GetLauncherVersion(), "", "", config.GetImagePullPolicy(), config.GetVerbosity(), config.GetExtraEnv())
Expect(err).ToNot(HaveOccurred())
Expect(controllerDeployment.Spec.Template.Spec.Containers[0].Env).To(ContainElement(k8sv1.EnvVar{Name: envKey, Value: envVal}))
}, 30)
It("should create a handler daemonset with passthrough env vars, if provided in config", func(done Done) {
defer close(done)
config := getConfig("registry", "v1.1.1")
envKey := rand.String(10)
envVal := rand.String(10)
config.PassthroughEnvVars = map[string]string{envKey: envVal}
handlerDaemonset, err := components.NewHandlerDaemonSet(NAMESPACE, config.GetImageRegistry(), config.GetImagePrefix(), config.GetHandlerVersion(), "", "", config.GetLauncherVersion(), config.GetImagePullPolicy(), config.GetVerbosity(), config.GetExtraEnv())
Expect(err).ToNot(HaveOccurred())
Expect(handlerDaemonset.Spec.Template.Spec.Containers[0].Env).To(ContainElement(k8sv1.EnvVar{Name: envKey, Value: envVal}))
}, 30)
It("should generate install strategy creation job if no install strategy exists", func(done Done) {
defer close(done)
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Status: v1.KubeVirtStatus{},
}
// create all resources which should already exist
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.addKubeVirt(kv)
kvTestData.shouldExpectKubeVirtUpdateStatus(1)
kvTestData.shouldExpectJobCreation()
kvTestData.controller.Execute()
}, 30)
It("should label install strategy creation job", func(done Done) {
defer close(done)
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Status: v1.KubeVirtStatus{},
}
job, err := kvTestData.controller.generateInstallStrategyJob(util.GetTargetConfigFromKV(kv))
Expect(err).ToNot(HaveOccurred())
Expect(job.Spec.Template.ObjectMeta.Labels).Should(HaveKeyWithValue(v1.AppLabel, virtOperatorJobAppLabel))
}, 30)
It("should delete install strategy creation job if job has failed", func(done Done) {
defer close(done)
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Status: v1.KubeVirtStatus{},
}
job, err := kvTestData.controller.generateInstallStrategyJob(util.GetTargetConfigFromKV(kv))
Expect(err).ToNot(HaveOccurred())
// will only create a new job after 10 seconds has passed.
// this is just a simple mechanism to prevent spin loops
// in the event that jobs are fast failing for some unknown reason.
completionTime := time.Now().Add(time.Duration(-10) * time.Second)
job.Status.CompletionTime = &metav1.Time{Time: completionTime}
// create all resources which should already exist
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.addKubeVirt(kv)
kvTestData.addInstallStrategyJob(job)
kvTestData.shouldExpectJobDeletion()
kvTestData.shouldExpectKubeVirtUpdateStatus(1)
kvTestData.controller.Execute()
}, 30)
It("should not delete completed install strategy creation job if job has failed less that 10 seconds ago", func(done Done) {
defer GinkgoRecover()
defer close(done)
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Status: v1.KubeVirtStatus{},
}
job, err := kvTestData.controller.generateInstallStrategyJob(util.GetTargetConfigFromKV(kv))
Expect(err).ToNot(HaveOccurred())
job.Status.CompletionTime = now()
// create all resources which should already exist
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.addKubeVirt(kv)
kvTestData.addInstallStrategyJob(job)
kvTestData.shouldExpectKubeVirtUpdateStatus(1)
kvTestData.controller.Execute()
}, 30)
It("should add resources on create", func() {
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
},
}
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.addKubeVirt(kv)
kvTestData.addInstallStrategy(kvTestData.defaultConfig)
job, err := kvTestData.controller.generateInstallStrategyJob(util.GetTargetConfigFromKV(kv))
Expect(err).ToNot(HaveOccurred())
job.Status.CompletionTime = now()
kvTestData.addInstallStrategyJob(job)
// ensure completed jobs are garbage collected once install strategy
// is loaded
kvTestData.deleteFromCache = false
kvTestData.shouldExpectJobDeletion()
kvTestData.shouldExpectKubeVirtUpdate(1)
kvTestData.shouldExpectKubeVirtUpdateStatus(1)
kvTestData.shouldExpectCreations()
kvTestData.controller.Execute()
kv = kvTestData.getLatestKubeVirt(kv)
Expect(kv.Status.Phase).To(Equal(v1.KubeVirtPhaseDeploying))
Expect(len(kv.Status.Conditions)).To(Equal(3))
Expect(len(kv.ObjectMeta.Finalizers)).To(Equal(1))
shouldExpectHCOConditions(kv, k8sv1.ConditionFalse, k8sv1.ConditionTrue, k8sv1.ConditionFalse)
// 3 in total are yet missing at this point
// because waiting on controller, controller's PDB and virt-handler daemonset until API server deploys successfully
expectedUncreatedResources := 3
// 1 because a temporary validation webhook is created to block new CRDs until api server is deployed
expectedTemporaryResources := 1
Expect(kvTestData.totalAdds).To(Equal(resourceCount - expectedUncreatedResources + expectedTemporaryResources))
Expect(len(kvTestData.controller.stores.ServiceAccountCache.List())).To(Equal(3))
Expect(len(kvTestData.controller.stores.ClusterRoleCache.List())).To(Equal(7))
Expect(len(kvTestData.controller.stores.ClusterRoleBindingCache.List())).To(Equal(5))
Expect(len(kvTestData.controller.stores.RoleCache.List())).To(Equal(3))
Expect(len(kvTestData.controller.stores.RoleBindingCache.List())).To(Equal(3))
Expect(len(kvTestData.controller.stores.CrdCache.List())).To(Equal(8))
Expect(len(kvTestData.controller.stores.ServiceCache.List())).To(Equal(3))
Expect(len(kvTestData.controller.stores.DeploymentCache.List())).To(Equal(1))
Expect(len(kvTestData.controller.stores.DaemonSetCache.List())).To(Equal(0))
Expect(len(kvTestData.controller.stores.ValidationWebhookCache.List())).To(Equal(3))
Expect(len(kvTestData.controller.stores.PodDisruptionBudgetCache.List())).To(Equal(1))
Expect(len(kvTestData.controller.stores.SCCCache.List())).To(Equal(3))
Expect(len(kvTestData.controller.stores.ServiceMonitorCache.List())).To(Equal(1))
Expect(len(kvTestData.controller.stores.PrometheusRuleCache.List())).To(Equal(1))
Expect(kvTestData.resourceChanges["poddisruptionbudgets"][Added]).To(Equal(1))
}, 30)
Context("when the monitor namespace does not exist", func() {
It("should not create ServiceMonitor resources", func() {
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
}
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.addKubeVirt(kv)
// install strategy config
resource, _ := install.NewInstallStrategyConfigMap(kvTestData.defaultConfig, false, NAMESPACE)
resource.Name = fmt.Sprintf("%s-%s", resource.Name, rand.String(10))
kvTestData.addResource(resource, kvTestData.defaultConfig, nil)
job, err := kvTestData.controller.generateInstallStrategyJob(util.GetTargetConfigFromKV(kv))
Expect(err).ToNot(HaveOccurred())
job.Status.CompletionTime = now()
kvTestData.addInstallStrategyJob(job)
// ensure completed jobs are garbage collected once install strategy
// is loaded
kvTestData.deleteFromCache = false
kvTestData.shouldExpectJobDeletion()
kvTestData.shouldExpectKubeVirtUpdateStatus(1)
kvTestData.shouldExpectCreations()
kvTestData.controller.Execute()
Expect(len(kvTestData.controller.stores.RoleCache.List())).To(Equal(2))
Expect(len(kvTestData.controller.stores.RoleBindingCache.List())).To(Equal(2))
Expect(len(kvTestData.controller.stores.ServiceMonitorCache.List())).To(Equal(0))
}, 30)
})
It("should pause rollback until api server is rolled over.", func(done Done) {
defer close(done)
defer GinkgoRecover()
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
rollbackConfig := getConfig("otherregistry", "9.9.7")
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Spec: v1.KubeVirtSpec{
ImageTag: rollbackConfig.GetKubeVirtVersion(),
ImageRegistry: rollbackConfig.GetImageRegistry(),
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
OperatorVersion: version.Get().String(),
},
}
kvTestData.defaultConfig.SetTargetDeploymentConfig(kv)
kvTestData.defaultConfig.SetObservedDeploymentConfig(kv)
util.UpdateConditionsCreated(kv)
util.UpdateConditionsAvailable(kv)
// create all resources which should already exist
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.addKubeVirt(kv)
kvTestData.addInstallStrategy(kvTestData.defaultConfig)
kvTestData.addInstallStrategy(rollbackConfig)
kvTestData.addAll(kvTestData.defaultConfig, kv)
kvTestData.addPodsAndPodDisruptionBudgets(kvTestData.defaultConfig, kv)
kvTestData.makeApiAndControllerReady()
kvTestData.makeHandlerReady()
kvTestData.addToCache = false
kvTestData.shouldExpectRbacBackupCreations()
kvTestData.shouldExpectPatchesAndUpdates()
kvTestData.shouldExpectKubeVirtUpdateStatus(1)
kvTestData.controller.Execute()
kv = kvTestData.getLatestKubeVirt(kv)
// conditions should reflect an ongoing update
shouldExpectHCOConditions(kv, k8sv1.ConditionTrue, k8sv1.ConditionTrue, k8sv1.ConditionTrue)
// on rollback or create, api server must be online first before controllers and daemonset.
// On rollback this prevents someone from posting invalid specs to
// the cluster from newer versions when an older version is being deployed.
// On create this prevents invalid specs from entering the cluster
// while controllers are available to process them.
// 4 because 2 for virt-controller service and deployment,
// 1 because of the pdb of virt-controller
// and another 1 because of the namespace was not patched yet.
Expect(kvTestData.totalPatches).To(Equal(patchCount - 4))
// 2 for virt-controller and pdb
Expect(kvTestData.totalUpdates).To(Equal(updateCount))
Expect(kvTestData.resourceChanges["poddisruptionbudgets"][Patched]).To(Equal(1))
}, 30)
It("should pause update after daemonsets are rolled over", func(done Done) {
defer close(done)
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
updatedConfig := getConfig("otherregistry", "9.9.10")
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Spec: v1.KubeVirtSpec{
ImageTag: updatedConfig.GetKubeVirtVersion(),
ImageRegistry: updatedConfig.GetImageRegistry(),
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
OperatorVersion: version.Get().String(),
},
}
kvTestData.defaultConfig.SetTargetDeploymentConfig(kv)
kvTestData.defaultConfig.SetObservedDeploymentConfig(kv)
util.UpdateConditionsCreated(kv)
util.UpdateConditionsAvailable(kv)
// create all resources which should already exist
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.addKubeVirt(kv)
kvTestData.addInstallStrategy(kvTestData.defaultConfig)
kvTestData.addInstallStrategy(updatedConfig)
kvTestData.addAll(kvTestData.defaultConfig, kv)
kvTestData.addPodsAndPodDisruptionBudgets(kvTestData.defaultConfig, kv)
kvTestData.makeApiAndControllerReady()
kvTestData.makeHandlerReady()
kvTestData.addToCache = false
kvTestData.shouldExpectRbacBackupCreations()
kvTestData.shouldExpectPatchesAndUpdates()
kvTestData.shouldExpectKubeVirtUpdateStatus(1)
kvTestData.controller.Execute()
kv = kvTestData.getLatestKubeVirt(kv)
// conditions should reflect an ongoing update
shouldExpectHCOConditions(kv, k8sv1.ConditionTrue, k8sv1.ConditionTrue, k8sv1.ConditionTrue)
Expect(kvTestData.totalUpdates).To(Equal(updateCount))
// daemonset, controller and apiserver pods are updated in this order.
// this prevents the new API from coming online until the controllers can manage it.
// The PDBs will prevent updated pods from getting "ready", so update should pause after
// daemonsets and before controller and namespace
// 5 because virt-controller, virt-api, PDBs and the namespace are not patched
Expect(kvTestData.totalPatches).To(Equal(patchCount - 5))
// Make sure the 5 unpatched are as expected
Expect(kvTestData.resourceChanges["deployments"][Patched]).To(Equal(0)) // virt-controller and virt-api unpatched
Expect(kvTestData.resourceChanges["poddisruptionbudgets"][Patched]).To(Equal(0)) // PDBs unpatched
Expect(kvTestData.resourceChanges["namespace"][Patched]).To(Equal(0)) // namespace unpatched
}, 30)
It("should pause update after controllers are rolled over", func(done Done) {
defer close(done)
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
updatedConfig := getConfig("otherregistry", "9.9.10")
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Spec: v1.KubeVirtSpec{
ImageTag: updatedConfig.GetKubeVirtVersion(),
ImageRegistry: updatedConfig.GetImageRegistry(),
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
OperatorVersion: version.Get().String(),
},
}
kvTestData.defaultConfig.SetTargetDeploymentConfig(kv)
kvTestData.defaultConfig.SetObservedDeploymentConfig(kv)
util.UpdateConditionsCreated(kv)
util.UpdateConditionsAvailable(kv)
// create all resources which should already exist
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.addKubeVirt(kv)
kvTestData.addInstallStrategy(kvTestData.defaultConfig)
kvTestData.addInstallStrategy(updatedConfig)
kvTestData.addAll(kvTestData.defaultConfig, kv)
// Create virt-api and virt-controller under kvTestData.defaultConfig,
// but use updatedConfig for virt-handler (hack) to avoid pausing after daemonsets
kvTestData.addPodsWithIndividualConfigs(kvTestData.defaultConfig, kvTestData.defaultConfig, updatedConfig, true, kv)
kvTestData.makeApiAndControllerReady()
kvTestData.makeHandlerReady()
kvTestData.addToCache = false
kvTestData.shouldExpectRbacBackupCreations()
kvTestData.shouldExpectPatchesAndUpdates()
kvTestData.shouldExpectKubeVirtUpdateStatus(1)
kvTestData.controller.Execute()
kv = kvTestData.getLatestKubeVirt(kv)
// conditions should reflect an ongoing update
shouldExpectHCOConditions(kv, k8sv1.ConditionTrue, k8sv1.ConditionTrue, k8sv1.ConditionTrue)
Expect(kvTestData.totalUpdates).To(Equal(updateCount))
// The update was hacked to avoid pausing after rolling out the daemonsets (virt-handler)
// That will allow both daemonset and controller pods to get patched before the pause.
// 3 because virt-api, PDB and the namespace should not be patched
Expect(kvTestData.totalPatches).To(Equal(patchCount - 3))
// Make sure the 3 unpatched are as expected
Expect(kvTestData.resourceChanges["deployments"][Patched]).To(Equal(1)) // virt-operator patched, virt-api unpatched
Expect(kvTestData.resourceChanges["poddisruptionbudgets"][Patched]).To(Equal(1)) // 1 of 2 PDBs patched
Expect(kvTestData.resourceChanges["namespace"][Patched]).To(Equal(0)) // namespace unpatched
}, 30)
It("should update kubevirt resources when Operator version changes if no imageTag and imageRegistry is explicitly set.", func() {
os.Setenv(util.OperatorImageEnvName, fmt.Sprintf("%s/virt-operator:%s", "otherregistry", "1.1.1"))
updatedConfig := getConfig("", "")
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Spec: v1.KubeVirtSpec{},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
OperatorVersion: version.Get().String(),
},
}
kvTestData.defaultConfig.SetTargetDeploymentConfig(kv)
kvTestData.defaultConfig.SetObservedDeploymentConfig(kv)
util.UpdateConditionsCreated(kv)
util.UpdateConditionsAvailable(kv)
// create all resources which should already exist
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.addKubeVirt(kv)
kvTestData.addInstallStrategy(kvTestData.defaultConfig)
kvTestData.addInstallStrategy(updatedConfig)
kvTestData.addAll(kvTestData.defaultConfig, kv)
kvTestData.addPodsAndPodDisruptionBudgets(kvTestData.defaultConfig, kv)
// pods for the new version are added so this test won't
// wait for daemonsets to rollover before updating/patching
// all resources.
kvTestData.addPodsWithOptionalPodDisruptionBudgets(updatedConfig, false, kv)
kvTestData.makeApiAndControllerReady()
kvTestData.makeHandlerReady()
kvTestData.shouldExpectPatchesAndUpdates()
kvTestData.shouldExpectKubeVirtUpdateStatus(1)
kvTestData.fakeNamespaceModificationEvent()
kvTestData.shouldExpectNamespacePatch()
kvTestData.controller.Execute()
kv = kvTestData.getLatestKubeVirt(kv)
// conditions should reflect a successful update
shouldExpectHCOConditions(kv, k8sv1.ConditionTrue, k8sv1.ConditionFalse, k8sv1.ConditionFalse)
Expect(kvTestData.totalPatches).To(Equal(patchCount))
Expect(kvTestData.totalUpdates).To(Equal(updateCount))
// ensure every resource is either patched or updated
// + 1 is for the namespace patch which we don't consider as a resource we own.
Expect(kvTestData.totalUpdates + kvTestData.totalPatches).To(Equal(resourceCount + 1))
Expect(kvTestData.resourceChanges["poddisruptionbudgets"][Patched]).To(Equal(2))
}, 30)
It("should update resources when changing KubeVirt version.", func() {
updatedConfig := getConfig("otherregistry", "1.1.1")
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Spec: v1.KubeVirtSpec{
ImageTag: updatedConfig.GetKubeVirtVersion(),
ImageRegistry: updatedConfig.GetImageRegistry(),
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
OperatorVersion: version.Get().String(),
},
}
kvTestData.defaultConfig.SetTargetDeploymentConfig(kv)
kvTestData.defaultConfig.SetObservedDeploymentConfig(kv)
util.UpdateConditionsCreated(kv)
util.UpdateConditionsAvailable(kv)
// create all resources which should already exist
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.addKubeVirt(kv)
kvTestData.addInstallStrategy(kvTestData.defaultConfig)
kvTestData.addInstallStrategy(updatedConfig)
kvTestData.addAll(kvTestData.defaultConfig, kv)
kvTestData.addPodsAndPodDisruptionBudgets(kvTestData.defaultConfig, kv)
// pods for the new version are added so this test won't
// wait for daemonsets to rollover before updating/patching
// all resources.
kvTestData.addPodsWithOptionalPodDisruptionBudgets(updatedConfig, false, kv)
kvTestData.makeApiAndControllerReady()
kvTestData.makeHandlerReady()
kvTestData.shouldExpectPatchesAndUpdates()
kvTestData.shouldExpectKubeVirtUpdateStatus(1)
kvTestData.fakeNamespaceModificationEvent()
kvTestData.shouldExpectNamespacePatch()
kvTestData.controller.Execute()
kv = kvTestData.getLatestKubeVirt(kv)
// conditions should reflect a successful update
shouldExpectHCOConditions(kv, k8sv1.ConditionTrue, k8sv1.ConditionFalse, k8sv1.ConditionFalse)
Expect(kvTestData.totalPatches).To(Equal(patchCount))
Expect(kvTestData.totalUpdates).To(Equal(updateCount))
// ensure every resource is either patched or updated
// + 1 is for the namespace patch which we don't consider as a resource we own.
Expect(kvTestData.totalUpdates + kvTestData.totalPatches).To(Equal(resourceCount + 1))
}, 30)
It("should patch poddisruptionbudgets when changing KubeVirt version.", func() {
updatedConfig := getConfig("otherregistry", "1.1.1")
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Spec: v1.KubeVirtSpec{
ImageTag: updatedConfig.GetKubeVirtVersion(),
ImageRegistry: updatedConfig.GetImageRegistry(),
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
Conditions: []v1.KubeVirtCondition{
{
Type: v1.KubeVirtConditionCreated,
Status: k8sv1.ConditionTrue,
Reason: util.ConditionReasonDeploymentCreated,
Message: "All resources were created.",
},
{
Type: v1.KubeVirtConditionAvailable,
Status: k8sv1.ConditionTrue,
Reason: util.ConditionReasonDeploymentReady,
Message: "All components are ready.",
},
},
OperatorVersion: version.Get().String(),
},
}
kvTestData.defaultConfig.SetTargetDeploymentConfig(kv)
kvTestData.defaultConfig.SetObservedDeploymentConfig(kv)
// create all resources which should already exist
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.addKubeVirt(kv)
kvTestData.addInstallStrategy(kvTestData.defaultConfig)
kvTestData.addInstallStrategy(updatedConfig)
kvTestData.addAll(kvTestData.defaultConfig, kv)
kvTestData.addPodsAndPodDisruptionBudgets(kvTestData.defaultConfig, kv)
// pods for the new version are added so this test won't
// wait for daemonsets to rollover before updating/patching
// all resources.
kvTestData.addPodsWithOptionalPodDisruptionBudgets(updatedConfig, false, kv)
kvTestData.makeApiAndControllerReady()
kvTestData.makeHandlerReady()
kvTestData.shouldExpectPatchesAndUpdates()
kvTestData.shouldExpectKubeVirtUpdateStatus(1)
kvTestData.controller.Execute()
Expect(kvTestData.resourceChanges["poddisruptionbudgets"][Patched]).To(Equal(2))
}, 30)
It("should remove resources on deletion", func() {
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
},
}
kv.DeletionTimestamp = now()
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.addKubeVirt(kv)
// create all resources which should be deleted
kvTestData.addInstallStrategy(kvTestData.defaultConfig)
kvTestData.addAll(kvTestData.defaultConfig, kv)
kvTestData.shouldExpectKubeVirtUpdateStatus(1)
kvTestData.shouldExpectDeletions()
kvTestData.shouldExpectInstallStrategyDeletion()
kvTestData.controller.Execute()
// Note: in real life during the first execution loop very probably only CRDs are deleted,
// because that takes some time (see the check that the crd store is empty before going on with deletions)
// But in this test the deletion succeeds immediately, so everything is deleted on first try
Expect(kvTestData.totalDeletions).To(Equal(resourceCount))
kv = kvTestData.getLatestKubeVirt(kv)
Expect(kv.Status.Phase).To(Equal(v1.KubeVirtPhaseDeleted))
Expect(len(kv.Status.Conditions)).To(Equal(3))
shouldExpectHCOConditions(kv, k8sv1.ConditionFalse, k8sv1.ConditionFalse, k8sv1.ConditionTrue)
}, 30)
It("should remove poddisruptionbudgets on deletion", func() {
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
},
}
kv.DeletionTimestamp = now()
kubecontroller.SetLatestApiVersionAnnotation(kv)
kvTestData.addKubeVirt(kv)
// create all resources which should be deleted
kvTestData.addInstallStrategy(kvTestData.defaultConfig)
kvTestData.addAll(kvTestData.defaultConfig, kv)
kvTestData.shouldExpectKubeVirtUpdateStatus(1)
kvTestData.shouldExpectDeletions()
kvTestData.shouldExpectInstallStrategyDeletion()
kvTestData.controller.Execute()
Expect(kvTestData.resourceChanges["poddisruptionbudgets"][Deleted]).To(Equal(2))
}, 30)
})
Context("On install strategy dump", func() {
It("should generate latest install strategy and post as config map", func(done Done) {
defer close(done)
kvTestData := KubeVirtTestData{}
kvTestData.BeforeTest()
defer kvTestData.AfterTest()
config, err := util.GetConfigFromEnv()
Expect(err).ToNot(HaveOccurred())
kvTestData.kubeClient.Fake.PrependReactor("create", "configmaps", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
create, ok := action.(testing.CreateAction)
Expect(ok).To(BeTrue())
configMap := create.GetObject().(*k8sv1.ConfigMap)
Expect(configMap.GenerateName).To(Equal("kubevirt-install-strategy-"))
version, ok := configMap.ObjectMeta.Annotations[v1.InstallStrategyVersionAnnotation]
Expect(ok).To(BeTrue())
Expect(version).To(Equal(config.GetKubeVirtVersion()))
registry, ok := configMap.ObjectMeta.Annotations[v1.InstallStrategyRegistryAnnotation]
Expect(ok).To(BeTrue())
Expect(registry).To(Equal(config.GetImageRegistry()))
id, ok := configMap.ObjectMeta.Annotations[v1.InstallStrategyIdentifierAnnotation]
Expect(ok).To(BeTrue())
Expect(id).To(Equal(config.GetDeploymentID()))
_, ok = configMap.Data["manifests"]
Expect(ok).To(BeTrue())
return true, create.GetObject(), nil
})
// This generates and posts the install strategy config map
install.DumpInstallStrategyToConfigMap(kvTestData.virtClient, NAMESPACE)
}, 30)
})
})
// now returns the current wall-clock time wrapped as a *metav1.Time,
// suitable for assigning to object metadata fields such as DeletionTimestamp.
func now() *metav1.Time {
	t := metav1.Now()
	return &t
}
// getSCC builds a minimal "privileged" SecurityContextConstraints fixture
// seeded with a single non-KubeVirt user entry, for use as pre-existing
// cluster state in tests.
func getSCC() secv1.SecurityContextConstraints {
	scc := secv1.SecurityContextConstraints{}
	scc.ObjectMeta = metav1.ObjectMeta{Name: "privileged"}
	scc.Users = []string{"someUser"}
	return scc
}
// getConfig derives a KubeVirtDeploymentConfig from a synthetic KubeVirt
// object in the test namespace that carries the given image registry and tag.
func getConfig(registry, version string) *util.KubeVirtDeploymentConfig {
	kv := &v1.KubeVirt{}
	kv.ObjectMeta = metav1.ObjectMeta{Namespace: NAMESPACE}
	kv.Spec = v1.KubeVirtSpec{
		ImageRegistry: registry,
		ImageTag:      version,
	}
	return util.GetTargetConfigFromKV(kv)
}
// syncCaches starts the KubeVirt informer and every operator informer, then
// blocks until all of their caches have synced.
//
// The original version ignored the boolean returned by WaitForCacheSync for
// every informer except kvInformer; here all informers are run in one loop
// and a single WaitForCacheSync call over all HasSynced funcs is asserted,
// so a cache that fails to sync now fails the test instead of being silently
// ignored.
func syncCaches(stop chan struct{}, kvInformer cache.SharedIndexInformer, informers util.Informers) {
	all := []cache.SharedIndexInformer{
		kvInformer,
		informers.ServiceAccount,
		informers.ClusterRole,
		informers.ClusterRoleBinding,
		informers.Role,
		informers.RoleBinding,
		informers.Crd,
		informers.Service,
		informers.Deployment,
		informers.DaemonSet,
		informers.ValidationWebhook,
		informers.MutatingWebhook,
		informers.APIService,
		informers.SCC,
		informers.InstallStrategyJob,
		informers.InstallStrategyConfigMap,
		informers.InfrastructurePod,
		informers.PodDisruptionBudget,
		informers.ServiceMonitor,
		informers.Namespace,
		informers.PrometheusRule,
		informers.Secrets,
		informers.ConfigMap,
	}
	// Run every informer and collect its HasSynced func so one
	// WaitForCacheSync call can assert that every cache is populated.
	syncFns := make([]cache.InformerSynced, 0, len(all))
	for _, informer := range all {
		go informer.Run(stop)
		syncFns = append(syncFns, informer.HasSynced)
	}
	Expect(cache.WaitForCacheSync(stop, syncFns...)).To(BeTrue())
}
// injectMetadata stamps the operator-managed labels and install-strategy
// annotations derived from the given deployment config onto an object's
// metadata. A nil config is a no-op.
func injectMetadata(objectMeta *metav1.ObjectMeta, config *util.KubeVirtDeploymentConfig) {
	if config == nil {
		return
	}
	// Lazily initialize the maps so callers may pass bare ObjectMeta values.
	if objectMeta.Labels == nil {
		objectMeta.Labels = map[string]string{}
	}
	if objectMeta.Annotations == nil {
		objectMeta.Annotations = map[string]string{}
	}
	objectMeta.Labels[v1.ManagedByLabel] = v1.ManagedByLabelOperatorValue
	objectMeta.Labels[v1.AppComponentLabel] = v1.AppComponent
	// Product labels are optional and only set when configured.
	if productVersion := config.GetProductVersion(); productVersion != "" {
		objectMeta.Labels[v1.AppVersionLabel] = productVersion
	}
	if productName := config.GetProductName(); productName != "" {
		objectMeta.Labels[v1.AppPartOfLabel] = productName
	}
	objectMeta.Annotations[v1.InstallStrategyVersionAnnotation] = config.GetKubeVirtVersion()
	objectMeta.Annotations[v1.InstallStrategyRegistryAnnotation] = config.GetImageRegistry()
	objectMeta.Annotations[v1.InstallStrategyIdentifierAnnotation] = config.GetDeploymentID()
	objectMeta.Annotations[v1.KubeVirtGenerationAnnotation] = "1"
}
// shouldExpectHCOConditions asserts that the KubeVirt status contains the
// Available, Progressing, and Degraded conditions with the expected statuses,
// regardless of their order in the conditions slice.
func shouldExpectHCOConditions(kv *v1.KubeVirt, available k8sv1.ConditionStatus, progressing k8sv1.ConditionStatus, degraded k8sv1.ConditionStatus) {
	conditionType := func(c v1.KubeVirtCondition) v1.KubeVirtConditionType { return c.Type }
	conditionStatus := func(c v1.KubeVirtCondition) k8sv1.ConditionStatus { return c.Status }
	// One ContainElement assertion per expected (type, status) pair.
	expected := map[v1.KubeVirtConditionType]k8sv1.ConditionStatus{
		v1.KubeVirtConditionAvailable:   available,
		v1.KubeVirtConditionProgressing: progressing,
		v1.KubeVirtConditionDegraded:    degraded,
	}
	for condType, status := range expected {
		Expect(kv.Status.Conditions).To(ContainElement(
			And(
				WithTransform(conditionType, Equal(condType)),
				WithTransform(conditionStatus, Equal(status)),
			),
		))
	}
}
|
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2017 Red Hat, Inc.
*
*/
package virt_operator
import (
"encoding/json"
"fmt"
"os"
"time"
"github.com/golang/mock/gomock"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
secv1 "github.com/openshift/api/security/v1"
secv1fake "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
k8sv1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
extv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
extclientfake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
framework "k8s.io/client-go/tools/cache/testing"
"k8s.io/client-go/tools/record"
v1 "kubevirt.io/kubevirt/pkg/api/v1"
"kubevirt.io/kubevirt/pkg/kubecli"
"kubevirt.io/kubevirt/pkg/log"
"kubevirt.io/kubevirt/pkg/testutils"
"kubevirt.io/kubevirt/pkg/version"
"kubevirt.io/kubevirt/pkg/virt-operator/creation/components"
"kubevirt.io/kubevirt/pkg/virt-operator/creation/rbac"
installstrategy "kubevirt.io/kubevirt/pkg/virt-operator/install-strategy"
"kubevirt.io/kubevirt/pkg/virt-operator/util"
)
var _ = Describe("KubeVirt Operator", func() {
log.Log.SetIOWriter(GinkgoWriter)
var ctrl *gomock.Controller
var kvInterface *kubecli.MockKubeVirtInterface
var kvSource *framework.FakeControllerSource
var kvInformer cache.SharedIndexInformer
var serviceAccountSource *framework.FakeControllerSource
var clusterRoleSource *framework.FakeControllerSource
var clusterRoleBindingSource *framework.FakeControllerSource
var roleSource *framework.FakeControllerSource
var roleBindingSource *framework.FakeControllerSource
var crdSource *framework.FakeControllerSource
var serviceSource *framework.FakeControllerSource
var deploymentSource *framework.FakeControllerSource
var daemonSetSource *framework.FakeControllerSource
var sccSource *framework.FakeControllerSource
var installStrategyConfigMapSource *framework.FakeControllerSource
var installStrategyJobSource *framework.FakeControllerSource
var infrastructurePodSource *framework.FakeControllerSource
var stop chan struct{}
var controller *KubeVirtController
var recorder *record.FakeRecorder
var mockQueue *testutils.MockWorkQueue
var virtClient *kubecli.MockKubevirtClient
var kubeClient *fake.Clientset
var secClient *secv1fake.FakeSecurityV1
var extClient *extclientfake.Clientset
var informers util.Informers
var stores util.Stores
defaultImageTag := "v9.9.9"
defaultRegistry := "someregistry"
os.Setenv(util.OperatorImageEnvName, fmt.Sprintf("%s/virt-operator:%s", defaultRegistry, defaultImageTag))
var totalAdds int
var totalUpdates int
var totalPatches int
var totalDeletions int
NAMESPACE := "kubevirt-test"
resourceCount := 29
patchCount := 13
updateCount := 16
deleteFromCache := true
syncCaches := func(stop chan struct{}) {
go kvInformer.Run(stop)
go informers.ServiceAccount.Run(stop)
go informers.ClusterRole.Run(stop)
go informers.ClusterRoleBinding.Run(stop)
go informers.Role.Run(stop)
go informers.RoleBinding.Run(stop)
go informers.Crd.Run(stop)
go informers.Service.Run(stop)
go informers.Deployment.Run(stop)
go informers.DaemonSet.Run(stop)
go informers.SCC.Run(stop)
go informers.InstallStrategyJob.Run(stop)
go informers.InstallStrategyConfigMap.Run(stop)
go informers.InfrastructurePod.Run(stop)
Expect(cache.WaitForCacheSync(stop, kvInformer.HasSynced)).To(BeTrue())
cache.WaitForCacheSync(stop, informers.ServiceAccount.HasSynced)
cache.WaitForCacheSync(stop, informers.ClusterRole.HasSynced)
cache.WaitForCacheSync(stop, informers.ClusterRoleBinding.HasSynced)
cache.WaitForCacheSync(stop, informers.Role.HasSynced)
cache.WaitForCacheSync(stop, informers.RoleBinding.HasSynced)
cache.WaitForCacheSync(stop, informers.Crd.HasSynced)
cache.WaitForCacheSync(stop, informers.Service.HasSynced)
cache.WaitForCacheSync(stop, informers.Deployment.HasSynced)
cache.WaitForCacheSync(stop, informers.DaemonSet.HasSynced)
cache.WaitForCacheSync(stop, informers.SCC.HasSynced)
cache.WaitForCacheSync(stop, informers.InstallStrategyJob.HasSynced)
cache.WaitForCacheSync(stop, informers.InstallStrategyConfigMap.HasSynced)
cache.WaitForCacheSync(stop, informers.InfrastructurePod.HasSynced)
}
getSCC := func() secv1.SecurityContextConstraints {
return secv1.SecurityContextConstraints{
ObjectMeta: metav1.ObjectMeta{
Name: "privileged",
},
Users: []string{
"someUser",
},
}
}
BeforeEach(func() {
totalAdds = 0
totalUpdates = 0
totalPatches = 0
totalDeletions = 0
deleteFromCache = true
stop = make(chan struct{})
ctrl = gomock.NewController(GinkgoT())
virtClient = kubecli.NewMockKubevirtClient(ctrl)
kvInterface = kubecli.NewMockKubeVirtInterface(ctrl)
kvInformer, kvSource = testutils.NewFakeInformerFor(&v1.KubeVirt{})
recorder = record.NewFakeRecorder(100)
informers.ServiceAccount, serviceAccountSource = testutils.NewFakeInformerFor(&k8sv1.ServiceAccount{})
stores.ServiceAccountCache = informers.ServiceAccount.GetStore()
informers.ClusterRole, clusterRoleSource = testutils.NewFakeInformerFor(&rbacv1.ClusterRole{})
stores.ClusterRoleCache = informers.ClusterRole.GetStore()
informers.ClusterRoleBinding, clusterRoleBindingSource = testutils.NewFakeInformerFor(&rbacv1.ClusterRoleBinding{})
stores.ClusterRoleBindingCache = informers.ClusterRoleBinding.GetStore()
informers.Role, roleSource = testutils.NewFakeInformerFor(&rbacv1.Role{})
stores.RoleCache = informers.Role.GetStore()
informers.RoleBinding, roleBindingSource = testutils.NewFakeInformerFor(&rbacv1.RoleBinding{})
stores.RoleBindingCache = informers.RoleBinding.GetStore()
informers.Crd, crdSource = testutils.NewFakeInformerFor(&extv1beta1.CustomResourceDefinition{})
stores.CrdCache = informers.Crd.GetStore()
informers.Service, serviceSource = testutils.NewFakeInformerFor(&k8sv1.Service{})
stores.ServiceCache = informers.Service.GetStore()
informers.Deployment, deploymentSource = testutils.NewFakeInformerFor(&appsv1.Deployment{})
stores.DeploymentCache = informers.Deployment.GetStore()
informers.DaemonSet, daemonSetSource = testutils.NewFakeInformerFor(&appsv1.DaemonSet{})
stores.DaemonSetCache = informers.DaemonSet.GetStore()
informers.SCC, sccSource = testutils.NewFakeInformerFor(&secv1.SecurityContextConstraints{})
stores.SCCCache = informers.SCC.GetStore()
informers.InstallStrategyConfigMap, installStrategyConfigMapSource = testutils.NewFakeInformerFor(&k8sv1.ConfigMap{})
stores.InstallStrategyConfigMapCache = informers.InstallStrategyConfigMap.GetStore()
informers.InstallStrategyJob, installStrategyJobSource = testutils.NewFakeInformerFor(&batchv1.Job{})
stores.InstallStrategyJobCache = informers.InstallStrategyJob.GetStore()
informers.InfrastructurePod, infrastructurePodSource = testutils.NewFakeInformerFor(&k8sv1.Pod{})
stores.InfrastructurePodCache = informers.InfrastructurePod.GetStore()
controller = NewKubeVirtController(virtClient, kvInformer, recorder, stores, informers)
// Wrap our workqueue to have a way to detect when we are done processing updates
mockQueue = testutils.NewMockWorkQueue(controller.queue)
controller.queue = mockQueue
// Set up mock client
virtClient.EXPECT().KubeVirt(NAMESPACE).Return(kvInterface).AnyTimes()
kubeClient = fake.NewSimpleClientset()
secClient = &secv1fake.FakeSecurityV1{
Fake: &fake.NewSimpleClientset().Fake,
}
extClient = extclientfake.NewSimpleClientset()
virtClient.EXPECT().CoreV1().Return(kubeClient.CoreV1()).AnyTimes()
virtClient.EXPECT().BatchV1().Return(kubeClient.BatchV1()).AnyTimes()
virtClient.EXPECT().RbacV1().Return(kubeClient.RbacV1()).AnyTimes()
virtClient.EXPECT().AppsV1().Return(kubeClient.AppsV1()).AnyTimes()
virtClient.EXPECT().SecClient().Return(secClient).AnyTimes()
virtClient.EXPECT().ExtensionsClient().Return(extClient).AnyTimes()
// Make sure that all unexpected calls to kubeClient will fail
kubeClient.Fake.PrependReactor("*", "*", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
Expect(action).To(BeNil())
return true, nil, nil
})
secClient.Fake.PrependReactor("*", "*", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
Expect(action).To(BeNil())
return true, nil, nil
})
extClient.Fake.PrependReactor("*", "*", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
Expect(action).To(BeNil())
return true, nil, nil
})
syncCaches(stop)
// add the privileged SCC without KubeVirt accounts
scc := getSCC()
sccSource.Add(&scc)
})
// Tear down the controller environment after each spec: stop the shared
// informers, verify the spec consumed every recorded event, and check
// all gomock expectations were satisfied.
AfterEach(func() {
	// Stops the informer/cache goroutines started via syncCaches(stop).
	close(stop)
	// Ensure that we add checks for expected events to every test
	Expect(recorder.Events).To(BeEmpty())
	// Fails the spec if any EXPECT()ed call was never made.
	ctrl.Finish()
})
// injectMetadata stamps the operator's managed-by label and the
// install-strategy version/registry annotations onto the given object
// metadata. When both version and registry are empty the object is left
// completely untouched.
injectMetadata := func(meta *metav1.ObjectMeta, version string, registry string) {
	if version == "" && registry == "" {
		return
	}
	if meta.Labels == nil {
		meta.Labels = map[string]string{}
	}
	if meta.Annotations == nil {
		meta.Annotations = map[string]string{}
	}
	meta.Labels[v1.ManagedByLabel] = v1.ManagedByLabelOperatorValue
	meta.Annotations[v1.InstallStrategyVersionAnnotation] = version
	meta.Annotations[v1.InstallStrategyRegistryAnnotation] = registry
}
// Helpers that feed objects into the fake informer caches. Each follows
// the same strict choreography: arm the mock work queue for exactly one
// add, push the object through the corresponding informer source, then
// block until the controller's event handler has enqueued the key.
addKubeVirt := func(kv *v1.KubeVirt) {
	mockQueue.ExpectAdds(1)
	kvSource.Add(kv)
	mockQueue.Wait()
}
addServiceAccount := func(sa *k8sv1.ServiceAccount) {
	mockQueue.ExpectAdds(1)
	serviceAccountSource.Add(sa)
	mockQueue.Wait()
}
addClusterRole := func(cr *rbacv1.ClusterRole) {
	mockQueue.ExpectAdds(1)
	clusterRoleSource.Add(cr)
	mockQueue.Wait()
}
addClusterRoleBinding := func(crb *rbacv1.ClusterRoleBinding) {
	mockQueue.ExpectAdds(1)
	clusterRoleBindingSource.Add(crb)
	mockQueue.Wait()
}
addRole := func(role *rbacv1.Role) {
	mockQueue.ExpectAdds(1)
	roleSource.Add(role)
	mockQueue.Wait()
}
addRoleBinding := func(rb *rbacv1.RoleBinding) {
	mockQueue.ExpectAdds(1)
	roleBindingSource.Add(rb)
	mockQueue.Wait()
}
addCrd := func(crd *extv1beta1.CustomResourceDefinition) {
	mockQueue.ExpectAdds(1)
	crdSource.Add(crd)
	mockQueue.Wait()
}
addService := func(svc *k8sv1.Service) {
	mockQueue.ExpectAdds(1)
	serviceSource.Add(svc)
	mockQueue.Wait()
}
addDeployment := func(depl *appsv1.Deployment) {
	mockQueue.ExpectAdds(1)
	deploymentSource.Add(depl)
	mockQueue.Wait()
}
addDaemonset := func(ds *appsv1.DaemonSet) {
	mockQueue.ExpectAdds(1)
	daemonSetSource.Add(ds)
	mockQueue.Wait()
}
addInstallStrategyConfigMap := func(c *k8sv1.ConfigMap) {
	mockQueue.ExpectAdds(1)
	installStrategyConfigMapSource.Add(c)
	mockQueue.Wait()
}
addInstallStrategyJob := func(job *batchv1.Job) {
	mockQueue.ExpectAdds(1)
	installStrategyJobSource.Add(job)
	mockQueue.Wait()
}
addPod := func(pod *k8sv1.Pod) {
	mockQueue.ExpectAdds(1)
	infrastructurePodSource.Add(pod)
	mockQueue.Wait()
}
// addResource stamps operator-managed labels/annotations onto obj and
// pushes it into the informer cache matching its concrete type. Fails
// the spec when handed a type this suite does not track.
//
// Note: inside a type switch `resource` is already bound to the asserted
// concrete type, so the original redundant `obj.(*T)` re-assertions in
// every case have been dropped in favor of `&resource.ObjectMeta`.
addResource := func(obj runtime.Object, version string, registry string) {
	switch resource := obj.(type) {
	case *k8sv1.ServiceAccount:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addServiceAccount(resource)
	case *rbacv1.ClusterRole:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addClusterRole(resource)
	case *rbacv1.ClusterRoleBinding:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addClusterRoleBinding(resource)
	case *rbacv1.Role:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addRole(resource)
	case *rbacv1.RoleBinding:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addRoleBinding(resource)
	case *extv1beta1.CustomResourceDefinition:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addCrd(resource)
	case *k8sv1.Service:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addService(resource)
	case *appsv1.Deployment:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addDeployment(resource)
	case *appsv1.DaemonSet:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addDaemonset(resource)
	case *batchv1.Job:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addInstallStrategyJob(resource)
	case *k8sv1.ConfigMap:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addInstallStrategyConfigMap(resource)
	case *k8sv1.Pod:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addPod(resource)
	default:
		Fail("unknown resource type")
	}
}
// addInstallStrategy seeds the cache with an install-strategy ConfigMap
// for the given tag/registry, giving it a unique random name suffix the
// same way the install-strategy creation job would.
addInstallStrategy := func(imageTag string, imageRegistry string) {
	configMap, _ := installstrategy.NewInstallStrategyConfigMap(NAMESPACE, imageTag, imageRegistry)
	configMap.Name += "-" + rand.String(10)
	addResource(configMap, imageTag, imageRegistry)
}
// addPods seeds one running, ready pod per infrastructure component
// (virt-api, virt-controller, virt-handler), derived from each
// component's pod template, so the controller's readiness checks pass.
addPods := func(version string, registry string) {
	imagePullPolicy := k8sv1.PullPolicy("IfNotPresent")
	verbosity := "2"
	// runningPod wraps a component's pod template in a Running pod with a
	// single ready container and stamps it with operator metadata. This
	// replaces the three hand-rolled, near-identical pod literals.
	runningPod := func(name string, template k8sv1.PodTemplateSpec) *k8sv1.Pod {
		pod := &k8sv1.Pod{
			ObjectMeta: template.ObjectMeta,
			Spec:       template.Spec,
			Status: k8sv1.PodStatus{
				Phase: k8sv1.PodRunning,
				ContainerStatuses: []k8sv1.ContainerStatus{
					{Ready: true, Name: "somecontainer"},
				},
			},
		}
		injectMetadata(&pod.ObjectMeta, version, registry)
		pod.Name = name
		return pod
	}
	// we need at least one active pod for
	//  virt-api
	//  virt-controller
	//  virt-handler
	apiDeployment, _ := components.NewApiServerDeployment(NAMESPACE, registry, version, imagePullPolicy, verbosity)
	addPod(runningPod("virt-api-xxxx", apiDeployment.Spec.Template))
	// renamed from `controller` to avoid shadowing the suite-level
	// KubeVirtController variable of the same name
	controllerDeployment, _ := components.NewControllerDeployment(NAMESPACE, registry, version, imagePullPolicy, verbosity)
	addPod(runningPod("virt-controller-xxxx", controllerDeployment.Spec.Template))
	handlerDaemonSet, _ := components.NewHandlerDaemonSet(NAMESPACE, registry, version, imagePullPolicy, verbosity)
	addPod(runningPod("virt-handler-xxxx", handlerDaemonSet.Spec.Template))
}
// generateRandomResources seeds the caches with one randomly named
// object of every operator-managed kind, under a random version and
// registry, and returns how many objects were added so tests can assert
// they all get garbage collected.
generateRandomResources := func() int {
	version := fmt.Sprintf("rand-%s", rand.String(10))
	registry := fmt.Sprintf("rand-%s", rand.String(10))
	// randMeta produces an ObjectMeta carrying a unique random name.
	randMeta := func() metav1.ObjectMeta {
		return metav1.ObjectMeta{
			Name: fmt.Sprintf("rand-%s", rand.String(10)),
		}
	}
	// A typed []runtime.Object removes the per-element cast (and its
	// unreachable Fail branch) the previous []interface{} slice needed.
	all := []runtime.Object{
		&k8sv1.ServiceAccount{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "v1",
				Kind:       "ServiceAccount",
			},
			ObjectMeta: randMeta(),
		},
		&rbacv1.ClusterRole{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "rbac.authorization.k8s.io/v1",
				Kind:       "ClusterRole",
			},
			ObjectMeta: randMeta(),
		},
		&rbacv1.ClusterRoleBinding{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "rbac.authorization.k8s.io/v1",
				Kind:       "ClusterRoleBinding",
			},
			ObjectMeta: randMeta(),
		},
		&rbacv1.Role{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "rbac.authorization.k8s.io/v1",
				Kind:       "Role",
			},
			ObjectMeta: randMeta(),
		},
		&rbacv1.RoleBinding{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "rbac.authorization.k8s.io/v1",
				Kind:       "RoleBinding",
			},
			ObjectMeta: randMeta(),
		},
		&extv1beta1.CustomResourceDefinition{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "apiextensions.k8s.io/v1beta1",
				Kind:       "CustomResourceDefinition",
			},
			ObjectMeta: randMeta(),
		},
		&k8sv1.Service{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "v1",
				Kind:       "Service",
			},
			ObjectMeta: randMeta(),
		},
		&appsv1.DaemonSet{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "apps/v1",
				Kind:       "DaemonSet",
			},
			ObjectMeta: randMeta(),
		},
		&appsv1.Deployment{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "apps/v1",
				Kind:       "Deployment",
			},
			ObjectMeta: randMeta(),
		},
	}
	for _, resource := range all {
		addResource(resource, version, registry)
	}
	return len(all)
}
// addAll seeds the informer caches with the complete set of resources a
// healthy KubeVirt deployment consists of (RBAC, CRDs, services,
// deployments, the handler daemonset) for the given version/registry,
// and extends the privileged SCC with the KubeVirt service accounts.
addAll := func(version string, registry string) {
	pullPolicy := "IfNotPresent"
	imagePullPolicy := k8sv1.PullPolicy(pullPolicy)
	verbosity := "2"
	all := make([]interface{}, 0)
	// rbac
	all = append(all, rbac.GetAllCluster(NAMESPACE)...)
	all = append(all, rbac.GetAllApiServer(NAMESPACE)...)
	all = append(all, rbac.GetAllHandler(NAMESPACE)...)
	all = append(all, rbac.GetAllController(NAMESPACE)...)
	// crds
	all = append(all, components.NewVirtualMachineInstanceCrd())
	all = append(all, components.NewPresetCrd())
	all = append(all, components.NewReplicaSetCrd())
	all = append(all, components.NewVirtualMachineCrd())
	all = append(all, components.NewVirtualMachineInstanceMigrationCrd())
	// services and deployments
	all = append(all, components.NewPrometheusService(NAMESPACE))
	all = append(all, components.NewApiServerService(NAMESPACE))
	apiDeployment, _ := components.NewApiServerDeployment(NAMESPACE, registry, version, imagePullPolicy, verbosity)
	// NOTE(review): `controller` shadows the suite-level controller
	// variable inside this closure — harmless here, but worth renaming.
	controller, _ := components.NewControllerDeployment(NAMESPACE, registry, version, imagePullPolicy, verbosity)
	handler, _ := components.NewHandlerDaemonSet(NAMESPACE, registry, version, imagePullPolicy, verbosity)
	all = append(all, apiDeployment, controller, handler)
	for _, obj := range all {
		if resource, ok := obj.(runtime.Object); ok {
			addResource(resource, version, registry)
		} else {
			Fail("could not cast to runtime.Object")
		}
	}
	// update SCC
	scc := getSCC()
	prefix := "system:serviceaccount"
	scc.Users = append(scc.Users,
		fmt.Sprintf("%s:%s:%s", prefix, NAMESPACE, "kubevirt-handler"),
		fmt.Sprintf("%s:%s:%s", prefix, NAMESPACE, "kubevirt-apiserver"),
		fmt.Sprintf("%s:%s:%s", prefix, NAMESPACE, "kubevirt-controller"))
	sccSource.Modify(&scc)
}
// makeApiAndControllerReady polls the deployment cache until the
// controller has created the virt-api and virt-controller deployments,
// then marks each as fully rolled out (ready replicas == desired).
makeApiAndControllerReady := func() {
	// makeDeploymentReady republishes a cached deployment with its status
	// reporting every desired replica as ready.
	makeDeploymentReady := func(item interface{}) {
		depl, _ := item.(*appsv1.Deployment)
		deplNew := depl.DeepCopy()
		var replicas int32 = 1
		if depl.Spec.Replicas != nil {
			replicas = *depl.Spec.Replicas
		}
		deplNew.Status.Replicas = replicas
		deplNew.Status.ReadyReplicas = replicas
		deploymentSource.Modify(deplNew)
	}
	for _, name := range []string{"/virt-api", "/virt-controller"} {
		// we need to wait until the deployment exists; break as soon as it
		// does — the original slept one extra second even after finding it.
		for {
			obj, exists, _ := controller.stores.DeploymentCache.GetByKey(NAMESPACE + name)
			if exists {
				makeDeploymentReady(obj)
				break
			}
			time.Sleep(time.Second)
		}
	}
}
makeHandlerReady := func() {
exists := false
var obj interface{}
// we need to wait until the daemonset exists
for !exists {
obj, exists, _ = controller.stores.DaemonSetCache.GetByKey(NAMESPACE + "/virt-handler")
if exists {
handler, _ := obj.(*appsv1.DaemonSet)
handlerNew := handler.DeepCopy()
handlerNew.Status.DesiredNumberScheduled = 1
handlerNew.Status.NumberReady = 1
daemonSetSource.Modify(handlerNew)
}
time.Sleep(time.Second)
}
}
// Deletion helpers mirror the add helpers: arm the queue for exactly
// one add (a delete event enqueues a key like any other event), remove
// the object from the informer source if it is currently cached, then
// block until the controller's handler has processed the event.
deleteServiceAccount := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.ServiceAccount.GetStore().GetByKey(key); exists {
		serviceAccountSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteClusterRole := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.ClusterRole.GetStore().GetByKey(key); exists {
		clusterRoleSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteClusterRoleBinding := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.ClusterRoleBinding.GetStore().GetByKey(key); exists {
		clusterRoleBindingSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteRole := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.Role.GetStore().GetByKey(key); exists {
		roleSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteRoleBinding := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.RoleBinding.GetStore().GetByKey(key); exists {
		roleBindingSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteCrd := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.Crd.GetStore().GetByKey(key); exists {
		crdSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteService := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.Service.GetStore().GetByKey(key); exists {
		serviceSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteDeployment := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.Deployment.GetStore().GetByKey(key); exists {
		deploymentSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteDaemonset := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.DaemonSet.GetStore().GetByKey(key); exists {
		daemonSetSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteInstallStrategyJob := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.InstallStrategyJob.GetStore().GetByKey(key); exists {
		installStrategyJobSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteInstallStrategyConfigMap := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.InstallStrategyConfigMap.GetStore().GetByKey(key); exists {
		installStrategyConfigMapSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
// deleteResource dispatches a cache deletion to the helper matching the
// resource's plural kind name, as reported by client-go delete actions
// (action.GetResource().Resource). `key` is "<namespace>/<name>" or just
// "<name>" for cluster-scoped kinds.
deleteResource := func(resource string, key string) {
	switch resource {
	case "serviceaccounts":
		deleteServiceAccount(key)
	case "clusterroles":
		deleteClusterRole(key)
	case "clusterrolebindings":
		deleteClusterRoleBinding(key)
	case "roles":
		deleteRole(key)
	case "rolebindings":
		deleteRoleBinding(key)
	case "customresourcedefinitions":
		deleteCrd(key)
	case "services":
		deleteService(key)
	case "deployments":
		deleteDeployment(key)
	case "daemonsets":
		deleteDaemonset(key)
	case "jobs":
		deleteInstallStrategyJob(key)
	case "configmaps":
		deleteInstallStrategyConfigMap(key)
	default:
		Fail("unknown resource type")
	}
}
// Reactor callbacks shared by the specs below. Each validates the action
// type and bumps the matching counter so specs can assert exact totals.
genericUpdateFunc := func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	update, ok := action.(testing.UpdateAction)
	Expect(ok).To(BeTrue())
	totalUpdates++
	return true, update.GetObject(), nil
}
genericPatchFunc := func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	_, ok := action.(testing.PatchAction)
	Expect(ok).To(BeTrue())
	totalPatches++
	return true, nil, nil
}
genericCreateFunc := func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	create, ok := action.(testing.CreateAction)
	Expect(ok).To(BeTrue())
	totalAdds++
	// Mirror the creation into the informer caches; empty version and
	// registry mean injectMetadata leaves the object untouched.
	addResource(create.GetObject(), "", "")
	return true, create.GetObject(), nil
}
genericDeleteFunc := func(action testing.Action) (handled bool, obj runtime.Object, err error) {
delete, ok := action.(testing.DeleteAction)
Expect(ok).To(BeTrue())
totalDeletions++
var key string
if len(delete.GetNamespace()) > 0 {
key = delete.GetNamespace() + "/"
}
key += delete.GetName()
if deleteFromCache {
deleteResource(delete.GetResource().Resource, key)
}
return true, nil, nil
}
// expectUsers decodes the "users" list from an SCC patch payload and
// asserts its length. Offset 2 attributes failures to the It block's
// call site rather than this helper or the reactor closure.
expectUsers := func(userBytes []byte, count int) {
	type _users struct {
		Users []string `json:"users"`
	}
	users := &_users{}
	// Previously the Unmarshal error was silently dropped, which would
	// surface only as a confusing length mismatch below.
	ExpectWithOffset(2, json.Unmarshal(userBytes, users)).To(Succeed())
	ExpectWithOffset(2, len(users.Users)).To(Equal(count))
}
shouldExpectInstallStrategyDeletion := func() {
kubeClient.Fake.PrependReactor("delete", "configmaps", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
delete, ok := action.(testing.DeleteAction)
Expect(ok).To(BeTrue())
var key string
if len(delete.GetNamespace()) > 0 {
key = delete.GetNamespace() + "/"
}
key += delete.GetName()
deleteResource(delete.GetResource().Resource, key)
return true, nil, nil
})
}
// shouldExpectDeletions registers counting delete reactors for every
// operator-managed kind and expects the SCC user list to be patched
// back down to a single (pre-existing) user.
shouldExpectDeletions := func() {
	kubeClient.Fake.PrependReactor("delete", "serviceaccounts", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "clusterroles", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "clusterrolebindings", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "roles", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "rolebindings", genericDeleteFunc)
	secClient.Fake.PrependReactor("patch", "securitycontextconstraints", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
		patch, _ := action.(testing.PatchAction)
		// only the original SCC user should remain after teardown
		expectUsers(patch.GetPatch(), 1)
		return true, nil, nil
	})
	extClient.Fake.PrependReactor("delete", "customresourcedefinitions", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "services", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "deployments", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "daemonsets", genericDeleteFunc)
}
// shouldExpectJobDeletion/Creation cover install-strategy job lifecycle.
shouldExpectJobDeletion := func() {
	kubeClient.Fake.PrependReactor("delete", "jobs", genericDeleteFunc)
}
shouldExpectJobCreation := func() {
	kubeClient.Fake.PrependReactor("create", "jobs", genericCreateFunc)
}
// shouldExpectPatchesAndUpdates registers counting reactors for the
// update/rollback path, where existing objects are patched or updated
// rather than created.
shouldExpectPatchesAndUpdates := func() {
	extClient.Fake.PrependReactor("patch", "customresourcedefinitions", genericPatchFunc)
	kubeClient.Fake.PrependReactor("patch", "serviceaccounts", genericPatchFunc)
	kubeClient.Fake.PrependReactor("update", "clusterroles", genericUpdateFunc)
	kubeClient.Fake.PrependReactor("update", "clusterrolebindings", genericUpdateFunc)
	kubeClient.Fake.PrependReactor("update", "roles", genericUpdateFunc)
	kubeClient.Fake.PrependReactor("update", "rolebindings", genericUpdateFunc)
	kubeClient.Fake.PrependReactor("patch", "services", genericPatchFunc)
	kubeClient.Fake.PrependReactor("patch", "daemonsets", genericPatchFunc)
	kubeClient.Fake.PrependReactor("patch", "deployments", genericPatchFunc)
}
// shouldExpectCreations registers counting create reactors for a fresh
// deploy and expects the SCC to be patched to hold four users (the
// original plus the three KubeVirt service accounts).
shouldExpectCreations := func() {
	kubeClient.Fake.PrependReactor("create", "serviceaccounts", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "clusterroles", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "clusterrolebindings", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "roles", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "rolebindings", genericCreateFunc)
	secClient.Fake.PrependReactor("patch", "securitycontextconstraints", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
		patch, _ := action.(testing.PatchAction)
		expectUsers(patch.GetPatch(), 4)
		return true, nil, nil
	})
	extClient.Fake.PrependReactor("create", "customresourcedefinitions", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "services", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "deployments", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "daemonsets", genericCreateFunc)
}
// shouldExpectKubeVirtUpdate expects `times` status updates on the
// KubeVirt CR; each is mirrored into the informer store so subsequent
// reads see the latest object.
shouldExpectKubeVirtUpdate := func(times int) {
	update := kvInterface.EXPECT().Update(gomock.Any())
	update.Do(func(kv *v1.KubeVirt) {
		kvInformer.GetStore().Update(kv)
		update.Return(kv, nil)
	}).Times(times)
}
// shouldExpectKubeVirtUpdateVersion additionally asserts that both the
// target and observed versions in status match imageTag.
shouldExpectKubeVirtUpdateVersion := func(times int, imageTag string) {
	update := kvInterface.EXPECT().Update(gomock.Any())
	update.Do(func(kv *v1.KubeVirt) {
		Expect(kv.Status.TargetKubeVirtVersion).To(Equal(imageTag))
		Expect(kv.Status.ObservedKubeVirtVersion).To(Equal(imageTag))
		kvInformer.GetStore().Update(kv)
		update.Return(kv, nil)
	}).Times(times)
}
// shouldExpectKubeVirtUpdateFailureCondition expects exactly one update
// carrying a single status condition with the given failure reason.
shouldExpectKubeVirtUpdateFailureCondition := func(reason string) {
	update := kvInterface.EXPECT().Update(gomock.Any())
	update.Do(func(kv *v1.KubeVirt) {
		Expect(len(kv.Status.Conditions)).To(Equal(1))
		Expect(kv.Status.Conditions[0].Reason).To(Equal(reason))
		kvInformer.GetStore().Update(kv)
		update.Return(kv, nil)
	}).Times(1)
}
// getLatestKubeVirt returns the cached copy of the given KubeVirt CR,
// or nil when it is absent from the informer store.
getLatestKubeVirt := func(kv *v1.KubeVirt) *v1.KubeVirt {
	key := kv.GetNamespace() + "/" + kv.GetName()
	obj, exists, _ := kvInformer.GetStore().GetByKey(key)
	if !exists {
		return nil
	}
	latest, ok := obj.(*v1.KubeVirt)
	if !ok {
		return nil
	}
	return latest
}
Context("On valid KubeVirt object", func() {
// A KubeVirt CR that reached the Deleted phase and carries a deletion
// timestamp must have its generated install-strategy ConfigMap garbage
// collected.
It("should delete install strategy configmap once kubevirt install is deleted", func(done Done) {
	defer close(done)
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-install",
			Namespace: NAMESPACE,
		},
		Status: v1.KubeVirtStatus{
			Phase: v1.KubeVirtPhaseDeleted,
		},
	}
	kv.DeletionTimestamp = now()
	shouldExpectInstallStrategyDeletion()
	addKubeVirt(kv)
	addInstallStrategy(defaultImageTag, defaultRegistry)
	controller.Execute()
}, 15)
// A fully deployed install using a non-default image tag must report
// that tag as both target and observed version in status.
It("should observe custom image tag in status during deploy", func(done Done) {
	defer close(done)
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Spec: v1.KubeVirtSpec{
			ImageTag: "custom.tag",
		},
		Status: v1.KubeVirtStatus{
			Phase: v1.KubeVirtPhaseDeployed,
			Conditions: []v1.KubeVirtCondition{
				{
					Type:    v1.KubeVirtConditionCreated,
					Status:  k8sv1.ConditionTrue,
					Reason:  ConditionReasonDeploymentCreated,
					Message: "All resources were created.",
				},
				{
					Type:    v1.KubeVirtConditionReady,
					Status:  k8sv1.ConditionTrue,
					Reason:  ConditionReasonDeploymentReady,
					Message: "All components are ready.",
				},
			},
			OperatorVersion: version.Get().String(),
		},
	}
	// create all resources which should already exist
	addKubeVirt(kv)
	addAll("custom.tag", defaultRegistry)
	// install strategy config
	addInstallStrategy("custom.tag", defaultRegistry)
	addPods("custom.tag", defaultRegistry)
	makeApiAndControllerReady()
	makeHandlerReady()
	shouldExpectKubeVirtUpdateVersion(1, "custom.tag")
	controller.Execute()
}, 15)
// A fully deployed, fully ready install at the default version must not
// trigger any API calls at all — the unexpected-call reactors installed
// in BeforeEach fail the spec otherwise.
It("should do nothing if KubeVirt object is deployed", func(done Done) {
	defer close(done)
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Status: v1.KubeVirtStatus{
			Phase: v1.KubeVirtPhaseDeployed,
			Conditions: []v1.KubeVirtCondition{
				{
					Type:    v1.KubeVirtConditionCreated,
					Status:  k8sv1.ConditionTrue,
					Reason:  ConditionReasonDeploymentCreated,
					Message: "All resources were created.",
				},
				{
					Type:    v1.KubeVirtConditionReady,
					Status:  k8sv1.ConditionTrue,
					Reason:  ConditionReasonDeploymentReady,
					Message: "All components are ready.",
				},
			},
			OperatorVersion:          version.Get().String(),
			TargetKubeVirtVersion:    defaultImageTag,
			TargetKubeVirtRegistry:   defaultRegistry,
			ObservedKubeVirtVersion:  defaultImageTag,
			ObservedKubeVirtRegistry: defaultRegistry,
		},
	}
	// create all resources which should already exist
	addKubeVirt(kv)
	addInstallStrategy(defaultImageTag, defaultRegistry)
	addAll(defaultImageTag, defaultRegistry)
	addPods(defaultImageTag, defaultRegistry)
	makeApiAndControllerReady()
	makeHandlerReady()
	controller.Execute()
}, 15)
// Randomly named objects that carry the operator's managed-by label but
// are not part of the deployed install strategy must all be deleted.
It("should delete operator managed resources not in the deployed installstrategy", func(done Done) {
	defer close(done)
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Status: v1.KubeVirtStatus{
			Phase: v1.KubeVirtPhaseDeployed,
			Conditions: []v1.KubeVirtCondition{
				{
					Type:    v1.KubeVirtConditionCreated,
					Status:  k8sv1.ConditionTrue,
					Reason:  ConditionReasonDeploymentCreated,
					Message: "All resources were created.",
				},
			},
			OperatorVersion:          version.Get().String(),
			TargetKubeVirtVersion:    defaultImageTag,
			TargetKubeVirtRegistry:   defaultRegistry,
			ObservedKubeVirtVersion:  defaultImageTag,
			ObservedKubeVirtRegistry: defaultRegistry,
		},
	}
	// keep deleted objects in the caches so deletion counts are stable
	deleteFromCache = false
	// create all resources which should already exist
	addKubeVirt(kv)
	addInstallStrategy(defaultImageTag, defaultRegistry)
	addAll(defaultImageTag, defaultRegistry)
	numResources := generateRandomResources()
	addPods(defaultImageTag, defaultRegistry)
	makeApiAndControllerReady()
	makeHandlerReady()
	shouldExpectDeletions()
	controller.Execute()
	// exactly the stray resources — and nothing else — get deleted
	Expect(totalDeletions).To(Equal(numResources))
}, 15)
// Only one KubeVirt CR may be active; reconciling a second CR while the
// first is deployed must set a single failure condition on the second.
It("should fail if KubeVirt object already exists", func(done Done) {
	defer close(done)
	kv1 := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install-1",
			Namespace:  NAMESPACE,
			UID:        "11111111111",
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Status: v1.KubeVirtStatus{
			Phase: v1.KubeVirtPhaseDeployed,
			Conditions: []v1.KubeVirtCondition{
				{
					Type:    v1.KubeVirtConditionCreated,
					Status:  k8sv1.ConditionTrue,
					Reason:  ConditionReasonDeploymentCreated,
					Message: "All resources were created.",
				},
				{
					Type:    v1.KubeVirtConditionReady,
					Status:  k8sv1.ConditionTrue,
					Reason:  ConditionReasonDeploymentReady,
					Message: "All components are ready.",
				},
			},
			OperatorVersion: "v0.0.0-master+$Format:%h$",
		},
	}
	kv2 := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-install-2",
			Namespace: NAMESPACE,
			UID:       "123123123",
		},
		Status: v1.KubeVirtStatus{},
	}
	addKubeVirt(kv1)
	addKubeVirt(kv2)
	shouldExpectKubeVirtUpdateFailureCondition(ConditionReasonDeploymentFailedExisting)
	// reconcile the second CR directly by key
	controller.execute(fmt.Sprintf("%s/%s", kv2.Namespace, kv2.Name))
}, 15)
// Changing spec.imageTag/imageRegistry on a deployed CR must kick off a
// new install-strategy generation job for the target version.
It("should generate install strategy creation job for update version", func(done Done) {
	defer close(done)
	updatedVersion := "1.1.1"
	updatedRegistry := "otherregistry"
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Spec: v1.KubeVirtSpec{
			ImageTag:      updatedVersion,
			ImageRegistry: updatedRegistry,
		},
		Status: v1.KubeVirtStatus{
			Phase: v1.KubeVirtPhaseDeployed,
			Conditions: []v1.KubeVirtCondition{
				{
					Type:    v1.KubeVirtConditionCreated,
					Status:  k8sv1.ConditionTrue,
					Reason:  ConditionReasonDeploymentCreated,
					Message: "All resources were created.",
				},
				{
					Type:    v1.KubeVirtConditionReady,
					Status:  k8sv1.ConditionTrue,
					Reason:  ConditionReasonDeploymentReady,
					Message: "All components are ready.",
				},
			},
			OperatorVersion:          version.Get().String(),
			TargetKubeVirtVersion:    defaultImageTag,
			TargetKubeVirtRegistry:   defaultRegistry,
			ObservedKubeVirtVersion:  defaultImageTag,
			ObservedKubeVirtRegistry: defaultRegistry,
		},
	}
	// create all resources which should already exist
	addKubeVirt(kv)
	addInstallStrategy(defaultImageTag, defaultRegistry)
	shouldExpectKubeVirtUpdate(1)
	shouldExpectJobCreation()
	controller.Execute()
}, 15)
// With no cached install strategy at all, the controller's first move
// is to create the strategy-generation job.
It("should generate install strategy creation job if no install strategy exists", func(done Done) {
	defer close(done)
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Status: v1.KubeVirtStatus{},
	}
	// create all resources which should already exist
	addKubeVirt(kv)
	shouldExpectKubeVirtUpdate(1)
	shouldExpectJobCreation()
	controller.Execute()
}, 15)
// A strategy-generation job that completed more than 10 seconds ago may
// be cleaned up so a fresh one can be created.
It("should delete install strategy creation job if job has failed", func(done Done) {
	defer close(done)
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Status: v1.KubeVirtStatus{},
	}
	job := controller.generateInstallStrategyJob(kv)
	// will only create a new job after 10 seconds has passed.
	// this is just a simple mechanism to prevent spin loops
	// in the event that jobs are fast failing for some unknown reason.
	completionTime := time.Now().Add(time.Duration(-10) * time.Second)
	job.Status.CompletionTime = &metav1.Time{Time: completionTime}
	// create all resources which should already exist
	addKubeVirt(kv)
	addInstallStrategyJob(job)
	shouldExpectJobDeletion()
	shouldExpectKubeVirtUpdate(1)
	controller.Execute()
}, 15)
// Inside the 10-second backoff window the completed job must be left
// alone (no delete reactor is registered here).
It("should not delete completed install strategy creation job if job has failed less that 10 seconds ago", func(done Done) {
	defer close(done)
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Status: v1.KubeVirtStatus{},
	}
	job := controller.generateInstallStrategyJob(kv)
	job.Status.CompletionTime = now()
	// create all resources which should already exist
	addKubeVirt(kv)
	addInstallStrategyJob(job)
	shouldExpectKubeVirtUpdate(1)
	controller.Execute()
}, 15)
// Fresh install: all resources get created except the controller and
// handler workloads, which wait on a ready API server.
It("should add resources on create", func(done Done) {
	defer close(done)
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-install",
			Namespace: NAMESPACE,
		},
	}
	addKubeVirt(kv)
	addInstallStrategy(defaultImageTag, defaultRegistry)
	job := controller.generateInstallStrategyJob(kv)
	job.Status.CompletionTime = now()
	addInstallStrategyJob(job)
	// ensure completed jobs are garbage collected once install strategy
	// is loaded
	deleteFromCache = false
	shouldExpectJobDeletion()
	shouldExpectKubeVirtUpdate(1)
	shouldExpectCreations()
	controller.Execute()
	kv = getLatestKubeVirt(kv)
	Expect(kv.Status.Phase).To(Equal(v1.KubeVirtPhaseDeploying))
	Expect(len(kv.Status.Conditions)).To(Equal(0))
	// -2 because waiting on controller and virt-handler daemonset until API server deploys successfully
	Expect(totalAdds).To(Equal(resourceCount - 2))
	Expect(len(controller.stores.ServiceAccountCache.List())).To(Equal(3))
	Expect(len(controller.stores.ClusterRoleCache.List())).To(Equal(7))
	Expect(len(controller.stores.ClusterRoleBindingCache.List())).To(Equal(5))
	Expect(len(controller.stores.RoleCache.List())).To(Equal(2))
	Expect(len(controller.stores.RoleBindingCache.List())).To(Equal(2))
	Expect(len(controller.stores.CrdCache.List())).To(Equal(5))
	Expect(len(controller.stores.ServiceCache.List())).To(Equal(2))
	Expect(len(controller.stores.DeploymentCache.List())).To(Equal(1))
	Expect(len(controller.stores.DaemonSetCache.List())).To(Equal(0))
}, 15)
// During a rollback, controller and daemonset patches are withheld until
// the (older) API server has rolled over; two patches fewer than a full
// pass are expected.
It("should pause rollback until api server is rolled over.", func(done Done) {
	defer close(done)
	rollbackVersion := "9.9.7"
	rollbackRegistry := "otherregistry"
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Spec: v1.KubeVirtSpec{
			ImageTag:      rollbackVersion,
			ImageRegistry: rollbackRegistry,
		},
		Status: v1.KubeVirtStatus{
			Phase: v1.KubeVirtPhaseDeployed,
			Conditions: []v1.KubeVirtCondition{
				{
					Type:    v1.KubeVirtConditionCreated,
					Status:  k8sv1.ConditionTrue,
					Reason:  ConditionReasonDeploymentCreated,
					Message: "All resources were created.",
				},
				{
					Type:    v1.KubeVirtConditionReady,
					Status:  k8sv1.ConditionTrue,
					Reason:  ConditionReasonDeploymentReady,
					Message: "All components are ready.",
				},
			},
			OperatorVersion:          version.Get().String(),
			TargetKubeVirtVersion:    defaultImageTag,
			TargetKubeVirtRegistry:   defaultRegistry,
			ObservedKubeVirtVersion:  defaultImageTag,
			ObservedKubeVirtRegistry: defaultRegistry,
		},
	}
	// create all resources which should already exist
	addKubeVirt(kv)
	addInstallStrategy(defaultImageTag, defaultRegistry)
	addInstallStrategy(rollbackVersion, rollbackRegistry)
	addAll(defaultImageTag, defaultRegistry)
	addPods(defaultImageTag, defaultRegistry)
	makeApiAndControllerReady()
	makeHandlerReady()
	shouldExpectPatchesAndUpdates()
	shouldExpectKubeVirtUpdate(1)
	controller.Execute()
	// on rollback or create, api server must be online first before controllers and daemonset.
	// On rollback this prevents someone from posting invalid specs to
	// the cluster from newer versions when an older version is being deployed.
	// On create this prevents invalid specs from entering the cluster
	// while controllers are available to process them.
	Expect(totalPatches).To(Equal(patchCount - 2))
	Expect(totalUpdates).To(Equal(updateCount))
}, 15)
// During an update, the API server patch is withheld until the new
// daemonset/controller pods are online; one patch fewer is expected.
It("should pause update until daemonsets and controllers are rolled over.", func(done Done) {
	defer close(done)
	updatedVersion := "9.9.10"
	updatedRegistry := "otherregistry"
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Spec: v1.KubeVirtSpec{
			ImageTag:      updatedVersion,
			ImageRegistry: updatedRegistry,
		},
		Status: v1.KubeVirtStatus{
			Phase: v1.KubeVirtPhaseDeployed,
			Conditions: []v1.KubeVirtCondition{
				{
					Type:    v1.KubeVirtConditionCreated,
					Status:  k8sv1.ConditionTrue,
					Reason:  ConditionReasonDeploymentCreated,
					Message: "All resources were created.",
				},
				{
					Type:    v1.KubeVirtConditionReady,
					Status:  k8sv1.ConditionTrue,
					Reason:  ConditionReasonDeploymentReady,
					Message: "All components are ready.",
				},
			},
			OperatorVersion:          version.Get().String(),
			TargetKubeVirtVersion:    defaultImageTag,
			TargetKubeVirtRegistry:   defaultRegistry,
			ObservedKubeVirtVersion:  defaultImageTag,
			ObservedKubeVirtRegistry: defaultRegistry,
		},
	}
	// create all resources which should already exist
	addKubeVirt(kv)
	addInstallStrategy(defaultImageTag, defaultRegistry)
	addInstallStrategy(updatedVersion, updatedRegistry)
	addAll(defaultImageTag, defaultRegistry)
	addPods(defaultImageTag, defaultRegistry)
	makeApiAndControllerReady()
	makeHandlerReady()
	shouldExpectPatchesAndUpdates()
	shouldExpectKubeVirtUpdate(1)
	controller.Execute()
	// on update, apiserver won't get patched until daemonset and controller pods are online.
	// this prevents the new API from coming online until the controllers can manage it.
	Expect(totalPatches).To(Equal(patchCount - 1))
	Expect(totalUpdates).To(Equal(updateCount))
}, 15)
// Full update path: pods for BOTH the old and the new version exist, so
// nothing blocks the rollout and every managed resource must be either
// patched or updated in a single reconcile pass.
It("should update resources when changing KubeVirt version.", func(done Done) {
	defer close(done)

	updatedVersion := "1.1.1"
	updatedRegistry := "otherregistry"

	// KubeVirt CR fully deployed at the default version, spec now pointing
	// at the updated version/registry.
	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Spec: v1.KubeVirtSpec{
			ImageTag:      updatedVersion,
			ImageRegistry: updatedRegistry,
		},
		Status: v1.KubeVirtStatus{
			Phase: v1.KubeVirtPhaseDeployed,
			Conditions: []v1.KubeVirtCondition{
				{
					Type:    v1.KubeVirtConditionCreated,
					Status:  k8sv1.ConditionTrue,
					Reason:  ConditionReasonDeploymentCreated,
					Message: "All resources were created.",
				},
				{
					Type:    v1.KubeVirtConditionReady,
					Status:  k8sv1.ConditionTrue,
					Reason:  ConditionReasonDeploymentReady,
					Message: "All components are ready.",
				},
			},
			OperatorVersion:          version.Get().String(),
			TargetKubeVirtVersion:    defaultImageTag,
			TargetKubeVirtRegistry:   defaultRegistry,
			ObservedKubeVirtVersion:  defaultImageTag,
			ObservedKubeVirtRegistry: defaultRegistry,
		},
	}

	// create all resources which should already exist
	addKubeVirt(kv)
	addInstallStrategy(defaultImageTag, defaultRegistry)
	addInstallStrategy(updatedVersion, updatedRegistry)
	addAll(defaultImageTag, defaultRegistry)
	addPods(defaultImageTag, defaultRegistry)

	// pods for the new version are added so this test won't
	// wait for daemonsets to rollover before updating/patching
	// all resources.
	addPods(updatedVersion, updatedRegistry)

	makeApiAndControllerReady()
	makeHandlerReady()

	shouldExpectPatchesAndUpdates()
	shouldExpectKubeVirtUpdate(1)

	controller.Execute()

	Expect(totalPatches).To(Equal(patchCount))
	Expect(totalUpdates).To(Equal(updateCount))

	// ensure every resource is either patched or updated
	Expect(totalUpdates + totalPatches).To(Equal(resourceCount))
}, 15)
// Deletion path: a KubeVirt CR carrying a deletion timestamp must tear down
// every managed resource plus the install strategy, and end up in the
// Deleted phase with its conditions cleared.
It("should remove resources on deletion", func(done Done) {
	defer close(done)

	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-install",
			Namespace: NAMESPACE,
		},
	}
	kv.DeletionTimestamp = now()
	addKubeVirt(kv)

	// create all resources which should be deleted
	addInstallStrategy(defaultImageTag, defaultRegistry)
	addAll(defaultImageTag, defaultRegistry)

	shouldExpectKubeVirtUpdate(1)
	shouldExpectDeletions()
	shouldExpectInstallStrategyDeletion()

	controller.Execute()

	// Note: in real life during the first execution loop very probably only CRDs are deleted,
	// because that takes some time (see the check that the crd store is empty before going on with deletions)
	// But in this test the deletion succeeds immediately, so everything is deleted on first try
	Expect(totalDeletions).To(Equal(resourceCount))

	kv = getLatestKubeVirt(kv)
	Expect(kv.Status.Phase).To(Equal(v1.KubeVirtPhaseDeleted))
	Expect(len(kv.Status.Conditions)).To(Equal(0))
}, 15)
})
// Verifies that dumping the install strategy posts a ConfigMap stamped with
// the operator's version/registry annotations and a "manifests" payload.
Context("On install strategy dump", func() {
	It("should generate latest install strategy and post as config map", func(done Done) {
		defer close(done)

		kubeClient.Fake.PrependReactor("create", "configmaps", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
			create, ok := action.(testing.CreateAction)
			Expect(ok).To(BeTrue())

			configMap := create.GetObject().(*k8sv1.ConfigMap)
			Expect(configMap.GenerateName).To(Equal("kubevirt-install-strategy-"))

			// For both annotations, assert presence BEFORE comparing the
			// value (the original compared the registry value first, which
			// would report a confusing mismatch instead of a missing key).
			// Also renamed the local from "version" to avoid shadowing the
			// imported version package.
			strategyVersion, ok := configMap.ObjectMeta.Annotations[v1.InstallStrategyVersionAnnotation]
			Expect(ok).To(BeTrue())
			Expect(strategyVersion).To(Equal(defaultImageTag))

			registry, ok := configMap.ObjectMeta.Annotations[v1.InstallStrategyRegistryAnnotation]
			Expect(ok).To(BeTrue())
			Expect(registry).To(Equal(defaultRegistry))

			_, ok = configMap.Data["manifests"]
			Expect(ok).To(BeTrue())

			return true, create.GetObject(), nil
		})

		// This generates and posts the install strategy config map
		installstrategy.DumpInstallStrategyToConfigMap(virtClient)
	}, 15)
})
})
// now returns a pointer to the current wall-clock time in metav1 form,
// suitable for ObjectMeta fields such as DeletionTimestamp.
func now() *metav1.Time {
	t := metav1.Now()
	return &t
}
Unit test for syncing operator and operand versions.
Signed-off-by: David Vossel <3a865980c5ac97d5aadbbcbf1cbfa33e47e26202@gmail.com>
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2017 Red Hat, Inc.
*
*/
package virt_operator
import (
"encoding/json"
"fmt"
"os"
"time"
"github.com/golang/mock/gomock"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
secv1 "github.com/openshift/api/security/v1"
secv1fake "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
k8sv1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
extv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
extclientfake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
framework "k8s.io/client-go/tools/cache/testing"
"k8s.io/client-go/tools/record"
v1 "kubevirt.io/kubevirt/pkg/api/v1"
"kubevirt.io/kubevirt/pkg/kubecli"
"kubevirt.io/kubevirt/pkg/log"
"kubevirt.io/kubevirt/pkg/testutils"
"kubevirt.io/kubevirt/pkg/version"
"kubevirt.io/kubevirt/pkg/virt-operator/creation/components"
"kubevirt.io/kubevirt/pkg/virt-operator/creation/rbac"
installstrategy "kubevirt.io/kubevirt/pkg/virt-operator/install-strategy"
"kubevirt.io/kubevirt/pkg/virt-operator/util"
)
var _ = Describe("KubeVirt Operator", func() {
// Route kubevirt's logger output through ginkgo so it interleaves with spec output.
log.Log.SetIOWriter(GinkgoWriter)

var ctrl *gomock.Controller
var kvInterface *kubecli.MockKubeVirtInterface
var kvSource *framework.FakeControllerSource
var kvInformer cache.SharedIndexInformer

// fake watch sources feeding one informer per watched resource type
var serviceAccountSource *framework.FakeControllerSource
var clusterRoleSource *framework.FakeControllerSource
var clusterRoleBindingSource *framework.FakeControllerSource
var roleSource *framework.FakeControllerSource
var roleBindingSource *framework.FakeControllerSource
var crdSource *framework.FakeControllerSource
var serviceSource *framework.FakeControllerSource
var deploymentSource *framework.FakeControllerSource
var daemonSetSource *framework.FakeControllerSource
var sccSource *framework.FakeControllerSource
var installStrategyConfigMapSource *framework.FakeControllerSource
var installStrategyJobSource *framework.FakeControllerSource
var infrastructurePodSource *framework.FakeControllerSource

var stop chan struct{}
var controller *KubeVirtController

var recorder *record.FakeRecorder

var mockQueue *testutils.MockWorkQueue
var virtClient *kubecli.MockKubevirtClient
var kubeClient *fake.Clientset
var secClient *secv1fake.FakeSecurityV1
var extClient *extclientfake.Clientset

var informers util.Informers
var stores util.Stores

// version/registry encoded into the operator image env var in BeforeEach
defaultImageTag := "v9.9.9"
defaultRegistry := "someregistry"

// per-test action counters bumped by the generic reactor funcs
var totalAdds int
var totalUpdates int
var totalPatches int
var totalDeletions int

NAMESPACE := "kubevirt-test"

// expected number of operator-managed resources, and how many of them the
// tests expect to be patched vs updated during a rollout
resourceCount := 29
patchCount := 13
updateCount := 16

// when false, genericDeleteFunc does not mirror deletions into the caches
deleteFromCache := true
// syncCaches starts every informer and blocks until all caches have synced,
// failing the test immediately if any cache fails to sync.
syncCaches := func(stop chan struct{}) {
	go kvInformer.Run(stop)
	go informers.ServiceAccount.Run(stop)
	go informers.ClusterRole.Run(stop)
	go informers.ClusterRoleBinding.Run(stop)
	go informers.Role.Run(stop)
	go informers.RoleBinding.Run(stop)
	go informers.Crd.Run(stop)
	go informers.Service.Run(stop)
	go informers.Deployment.Run(stop)
	go informers.DaemonSet.Run(stop)
	go informers.SCC.Run(stop)
	go informers.InstallStrategyJob.Run(stop)
	go informers.InstallStrategyConfigMap.Run(stop)
	go informers.InfrastructurePod.Run(stop)

	// The original asserted only the KubeVirt informer's sync result and
	// silently discarded the boolean for every other cache; assert all of
	// them so a failed sync surfaces here instead of as a flaky test later.
	Expect(cache.WaitForCacheSync(stop, kvInformer.HasSynced)).To(BeTrue())
	Expect(cache.WaitForCacheSync(stop, informers.ServiceAccount.HasSynced)).To(BeTrue())
	Expect(cache.WaitForCacheSync(stop, informers.ClusterRole.HasSynced)).To(BeTrue())
	Expect(cache.WaitForCacheSync(stop, informers.ClusterRoleBinding.HasSynced)).To(BeTrue())
	Expect(cache.WaitForCacheSync(stop, informers.Role.HasSynced)).To(BeTrue())
	Expect(cache.WaitForCacheSync(stop, informers.RoleBinding.HasSynced)).To(BeTrue())
	Expect(cache.WaitForCacheSync(stop, informers.Crd.HasSynced)).To(BeTrue())
	Expect(cache.WaitForCacheSync(stop, informers.Service.HasSynced)).To(BeTrue())
	Expect(cache.WaitForCacheSync(stop, informers.Deployment.HasSynced)).To(BeTrue())
	Expect(cache.WaitForCacheSync(stop, informers.DaemonSet.HasSynced)).To(BeTrue())
	Expect(cache.WaitForCacheSync(stop, informers.SCC.HasSynced)).To(BeTrue())
	Expect(cache.WaitForCacheSync(stop, informers.InstallStrategyJob.HasSynced)).To(BeTrue())
	Expect(cache.WaitForCacheSync(stop, informers.InstallStrategyConfigMap.HasSynced)).To(BeTrue())
	Expect(cache.WaitForCacheSync(stop, informers.InfrastructurePod.HasSynced)).To(BeTrue())
}
// getSCC builds the pre-existing "privileged" SecurityContextConstraints
// fixture containing one unrelated user entry, as a cluster would have
// before KubeVirt service accounts are added.
getSCC := func() secv1.SecurityContextConstraints {
	scc := secv1.SecurityContextConstraints{}
	scc.ObjectMeta = metav1.ObjectMeta{Name: "privileged"}
	scc.Users = []string{"someUser"}
	return scc
}
// Builds a fresh fake environment for every spec: mock clients, fake
// informers/sources for each watched type, the controller under test with
// an instrumented work queue, and catch-all reactors that fail the spec on
// any API call a test did not explicitly expect.
BeforeEach(func() {
	// the operator derives its target version/registry from its own image name
	os.Setenv(util.OperatorImageEnvName, fmt.Sprintf("%s/virt-operator:%s", defaultRegistry, defaultImageTag))

	// reset per-test counters and flags
	totalAdds = 0
	totalUpdates = 0
	totalPatches = 0
	totalDeletions = 0
	deleteFromCache = true

	stop = make(chan struct{})
	ctrl = gomock.NewController(GinkgoT())
	virtClient = kubecli.NewMockKubevirtClient(ctrl)
	kvInterface = kubecli.NewMockKubeVirtInterface(ctrl)

	kvInformer, kvSource = testutils.NewFakeInformerFor(&v1.KubeVirt{})
	recorder = record.NewFakeRecorder(100)

	// one fake informer plus exported store per watched resource type
	informers.ServiceAccount, serviceAccountSource = testutils.NewFakeInformerFor(&k8sv1.ServiceAccount{})
	stores.ServiceAccountCache = informers.ServiceAccount.GetStore()

	informers.ClusterRole, clusterRoleSource = testutils.NewFakeInformerFor(&rbacv1.ClusterRole{})
	stores.ClusterRoleCache = informers.ClusterRole.GetStore()

	informers.ClusterRoleBinding, clusterRoleBindingSource = testutils.NewFakeInformerFor(&rbacv1.ClusterRoleBinding{})
	stores.ClusterRoleBindingCache = informers.ClusterRoleBinding.GetStore()

	informers.Role, roleSource = testutils.NewFakeInformerFor(&rbacv1.Role{})
	stores.RoleCache = informers.Role.GetStore()

	informers.RoleBinding, roleBindingSource = testutils.NewFakeInformerFor(&rbacv1.RoleBinding{})
	stores.RoleBindingCache = informers.RoleBinding.GetStore()

	informers.Crd, crdSource = testutils.NewFakeInformerFor(&extv1beta1.CustomResourceDefinition{})
	stores.CrdCache = informers.Crd.GetStore()

	informers.Service, serviceSource = testutils.NewFakeInformerFor(&k8sv1.Service{})
	stores.ServiceCache = informers.Service.GetStore()

	informers.Deployment, deploymentSource = testutils.NewFakeInformerFor(&appsv1.Deployment{})
	stores.DeploymentCache = informers.Deployment.GetStore()

	informers.DaemonSet, daemonSetSource = testutils.NewFakeInformerFor(&appsv1.DaemonSet{})
	stores.DaemonSetCache = informers.DaemonSet.GetStore()

	informers.SCC, sccSource = testutils.NewFakeInformerFor(&secv1.SecurityContextConstraints{})
	stores.SCCCache = informers.SCC.GetStore()

	informers.InstallStrategyConfigMap, installStrategyConfigMapSource = testutils.NewFakeInformerFor(&k8sv1.ConfigMap{})
	stores.InstallStrategyConfigMapCache = informers.InstallStrategyConfigMap.GetStore()

	informers.InstallStrategyJob, installStrategyJobSource = testutils.NewFakeInformerFor(&batchv1.Job{})
	stores.InstallStrategyJobCache = informers.InstallStrategyJob.GetStore()

	informers.InfrastructurePod, infrastructurePodSource = testutils.NewFakeInformerFor(&k8sv1.Pod{})
	stores.InfrastructurePodCache = informers.InfrastructurePod.GetStore()

	controller = NewKubeVirtController(virtClient, kvInformer, recorder, stores, informers)

	// Wrap our workqueue to have a way to detect when we are done processing updates
	mockQueue = testutils.NewMockWorkQueue(controller.queue)
	controller.queue = mockQueue

	// Set up mock client
	virtClient.EXPECT().KubeVirt(NAMESPACE).Return(kvInterface).AnyTimes()
	kubeClient = fake.NewSimpleClientset()

	secClient = &secv1fake.FakeSecurityV1{
		Fake: &fake.NewSimpleClientset().Fake,
	}
	extClient = extclientfake.NewSimpleClientset()

	virtClient.EXPECT().CoreV1().Return(kubeClient.CoreV1()).AnyTimes()
	virtClient.EXPECT().BatchV1().Return(kubeClient.BatchV1()).AnyTimes()
	virtClient.EXPECT().RbacV1().Return(kubeClient.RbacV1()).AnyTimes()
	virtClient.EXPECT().AppsV1().Return(kubeClient.AppsV1()).AnyTimes()
	virtClient.EXPECT().SecClient().Return(secClient).AnyTimes()
	virtClient.EXPECT().ExtensionsClient().Return(extClient).AnyTimes()

	// Make sure that all unexpected calls to kubeClient will fail
	kubeClient.Fake.PrependReactor("*", "*", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
		Expect(action).To(BeNil())
		return true, nil, nil
	})
	secClient.Fake.PrependReactor("*", "*", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
		Expect(action).To(BeNil())
		return true, nil, nil
	})
	extClient.Fake.PrependReactor("*", "*", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
		Expect(action).To(BeNil())
		return true, nil, nil
	})

	syncCaches(stop)

	// add the privileged SCC without KubeVirt accounts
	scc := getSCC()
	sccSource.Add(&scc)
})
// Stops all informers, verifies every recorded event was consumed by the
// spec, and checks that all gomock expectations were satisfied.
AfterEach(func() {
	close(stop)

	// Ensure that we add checks for expected events to every test
	Expect(recorder.Events).To(BeEmpty())

	ctrl.Finish()
})
// injectMetadata stamps an object as operator-managed and records the
// install version/registry annotations. When both version and registry are
// empty the object is left untouched.
injectMetadata := func(objectMeta *metav1.ObjectMeta, version string, registry string) {
	if version == "" && registry == "" {
		return
	}

	labels := objectMeta.Labels
	if labels == nil {
		labels = make(map[string]string)
		objectMeta.Labels = labels
	}
	labels[v1.ManagedByLabel] = v1.ManagedByLabelOperatorValue

	annotations := objectMeta.Annotations
	if annotations == nil {
		annotations = make(map[string]string)
		objectMeta.Annotations = annotations
	}
	annotations[v1.InstallStrategyVersionAnnotation] = version
	annotations[v1.InstallStrategyRegistryAnnotation] = registry
}
// Each add* helper pushes one object into the matching fake watch source and
// blocks until the controller's work queue has observed the resulting add
// event, so subsequent Execute() calls see a fully populated cache.
addKubeVirt := func(kv *v1.KubeVirt) {
	mockQueue.ExpectAdds(1)
	kvSource.Add(kv)
	mockQueue.Wait()
}
addServiceAccount := func(sa *k8sv1.ServiceAccount) {
	mockQueue.ExpectAdds(1)
	serviceAccountSource.Add(sa)
	mockQueue.Wait()
}
addClusterRole := func(cr *rbacv1.ClusterRole) {
	mockQueue.ExpectAdds(1)
	clusterRoleSource.Add(cr)
	mockQueue.Wait()
}
addClusterRoleBinding := func(crb *rbacv1.ClusterRoleBinding) {
	mockQueue.ExpectAdds(1)
	clusterRoleBindingSource.Add(crb)
	mockQueue.Wait()
}
addRole := func(role *rbacv1.Role) {
	mockQueue.ExpectAdds(1)
	roleSource.Add(role)
	mockQueue.Wait()
}
addRoleBinding := func(rb *rbacv1.RoleBinding) {
	mockQueue.ExpectAdds(1)
	roleBindingSource.Add(rb)
	mockQueue.Wait()
}
addCrd := func(crd *extv1beta1.CustomResourceDefinition) {
	mockQueue.ExpectAdds(1)
	crdSource.Add(crd)
	mockQueue.Wait()
}
addService := func(svc *k8sv1.Service) {
	mockQueue.ExpectAdds(1)
	serviceSource.Add(svc)
	mockQueue.Wait()
}
addDeployment := func(depl *appsv1.Deployment) {
	mockQueue.ExpectAdds(1)
	deploymentSource.Add(depl)
	mockQueue.Wait()
}
addDaemonset := func(ds *appsv1.DaemonSet) {
	mockQueue.ExpectAdds(1)
	daemonSetSource.Add(ds)
	mockQueue.Wait()
}
addInstallStrategyConfigMap := func(c *k8sv1.ConfigMap) {
	mockQueue.ExpectAdds(1)
	installStrategyConfigMapSource.Add(c)
	mockQueue.Wait()
}
addInstallStrategyJob := func(job *batchv1.Job) {
	mockQueue.ExpectAdds(1)
	installStrategyJobSource.Add(job)
	mockQueue.Wait()
}
addPod := func(pod *k8sv1.Pod) {
	mockQueue.ExpectAdds(1)
	infrastructurePodSource.Add(pod)
	mockQueue.Wait()
}
// addResource dispatches on the object's concrete type: it stamps the
// operator's version/registry metadata onto the object and pushes it into
// the matching fake informer source. Unknown types fail the spec.
addResource := func(obj runtime.Object, version string, registry string) {
	// The type switch already binds the typed value as `resource`, so the
	// original's extra `obj.(*T)` re-assertions in each case were redundant.
	switch resource := obj.(type) {
	case *k8sv1.ServiceAccount:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addServiceAccount(resource)
	case *rbacv1.ClusterRole:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addClusterRole(resource)
	case *rbacv1.ClusterRoleBinding:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addClusterRoleBinding(resource)
	case *rbacv1.Role:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addRole(resource)
	case *rbacv1.RoleBinding:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addRoleBinding(resource)
	case *extv1beta1.CustomResourceDefinition:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addCrd(resource)
	case *k8sv1.Service:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addService(resource)
	case *appsv1.Deployment:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addDeployment(resource)
	case *appsv1.DaemonSet:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addDaemonset(resource)
	case *batchv1.Job:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addInstallStrategyJob(resource)
	case *k8sv1.ConfigMap:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addInstallStrategyConfigMap(resource)
	case *k8sv1.Pod:
		injectMetadata(&resource.ObjectMeta, version, registry)
		addPod(resource)
	default:
		Fail("unknown resource type")
	}
}
// addInstallStrategy registers an install-strategy ConfigMap for the given
// image tag/registry, mimicking the suffix that generate-name would add.
addInstallStrategy := func(imageTag string, imageRegistry string) {
	// install strategy config
	configMap, _ := installstrategy.NewInstallStrategyConfigMap(NAMESPACE, imageTag, imageRegistry)
	configMap.Name = fmt.Sprintf("%s-%s", configMap.Name, rand.String(10))
	addResource(configMap, imageTag, imageRegistry)
}
// addPods registers one running, ready pod each for virt-api,
// virt-controller and virt-handler at the given version/registry, derived
// from the components' pod templates.
addPods := func(version string, registry string) {
	pullPolicy := "IfNotPresent"
	imagePullPolicy := k8sv1.PullPolicy(pullPolicy)
	verbosity := "2"

	// makePod builds a running/ready pod from a pod template, stamps the
	// operator metadata on it and registers it — this replaces three
	// copy-pasted pod literals in the original.
	makePod := func(template k8sv1.PodTemplateSpec, name string) {
		pod := &k8sv1.Pod{
			ObjectMeta: template.ObjectMeta,
			Spec:       template.Spec,
			Status: k8sv1.PodStatus{
				Phase: k8sv1.PodRunning,
				ContainerStatuses: []k8sv1.ContainerStatus{
					{Ready: true, Name: "somecontainer"},
				},
			},
		}
		injectMetadata(&pod.ObjectMeta, version, registry)
		pod.Name = name
		addPod(pod)
	}

	// we need at least one active pod for
	//  virt-api
	//  virt-controller
	//  virt-handler
	apiDeployment, _ := components.NewApiServerDeployment(NAMESPACE, registry, version, imagePullPolicy, verbosity)
	makePod(apiDeployment.Spec.Template, "virt-api-xxxx")

	// renamed from "controller" to avoid shadowing the shared
	// *KubeVirtController test variable
	controllerDeployment, _ := components.NewControllerDeployment(NAMESPACE, registry, version, imagePullPolicy, verbosity)
	makePod(controllerDeployment.Spec.Template, "virt-controller-xxxx")

	handler, _ := components.NewHandlerDaemonSet(NAMESPACE, registry, version, imagePullPolicy, verbosity)
	makePod(handler.Spec.Template, "virt-handler-xxxx")
}
// generateRandomResources registers one randomly named instance of each
// operator-managed resource kind (with a random version/registry) and
// returns how many were created. Used to verify that the operator cleans up
// managed resources it does not recognize.
generateRandomResources := func() int {
	version := fmt.Sprintf("rand-%s", rand.String(10))
	registry := fmt.Sprintf("rand-%s", rand.String(10))

	// factored out of nine copy-pasted Sprintf calls in the original
	randName := func() string {
		return fmt.Sprintf("rand-%s", rand.String(10))
	}

	// A typed []runtime.Object removes the original's []interface{} slice,
	// per-element cast and unreachable Fail branch.
	all := []runtime.Object{
		&k8sv1.ServiceAccount{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "v1",
				Kind:       "ServiceAccount",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: randName(),
			},
		},
		&rbacv1.ClusterRole{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "rbac.authorization.k8s.io/v1",
				Kind:       "ClusterRole",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: randName(),
			},
		},
		&rbacv1.ClusterRoleBinding{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "rbac.authorization.k8s.io/v1",
				Kind:       "ClusterRoleBinding",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: randName(),
			},
		},
		&rbacv1.Role{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "rbac.authorization.k8s.io/v1",
				Kind:       "Role",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: randName(),
			},
		},
		&rbacv1.RoleBinding{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "rbac.authorization.k8s.io/v1",
				Kind:       "RoleBinding",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: randName(),
			},
		},
		&extv1beta1.CustomResourceDefinition{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "apiextensions.k8s.io/v1beta1",
				Kind:       "CustomResourceDefinition",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: randName(),
			},
		},
		&k8sv1.Service{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "v1",
				Kind:       "Service",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: randName(),
			},
		},
		&appsv1.DaemonSet{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "apps/v1",
				Kind:       "DaemonSet",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: randName(),
			},
		},
		&appsv1.Deployment{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "apps/v1",
				Kind:       "Deployment",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: randName(),
			},
		},
	}

	for _, resource := range all {
		addResource(resource, version, registry)
	}
	return len(all)
}
// addAll seeds the caches with every resource the operator manages for the
// given version/registry: RBAC, CRDs, services, the virt-api/virt-controller
// deployments and the virt-handler daemonset, and grants the KubeVirt
// service accounts access to the privileged SCC.
addAll := func(version string, registry string) {
	pullPolicy := "IfNotPresent"
	imagePullPolicy := k8sv1.PullPolicy(pullPolicy)
	verbosity := "2"
	all := make([]interface{}, 0)

	// rbac
	all = append(all, rbac.GetAllCluster(NAMESPACE)...)
	all = append(all, rbac.GetAllApiServer(NAMESPACE)...)
	all = append(all, rbac.GetAllHandler(NAMESPACE)...)
	all = append(all, rbac.GetAllController(NAMESPACE)...)
	// crds
	all = append(all, components.NewVirtualMachineInstanceCrd())
	all = append(all, components.NewPresetCrd())
	all = append(all, components.NewReplicaSetCrd())
	all = append(all, components.NewVirtualMachineCrd())
	all = append(all, components.NewVirtualMachineInstanceMigrationCrd())
	// services and deployments
	all = append(all, components.NewPrometheusService(NAMESPACE))
	all = append(all, components.NewApiServerService(NAMESPACE))
	apiDeployment, _ := components.NewApiServerDeployment(NAMESPACE, registry, version, imagePullPolicy, verbosity)
	// renamed from "controller" to avoid shadowing the shared
	// *KubeVirtController test variable
	controllerDeployment, _ := components.NewControllerDeployment(NAMESPACE, registry, version, imagePullPolicy, verbosity)
	handler, _ := components.NewHandlerDaemonSet(NAMESPACE, registry, version, imagePullPolicy, verbosity)
	all = append(all, apiDeployment, controllerDeployment, handler)

	for _, obj := range all {
		if resource, ok := obj.(runtime.Object); ok {
			addResource(resource, version, registry)
		} else {
			Fail("could not cast to runtime.Object")
		}
	}

	// update SCC
	scc := getSCC()
	prefix := "system:serviceaccount"
	scc.Users = append(scc.Users,
		fmt.Sprintf("%s:%s:%s", prefix, NAMESPACE, "kubevirt-handler"),
		fmt.Sprintf("%s:%s:%s", prefix, NAMESPACE, "kubevirt-apiserver"),
		fmt.Sprintf("%s:%s:%s", prefix, NAMESPACE, "kubevirt-controller"))
	sccSource.Modify(&scc)
}
// makeApiAndControllerReady waits for the virt-api and virt-controller
// deployments to appear in the cache, then publishes a modified copy that
// reports all desired replicas as ready.
makeApiAndControllerReady := func() {
	// makeDeploymentReady marks every desired replica of the cached
	// deployment as ready on a deep copy and pushes the update through the
	// fake watch source.
	makeDeploymentReady := func(item interface{}) {
		depl, _ := item.(*appsv1.Deployment)
		deplNew := depl.DeepCopy()
		var replicas int32 = 1
		if depl.Spec.Replicas != nil {
			replicas = *depl.Spec.Replicas
		}
		deplNew.Status.Replicas = replicas
		deplNew.Status.ReadyReplicas = replicas
		deploymentSource.Modify(deplNew)
	}

	for _, name := range []string{"/virt-api", "/virt-controller"} {
		// we need to wait until the deployment exists; break as soon as it
		// has been marked ready instead of sleeping one more second the way
		// the original loop did after a successful poll
		for {
			obj, exists, _ := controller.stores.DeploymentCache.GetByKey(NAMESPACE + name)
			if exists {
				makeDeploymentReady(obj)
				break
			}
			time.Sleep(time.Second)
		}
	}
}
makeHandlerReady := func() {
exists := false
var obj interface{}
// we need to wait until the daemonset exists
for !exists {
obj, exists, _ = controller.stores.DaemonSetCache.GetByKey(NAMESPACE + "/virt-handler")
if exists {
handler, _ := obj.(*appsv1.DaemonSet)
handlerNew := handler.DeepCopy()
handlerNew.Status.DesiredNumberScheduled = 1
handlerNew.Status.NumberReady = 1
daemonSetSource.Modify(handlerNew)
}
time.Sleep(time.Second)
}
}
// Each delete* helper removes the keyed object from the matching fake watch
// source (if it is cached) and blocks until the work queue has observed the
// delete event. deleteResource dispatches by the resource name reported by
// the fake client's delete action.
deleteServiceAccount := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.ServiceAccount.GetStore().GetByKey(key); exists {
		serviceAccountSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteClusterRole := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.ClusterRole.GetStore().GetByKey(key); exists {
		clusterRoleSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteClusterRoleBinding := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.ClusterRoleBinding.GetStore().GetByKey(key); exists {
		clusterRoleBindingSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteRole := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.Role.GetStore().GetByKey(key); exists {
		roleSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteRoleBinding := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.RoleBinding.GetStore().GetByKey(key); exists {
		roleBindingSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteCrd := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.Crd.GetStore().GetByKey(key); exists {
		crdSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteService := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.Service.GetStore().GetByKey(key); exists {
		serviceSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteDeployment := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.Deployment.GetStore().GetByKey(key); exists {
		deploymentSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteDaemonset := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.DaemonSet.GetStore().GetByKey(key); exists {
		daemonSetSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteInstallStrategyJob := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.InstallStrategyJob.GetStore().GetByKey(key); exists {
		installStrategyJobSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
deleteInstallStrategyConfigMap := func(key string) {
	mockQueue.ExpectAdds(1)
	if obj, exists, _ := informers.InstallStrategyConfigMap.GetStore().GetByKey(key); exists {
		installStrategyConfigMapSource.Delete(obj.(runtime.Object))
	}
	mockQueue.Wait()
}
// deleteResource routes a delete, keyed by the fake-client resource name
// (e.g. "serviceaccounts"), to the matching helper above.
deleteResource := func(resource string, key string) {
	switch resource {
	case "serviceaccounts":
		deleteServiceAccount(key)
	case "clusterroles":
		deleteClusterRole(key)
	case "clusterrolebindings":
		deleteClusterRoleBinding(key)
	case "roles":
		deleteRole(key)
	case "rolebindings":
		deleteRoleBinding(key)
	case "customresourcedefinitions":
		deleteCrd(key)
	case "services":
		deleteService(key)
	case "deployments":
		deleteDeployment(key)
	case "daemonsets":
		deleteDaemonset(key)
	case "jobs":
		deleteInstallStrategyJob(key)
	case "configmaps":
		deleteInstallStrategyConfigMap(key)
	default:
		Fail("unknown resource type")
	}
}
// Generic fake-client reactors: each counts the actions of its kind so the
// tests can assert exact patch/update/create/delete totals.
genericUpdateFunc := func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	update, ok := action.(testing.UpdateAction)
	Expect(ok).To(BeTrue())
	totalUpdates++
	return true, update.GetObject(), nil
}
genericPatchFunc := func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	_, ok := action.(testing.PatchAction)
	Expect(ok).To(BeTrue())
	totalPatches++
	return true, nil, nil
}
// Created objects are also mirrored into the informer caches (with empty
// version/registry) so the controller sees its own creations.
genericCreateFunc := func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	create, ok := action.(testing.CreateAction)
	Expect(ok).To(BeTrue())
	totalAdds++
	addResource(create.GetObject(), "", "")
	return true, create.GetObject(), nil
}
// Deletions are mirrored into the caches only while deleteFromCache is set.
genericDeleteFunc := func(action testing.Action) (handled bool, obj runtime.Object, err error) {
	delete, ok := action.(testing.DeleteAction)
	Expect(ok).To(BeTrue())
	totalDeletions++
	// reconstruct the "<namespace>/<name>" (or bare name) store key
	var key string
	if len(delete.GetNamespace()) > 0 {
		key = delete.GetNamespace() + "/"
	}
	key += delete.GetName()
	if deleteFromCache {
		deleteResource(delete.GetResource().Resource, key)
	}
	return true, nil, nil
}
// expectUsers decodes the "users" list from an SCC patch payload and asserts
// it contains exactly `count` entries. Offset 2 attributes failures to the
// test body that installed the reactor.
expectUsers := func(userBytes []byte, count int) {
	type _users struct {
		Users []string `json:"users"`
	}
	users := &_users{}
	// fail loudly on a malformed payload instead of silently asserting
	// against an empty user list (the original discarded this error)
	err := json.Unmarshal(userBytes, users)
	ExpectWithOffset(2, err).ToNot(HaveOccurred())
	ExpectWithOffset(2, len(users.Users)).To(Equal(count))
}
// The shouldExpect* helpers install fake-client reactors / gomock
// expectations describing the API calls a test expects the controller to
// make during Execute().

// Expects deletion of the install-strategy configmap and mirrors it into
// the cache.
shouldExpectInstallStrategyDeletion := func() {
	kubeClient.Fake.PrependReactor("delete", "configmaps", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
		delete, ok := action.(testing.DeleteAction)
		Expect(ok).To(BeTrue())
		var key string
		if len(delete.GetNamespace()) > 0 {
			key = delete.GetNamespace() + "/"
		}
		key += delete.GetName()
		deleteResource(delete.GetResource().Resource, key)
		return true, nil, nil
	})
}
// Expects deletion of all managed resources; the SCC patch must shrink the
// user list back to the single pre-existing entry.
shouldExpectDeletions := func() {
	kubeClient.Fake.PrependReactor("delete", "serviceaccounts", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "clusterroles", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "clusterrolebindings", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "roles", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "rolebindings", genericDeleteFunc)
	secClient.Fake.PrependReactor("patch", "securitycontextconstraints", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
		patch, _ := action.(testing.PatchAction)
		expectUsers(patch.GetPatch(), 1)
		return true, nil, nil
	})
	extClient.Fake.PrependReactor("delete", "customresourcedefinitions", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "services", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "deployments", genericDeleteFunc)
	kubeClient.Fake.PrependReactor("delete", "daemonsets", genericDeleteFunc)
}
shouldExpectJobDeletion := func() {
	kubeClient.Fake.PrependReactor("delete", "jobs", genericDeleteFunc)
}
shouldExpectJobCreation := func() {
	kubeClient.Fake.PrependReactor("create", "jobs", genericCreateFunc)
}
// Expects the update-path calls: RBAC objects are updated, everything else
// is patched.
shouldExpectPatchesAndUpdates := func() {
	extClient.Fake.PrependReactor("patch", "customresourcedefinitions", genericPatchFunc)
	kubeClient.Fake.PrependReactor("patch", "serviceaccounts", genericPatchFunc)
	kubeClient.Fake.PrependReactor("update", "clusterroles", genericUpdateFunc)
	kubeClient.Fake.PrependReactor("update", "clusterrolebindings", genericUpdateFunc)
	kubeClient.Fake.PrependReactor("update", "roles", genericUpdateFunc)
	kubeClient.Fake.PrependReactor("update", "rolebindings", genericUpdateFunc)
	kubeClient.Fake.PrependReactor("patch", "services", genericPatchFunc)
	kubeClient.Fake.PrependReactor("patch", "daemonsets", genericPatchFunc)
	kubeClient.Fake.PrependReactor("patch", "deployments", genericPatchFunc)
}
// Expects the create-path calls; the SCC patch must grow the user list to
// four entries (the original user plus three KubeVirt service accounts).
shouldExpectCreations := func() {
	kubeClient.Fake.PrependReactor("create", "serviceaccounts", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "clusterroles", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "clusterrolebindings", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "roles", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "rolebindings", genericCreateFunc)
	secClient.Fake.PrependReactor("patch", "securitycontextconstraints", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
		patch, _ := action.(testing.PatchAction)
		expectUsers(patch.GetPatch(), 4)
		return true, nil, nil
	})
	extClient.Fake.PrependReactor("create", "customresourcedefinitions", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "services", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "deployments", genericCreateFunc)
	kubeClient.Fake.PrependReactor("create", "daemonsets", genericCreateFunc)
}
// Expects `times` KubeVirt status updates and mirrors each into the informer.
shouldExpectKubeVirtUpdate := func(times int) {
	update := kvInterface.EXPECT().Update(gomock.Any())
	update.Do(func(kv *v1.KubeVirt) {
		kvInformer.GetStore().Update(kv)
		update.Return(kv, nil)
	}).Times(times)
}
// Same as above, but also asserts the target/observed version in the update.
shouldExpectKubeVirtUpdateVersion := func(times int, imageTag string) {
	update := kvInterface.EXPECT().Update(gomock.Any())
	update.Do(func(kv *v1.KubeVirt) {
		Expect(kv.Status.TargetKubeVirtVersion).To(Equal(imageTag))
		Expect(kv.Status.ObservedKubeVirtVersion).To(Equal(imageTag))
		kvInformer.GetStore().Update(kv)
		update.Return(kv, nil)
	}).Times(times)
}
// Expects exactly one update carrying a single condition with the given reason.
shouldExpectKubeVirtUpdateFailureCondition := func(reason string) {
	update := kvInterface.EXPECT().Update(gomock.Any())
	update.Do(func(kv *v1.KubeVirt) {
		Expect(len(kv.Status.Conditions)).To(Equal(1))
		Expect(kv.Status.Conditions[0].Reason).To(Equal(reason))
		kvInformer.GetStore().Update(kv)
		update.Return(kv, nil)
	}).Times(1)
}
// getLatestKubeVirt returns the informer's current copy of the given
// KubeVirt object, or nil when it is no longer cached (or has an
// unexpected type).
getLatestKubeVirt := func(kv *v1.KubeVirt) *v1.KubeVirt {
	key := kv.GetNamespace() + "/" + kv.GetName()
	obj, exists, _ := kvInformer.GetStore().GetByKey(key)
	if !exists {
		return nil
	}
	kvLatest, ok := obj.(*v1.KubeVirt)
	if !ok {
		return nil
	}
	return kvLatest
}
Context("On valid KubeVirt object", func() {
// A KubeVirt CR already in the Deleted phase and carrying a deletion
// timestamp should only trigger removal of the install-strategy configmap.
It("should delete install strategy configmap once kubevirt install is deleted", func(done Done) {
	defer close(done)

	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-install",
			Namespace: NAMESPACE,
		},
		Status: v1.KubeVirtStatus{
			Phase: v1.KubeVirtPhaseDeleted,
		},
	}
	kv.DeletionTimestamp = now()

	shouldExpectInstallStrategyDeletion()

	addKubeVirt(kv)
	addInstallStrategy(defaultImageTag, defaultRegistry)
	controller.Execute()
}, 15)
// A deployed KubeVirt CR with a custom spec.imageTag and no recorded
// target/observed versions should get its status updated to reflect the
// custom tag once all components are ready.
It("should observe custom image tag in status during deploy", func(done Done) {
	defer close(done)

	kv := &v1.KubeVirt{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-install",
			Namespace:  NAMESPACE,
			Finalizers: []string{util.KubeVirtFinalizer},
		},
		Spec: v1.KubeVirtSpec{
			ImageTag: "custom.tag",
		},
		Status: v1.KubeVirtStatus{
			Phase: v1.KubeVirtPhaseDeployed,
			Conditions: []v1.KubeVirtCondition{
				{
					Type:    v1.KubeVirtConditionCreated,
					Status:  k8sv1.ConditionTrue,
					Reason:  ConditionReasonDeploymentCreated,
					Message: "All resources were created.",
				},
				{
					Type:    v1.KubeVirtConditionReady,
					Status:  k8sv1.ConditionTrue,
					Reason:  ConditionReasonDeploymentReady,
					Message: "All components are ready.",
				},
			},
			OperatorVersion: version.Get().String(),
		},
	}

	// create all resources which should already exist
	addKubeVirt(kv)
	addAll("custom.tag", defaultRegistry)
	// install strategy config
	addInstallStrategy("custom.tag", defaultRegistry)
	addPods("custom.tag", defaultRegistry)
	makeApiAndControllerReady()
	makeHandlerReady()

	// the single status update must carry the custom tag
	shouldExpectKubeVirtUpdateVersion(1, "custom.tag")
	controller.Execute()
}, 15)
It("should do nothing if KubeVirt object is deployed", func(done Done) {
defer close(done)
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
Conditions: []v1.KubeVirtCondition{
{
Type: v1.KubeVirtConditionCreated,
Status: k8sv1.ConditionTrue,
Reason: ConditionReasonDeploymentCreated,
Message: "All resources were created.",
},
{
Type: v1.KubeVirtConditionReady,
Status: k8sv1.ConditionTrue,
Reason: ConditionReasonDeploymentReady,
Message: "All components are ready.",
},
},
OperatorVersion: version.Get().String(),
TargetKubeVirtVersion: defaultImageTag,
TargetKubeVirtRegistry: defaultRegistry,
ObservedKubeVirtVersion: defaultImageTag,
ObservedKubeVirtRegistry: defaultRegistry,
},
}
// create all resources which should already exist
addKubeVirt(kv)
addInstallStrategy(defaultImageTag, defaultRegistry)
addAll(defaultImageTag, defaultRegistry)
addPods(defaultImageTag, defaultRegistry)
makeApiAndControllerReady()
makeHandlerReady()
controller.Execute()
}, 15)
It("should delete operator managed resources not in the deployed installstrategy", func(done Done) {
defer close(done)
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
Conditions: []v1.KubeVirtCondition{
{
Type: v1.KubeVirtConditionCreated,
Status: k8sv1.ConditionTrue,
Reason: ConditionReasonDeploymentCreated,
Message: "All resources were created.",
},
},
OperatorVersion: version.Get().String(),
TargetKubeVirtVersion: defaultImageTag,
TargetKubeVirtRegistry: defaultRegistry,
ObservedKubeVirtVersion: defaultImageTag,
ObservedKubeVirtRegistry: defaultRegistry,
},
}
deleteFromCache = false
// create all resources which should already exist
addKubeVirt(kv)
addInstallStrategy(defaultImageTag, defaultRegistry)
addAll(defaultImageTag, defaultRegistry)
numResources := generateRandomResources()
addPods(defaultImageTag, defaultRegistry)
makeApiAndControllerReady()
makeHandlerReady()
shouldExpectDeletions()
controller.Execute()
Expect(totalDeletions).To(Equal(numResources))
}, 15)
It("should fail if KubeVirt object already exists", func(done Done) {
defer close(done)
kv1 := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install-1",
Namespace: NAMESPACE,
UID: "11111111111",
Finalizers: []string{util.KubeVirtFinalizer},
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
Conditions: []v1.KubeVirtCondition{
{
Type: v1.KubeVirtConditionCreated,
Status: k8sv1.ConditionTrue,
Reason: ConditionReasonDeploymentCreated,
Message: "All resources were created.",
},
{
Type: v1.KubeVirtConditionReady,
Status: k8sv1.ConditionTrue,
Reason: ConditionReasonDeploymentReady,
Message: "All components are ready.",
},
},
OperatorVersion: "v0.0.0-master+$Format:%h$",
},
}
kv2 := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install-2",
Namespace: NAMESPACE,
UID: "123123123",
},
Status: v1.KubeVirtStatus{},
}
addKubeVirt(kv1)
addKubeVirt(kv2)
shouldExpectKubeVirtUpdateFailureCondition(ConditionReasonDeploymentFailedExisting)
controller.execute(fmt.Sprintf("%s/%s", kv2.Namespace, kv2.Name))
}, 15)
It("should generate install strategy creation job for update version", func(done Done) {
defer close(done)
updatedVersion := "1.1.1"
updatedRegistry := "otherregistry"
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Spec: v1.KubeVirtSpec{
ImageTag: updatedVersion,
ImageRegistry: updatedRegistry,
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
Conditions: []v1.KubeVirtCondition{
{
Type: v1.KubeVirtConditionCreated,
Status: k8sv1.ConditionTrue,
Reason: ConditionReasonDeploymentCreated,
Message: "All resources were created.",
},
{
Type: v1.KubeVirtConditionReady,
Status: k8sv1.ConditionTrue,
Reason: ConditionReasonDeploymentReady,
Message: "All components are ready.",
},
},
OperatorVersion: version.Get().String(),
TargetKubeVirtVersion: defaultImageTag,
TargetKubeVirtRegistry: defaultRegistry,
ObservedKubeVirtVersion: defaultImageTag,
ObservedKubeVirtRegistry: defaultRegistry,
},
}
// create all resources which should already exist
addKubeVirt(kv)
addInstallStrategy(defaultImageTag, defaultRegistry)
shouldExpectKubeVirtUpdate(1)
shouldExpectJobCreation()
controller.Execute()
}, 15)
It("should generate install strategy creation job if no install strategy exists", func(done Done) {
defer close(done)
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Status: v1.KubeVirtStatus{},
}
// create all resources which should already exist
addKubeVirt(kv)
shouldExpectKubeVirtUpdate(1)
shouldExpectJobCreation()
controller.Execute()
}, 15)
It("should delete install strategy creation job if job has failed", func(done Done) {
defer close(done)
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Status: v1.KubeVirtStatus{},
}
job := controller.generateInstallStrategyJob(kv)
// will only create a new job after 10 seconds has passed.
// this is just a simple mechanism to prevent spin loops
// in the event that jobs are fast failing for some unknown reason.
completionTime := time.Now().Add(time.Duration(-10) * time.Second)
job.Status.CompletionTime = &metav1.Time{Time: completionTime}
// create all resources which should already exist
addKubeVirt(kv)
addInstallStrategyJob(job)
shouldExpectJobDeletion()
shouldExpectKubeVirtUpdate(1)
controller.Execute()
}, 15)
It("should not delete completed install strategy creation job if job has failed less that 10 seconds ago", func(done Done) {
defer close(done)
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Status: v1.KubeVirtStatus{},
}
job := controller.generateInstallStrategyJob(kv)
job.Status.CompletionTime = now()
// create all resources which should already exist
addKubeVirt(kv)
addInstallStrategyJob(job)
shouldExpectKubeVirtUpdate(1)
controller.Execute()
}, 15)
It("should add resources on create", func(done Done) {
defer close(done)
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
},
}
addKubeVirt(kv)
addInstallStrategy(defaultImageTag, defaultRegistry)
job := controller.generateInstallStrategyJob(kv)
job.Status.CompletionTime = now()
addInstallStrategyJob(job)
// ensure completed jobs are garbage collected once install strategy
// is loaded
deleteFromCache = false
shouldExpectJobDeletion()
shouldExpectKubeVirtUpdate(1)
shouldExpectCreations()
controller.Execute()
kv = getLatestKubeVirt(kv)
Expect(kv.Status.Phase).To(Equal(v1.KubeVirtPhaseDeploying))
Expect(len(kv.Status.Conditions)).To(Equal(0))
// -2 because waiting on controller and virt-handler daemonset until API server deploys successfully
Expect(totalAdds).To(Equal(resourceCount - 2))
Expect(len(controller.stores.ServiceAccountCache.List())).To(Equal(3))
Expect(len(controller.stores.ClusterRoleCache.List())).To(Equal(7))
Expect(len(controller.stores.ClusterRoleBindingCache.List())).To(Equal(5))
Expect(len(controller.stores.RoleCache.List())).To(Equal(2))
Expect(len(controller.stores.RoleBindingCache.List())).To(Equal(2))
Expect(len(controller.stores.CrdCache.List())).To(Equal(5))
Expect(len(controller.stores.ServiceCache.List())).To(Equal(2))
Expect(len(controller.stores.DeploymentCache.List())).To(Equal(1))
Expect(len(controller.stores.DaemonSetCache.List())).To(Equal(0))
}, 15)
It("should pause rollback until api server is rolled over.", func(done Done) {
defer close(done)
rollbackVersion := "9.9.7"
rollbackRegistry := "otherregistry"
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Spec: v1.KubeVirtSpec{
ImageTag: rollbackVersion,
ImageRegistry: rollbackRegistry,
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
Conditions: []v1.KubeVirtCondition{
{
Type: v1.KubeVirtConditionCreated,
Status: k8sv1.ConditionTrue,
Reason: ConditionReasonDeploymentCreated,
Message: "All resources were created.",
},
{
Type: v1.KubeVirtConditionReady,
Status: k8sv1.ConditionTrue,
Reason: ConditionReasonDeploymentReady,
Message: "All components are ready.",
},
},
OperatorVersion: version.Get().String(),
TargetKubeVirtVersion: defaultImageTag,
TargetKubeVirtRegistry: defaultRegistry,
ObservedKubeVirtVersion: defaultImageTag,
ObservedKubeVirtRegistry: defaultRegistry,
},
}
// create all resources which should already exist
addKubeVirt(kv)
addInstallStrategy(defaultImageTag, defaultRegistry)
addInstallStrategy(rollbackVersion, rollbackRegistry)
addAll(defaultImageTag, defaultRegistry)
addPods(defaultImageTag, defaultRegistry)
makeApiAndControllerReady()
makeHandlerReady()
shouldExpectPatchesAndUpdates()
shouldExpectKubeVirtUpdate(1)
controller.Execute()
// on rollback or create, api server must be online first before controllers and daemonset.
// On rollback this prevents someone from posting invalid specs to
// the cluster from newer versions when an older version is being deployed.
// On create this prevents invalid specs from entering the cluster
// while controllers are available to process them.
Expect(totalPatches).To(Equal(patchCount - 2))
Expect(totalUpdates).To(Equal(updateCount))
}, 15)
It("should pause update until daemonsets and controllers are rolled over.", func(done Done) {
defer close(done)
updatedVersion := "9.9.10"
updatedRegistry := "otherregistry"
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Spec: v1.KubeVirtSpec{
ImageTag: updatedVersion,
ImageRegistry: updatedRegistry,
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
Conditions: []v1.KubeVirtCondition{
{
Type: v1.KubeVirtConditionCreated,
Status: k8sv1.ConditionTrue,
Reason: ConditionReasonDeploymentCreated,
Message: "All resources were created.",
},
{
Type: v1.KubeVirtConditionReady,
Status: k8sv1.ConditionTrue,
Reason: ConditionReasonDeploymentReady,
Message: "All components are ready.",
},
},
OperatorVersion: version.Get().String(),
TargetKubeVirtVersion: defaultImageTag,
TargetKubeVirtRegistry: defaultRegistry,
ObservedKubeVirtVersion: defaultImageTag,
ObservedKubeVirtRegistry: defaultRegistry,
},
}
// create all resources which should already exist
addKubeVirt(kv)
addInstallStrategy(defaultImageTag, defaultRegistry)
addInstallStrategy(updatedVersion, updatedRegistry)
addAll(defaultImageTag, defaultRegistry)
addPods(defaultImageTag, defaultRegistry)
makeApiAndControllerReady()
makeHandlerReady()
shouldExpectPatchesAndUpdates()
shouldExpectKubeVirtUpdate(1)
controller.Execute()
// on update, apiserver won't get patched until daemonset and controller pods are online.
// this prevents the new API from coming online until the controllers can manage it.
Expect(totalPatches).To(Equal(patchCount - 1))
Expect(totalUpdates).To(Equal(updateCount))
}, 15)
It("should update kubevirt resources when Operator version changes if no imageTag and imageRegistry is explicilty set.", func(done Done) {
defer close(done)
updatedVersion := "1.1.1"
updatedRegistry := "otherregistry"
os.Setenv(util.OperatorImageEnvName, fmt.Sprintf("%s/virt-operator:%s", updatedRegistry, updatedVersion))
controller.config = util.GetConfig()
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Spec: v1.KubeVirtSpec{},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
Conditions: []v1.KubeVirtCondition{
{
Type: v1.KubeVirtConditionCreated,
Status: k8sv1.ConditionTrue,
Reason: ConditionReasonDeploymentCreated,
Message: "All resources were created.",
},
{
Type: v1.KubeVirtConditionReady,
Status: k8sv1.ConditionTrue,
Reason: ConditionReasonDeploymentReady,
Message: "All components are ready.",
},
},
OperatorVersion: version.Get().String(),
TargetKubeVirtVersion: defaultImageTag,
TargetKubeVirtRegistry: defaultRegistry,
ObservedKubeVirtVersion: defaultImageTag,
ObservedKubeVirtRegistry: defaultRegistry,
},
}
// create all resources which should already exist
addKubeVirt(kv)
addInstallStrategy(defaultImageTag, defaultRegistry)
addInstallStrategy(updatedVersion, updatedRegistry)
addAll(defaultImageTag, defaultRegistry)
addPods(defaultImageTag, defaultRegistry)
// pods for the new version are added so this test won't
// wait for daemonsets to rollover before updating/patching
// all resources.
addPods(updatedVersion, updatedRegistry)
makeApiAndControllerReady()
makeHandlerReady()
shouldExpectPatchesAndUpdates()
shouldExpectKubeVirtUpdate(1)
controller.Execute()
Expect(totalPatches).To(Equal(patchCount))
Expect(totalUpdates).To(Equal(updateCount))
// ensure every resource is either patched or updated
Expect(totalUpdates + totalPatches).To(Equal(resourceCount))
}, 15)
It("should update resources when changing KubeVirt version.", func(done Done) {
defer close(done)
updatedVersion := "1.1.1"
updatedRegistry := "otherregistry"
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
Finalizers: []string{util.KubeVirtFinalizer},
},
Spec: v1.KubeVirtSpec{
ImageTag: updatedVersion,
ImageRegistry: updatedRegistry,
},
Status: v1.KubeVirtStatus{
Phase: v1.KubeVirtPhaseDeployed,
Conditions: []v1.KubeVirtCondition{
{
Type: v1.KubeVirtConditionCreated,
Status: k8sv1.ConditionTrue,
Reason: ConditionReasonDeploymentCreated,
Message: "All resources were created.",
},
{
Type: v1.KubeVirtConditionReady,
Status: k8sv1.ConditionTrue,
Reason: ConditionReasonDeploymentReady,
Message: "All components are ready.",
},
},
OperatorVersion: version.Get().String(),
TargetKubeVirtVersion: defaultImageTag,
TargetKubeVirtRegistry: defaultRegistry,
ObservedKubeVirtVersion: defaultImageTag,
ObservedKubeVirtRegistry: defaultRegistry,
},
}
// create all resources which should already exist
addKubeVirt(kv)
addInstallStrategy(defaultImageTag, defaultRegistry)
addInstallStrategy(updatedVersion, updatedRegistry)
addAll(defaultImageTag, defaultRegistry)
addPods(defaultImageTag, defaultRegistry)
// pods for the new version are added so this test won't
// wait for daemonsets to rollover before updating/patching
// all resources.
addPods(updatedVersion, updatedRegistry)
makeApiAndControllerReady()
makeHandlerReady()
shouldExpectPatchesAndUpdates()
shouldExpectKubeVirtUpdate(1)
controller.Execute()
Expect(totalPatches).To(Equal(patchCount))
Expect(totalUpdates).To(Equal(updateCount))
// ensure every resource is either patched or updated
Expect(totalUpdates + totalPatches).To(Equal(resourceCount))
}, 15)
It("should remove resources on deletion", func(done Done) {
defer close(done)
kv := &v1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: "test-install",
Namespace: NAMESPACE,
},
}
kv.DeletionTimestamp = now()
addKubeVirt(kv)
// create all resources which should be deleted
addInstallStrategy(defaultImageTag, defaultRegistry)
addAll(defaultImageTag, defaultRegistry)
shouldExpectKubeVirtUpdate(1)
shouldExpectDeletions()
shouldExpectInstallStrategyDeletion()
controller.Execute()
// Note: in real life during the first execution loop very probably only CRDs are deleted,
// because that takes some time (see the check that the crd store is empty before going on with deletions)
// But in this test the deletion succeeds immediately, so everything is deleted on first try
Expect(totalDeletions).To(Equal(resourceCount))
kv = getLatestKubeVirt(kv)
Expect(kv.Status.Phase).To(Equal(v1.KubeVirtPhaseDeleted))
Expect(len(kv.Status.Conditions)).To(Equal(0))
}, 15)
})
Context("On install strategy dump", func() {
It("should generate latest install strategy and post as config map", func(done Done) {
defer close(done)
kubeClient.Fake.PrependReactor("create", "configmaps", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
create, ok := action.(testing.CreateAction)
Expect(ok).To(BeTrue())
configMap := create.GetObject().(*k8sv1.ConfigMap)
Expect(configMap.GenerateName).To(Equal("kubevirt-install-strategy-"))
version, ok := configMap.ObjectMeta.Annotations[v1.InstallStrategyVersionAnnotation]
Expect(ok).To(BeTrue())
Expect(version).To(Equal(defaultImageTag))
registry, ok := configMap.ObjectMeta.Annotations[v1.InstallStrategyRegistryAnnotation]
Expect(registry).To(Equal(defaultRegistry))
Expect(ok).To(BeTrue())
_, ok = configMap.Data["manifests"]
Expect(ok).To(BeTrue())
return true, create.GetObject(), nil
})
// This generates and posts the install strategy config map
installstrategy.DumpInstallStrategyToConfigMap(virtClient)
}, 15)
})
})
// now returns the current wall-clock time wrapped as a *metav1.Time,
// suitable for assigning to object metadata fields such as
// DeletionTimestamp.
func now() *metav1.Time {
	t := metav1.Now()
	return &t
}
|
/*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package handlers
import (
"net/http"
"github.com/go-logr/logr"
admissionv1 "k8s.io/api/admission/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/validation/field"
"github.com/jetstack/cert-manager/pkg/internal/api/validation"
)
// registryBackedValidator validates admission requests by decoding their
// payloads with a scheme-backed decoder and delegating the actual checks to
// a validation registry.
type registryBackedValidator struct {
	log logr.Logger
	// decoder deserializes raw admission request bytes into runtime objects.
	decoder runtime.Decoder
	// registry holds the per-GroupVersionKind validation functions.
	registry *validation.Registry
}
func NewRegistryBackedValidator(log logr.Logger, scheme *runtime.Scheme, registry *validation.Registry) *registryBackedValidator {
factory := serializer.NewCodecFactory(scheme)
return ®istryBackedValidator{
log: log,
decoder: factory.UniversalDecoder(),
registry: registry,
}
}
// Validate handles an admission request: it decodes the incoming object
// (and, when present, the old object), resolves the request's
// GroupVersionKind, runs the matching validations from the registry for
// CREATE/UPDATE operations, and returns an AdmissionResponse that denies
// the request if any validation failed.
func (r *registryBackedValidator) Validate(admissionSpec *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse {
	status := &admissionv1.AdmissionResponse{}
	// Echo the request UID so the API server can correlate the response.
	status.UID = admissionSpec.UID

	// decode new version of object
	obj, _, err := r.decoder.Decode(admissionSpec.Object.Raw, nil, nil)
	if err != nil {
		// An undecodable payload is a malformed request: deny with 400.
		status.Allowed = false
		status.Result = &metav1.Status{
			Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
			Message: err.Error(),
		}
		return status
	}

	// attempt to decode old object
	// OldObject.Raw is only non-empty for operations that have a prior
	// state (e.g. updates), so decoding is skipped otherwise.
	var oldObj runtime.Object
	if len(admissionSpec.OldObject.Raw) > 0 {
		oldObj, _, err = r.decoder.Decode(admissionSpec.OldObject.Raw, nil, nil)
		if err != nil {
			status.Allowed = false
			status.Result = &metav1.Status{
				Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
				Message: err.Error(),
			}
			return status
		}
	}

	// RequestKind field is only present from Kubernetes 1.15 onwards, so
	// use the regular 'kind' if RequestKind is not present
	gvk := schema.GroupVersionKind{
		Group:   admissionSpec.Kind.Group,
		Version: admissionSpec.Kind.Version,
		Kind:    admissionSpec.Kind.Kind,
	}
	if admissionSpec.RequestKind != nil {
		gvk = schema.GroupVersionKind{
			Group:   admissionSpec.RequestKind.Group,
			Version: admissionSpec.RequestKind.Version,
			Kind:    admissionSpec.RequestKind.Kind,
		}
	}

	errs := field.ErrorList{}
	if admissionSpec.Operation == admissionv1.Create {
		// perform validation on new version of resource
		errs = append(errs, r.registry.Validate(obj, gvk)...)
	} else if admissionSpec.Operation == admissionv1.Update {
		// perform update validation on resource
		errs = append(errs, r.registry.ValidateUpdate(oldObj, obj, gvk)...)
	}
	// NOTE: operations other than CREATE/UPDATE run no validations and fall
	// through to the allowed response below.

	// return with allowed = false if any errors occurred
	if err := errs.ToAggregate(); err != nil {
		status.Allowed = false
		status.Result = &metav1.Status{
			Status: metav1.StatusFailure, Code: http.StatusNotAcceptable, Reason: metav1.StatusReasonNotAcceptable,
			Message: err.Error(),
		}
		return status
	}

	status.Allowed = true
	return status
}
Updates webhook validation handler to use new function signature
Signed-off-by: joshvanl <525f14a1257c4e494fcfb5b458ace90de1e9c01d@gmail.com>
/*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package handlers
import (
"net/http"
"github.com/go-logr/logr"
admissionv1 "k8s.io/api/admission/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/validation/field"
"github.com/jetstack/cert-manager/pkg/internal/api/validation"
)
// registryBackedValidator validates admission requests by decoding their
// payloads with a scheme-backed decoder and delegating the actual checks to
// a validation registry.
type registryBackedValidator struct {
	log logr.Logger
	// decoder deserializes raw admission request bytes into runtime objects.
	decoder runtime.Decoder
	// registry holds the per-GroupVersionKind validation functions.
	registry *validation.Registry
}
func NewRegistryBackedValidator(log logr.Logger, scheme *runtime.Scheme, registry *validation.Registry) *registryBackedValidator {
factory := serializer.NewCodecFactory(scheme)
return ®istryBackedValidator{
log: log,
decoder: factory.UniversalDecoder(),
registry: registry,
}
}
// Validate handles an admission request: it decodes the incoming object
// (and, when present, the old object), resolves the request's
// GroupVersionKind, runs the matching validations from the registry for
// CREATE/UPDATE operations (passing the full AdmissionRequest through), and
// returns an AdmissionResponse that denies the request if any validation
// failed.
func (r *registryBackedValidator) Validate(admissionSpec *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse {
	status := &admissionv1.AdmissionResponse{}
	// Echo the request UID so the API server can correlate the response.
	status.UID = admissionSpec.UID

	// decode new version of object
	obj, _, err := r.decoder.Decode(admissionSpec.Object.Raw, nil, nil)
	if err != nil {
		// An undecodable payload is a malformed request: deny with 400.
		status.Allowed = false
		status.Result = &metav1.Status{
			Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
			Message: err.Error(),
		}
		return status
	}

	// attempt to decode old object
	// OldObject.Raw is only non-empty for operations that have a prior
	// state (e.g. updates), so decoding is skipped otherwise.
	var oldObj runtime.Object
	if len(admissionSpec.OldObject.Raw) > 0 {
		oldObj, _, err = r.decoder.Decode(admissionSpec.OldObject.Raw, nil, nil)
		if err != nil {
			status.Allowed = false
			status.Result = &metav1.Status{
				Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
				Message: err.Error(),
			}
			return status
		}
	}

	// RequestKind field is only present from Kubernetes 1.15 onwards, so
	// use the regular 'kind' if RequestKind is not present
	gvk := schema.GroupVersionKind{
		Group:   admissionSpec.Kind.Group,
		Version: admissionSpec.Kind.Version,
		Kind:    admissionSpec.Kind.Kind,
	}
	if admissionSpec.RequestKind != nil {
		gvk = schema.GroupVersionKind{
			Group:   admissionSpec.RequestKind.Group,
			Version: admissionSpec.RequestKind.Version,
			Kind:    admissionSpec.RequestKind.Kind,
		}
	}

	errs := field.ErrorList{}
	if admissionSpec.Operation == admissionv1.Create {
		// perform validation on new version of resource
		errs = append(errs, r.registry.Validate(admissionSpec, obj, gvk)...)
	} else if admissionSpec.Operation == admissionv1.Update {
		// perform update validation on resource
		errs = append(errs, r.registry.ValidateUpdate(admissionSpec, oldObj, obj, gvk)...)
	}
	// NOTE: operations other than CREATE/UPDATE run no validations and fall
	// through to the allowed response below.

	// return with allowed = false if any errors occurred
	if err := errs.ToAggregate(); err != nil {
		status.Allowed = false
		status.Result = &metav1.Status{
			Status: metav1.StatusFailure, Code: http.StatusNotAcceptable, Reason: metav1.StatusReasonNotAcceptable,
			Message: err.Error(),
		}
		return status
	}

	status.Allowed = true
	return status
}
|
package cluster
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/user"
"strconv"
"strings"
"sync"
"text/template"
"time"
"github.com/flynn/flynn/cli/config"
"github.com/flynn/flynn/discoverd/client"
"github.com/flynn/flynn/pkg/random"
"github.com/flynn/flynn/test/buildlog"
)
// ClusterType classifies how a booted VM participates in the test cluster.
type ClusterType uint8

const (
	// ClusterTypeDefault marks instances belonging to the default cluster.
	ClusterTypeDefault ClusterType = iota
	// ClusterTypeRelease marks instances belonging to the release cluster.
	ClusterTypeRelease
	// ClusterTypeNone marks instances tracked by the cluster but assigned to
	// neither group.
	ClusterTypeNone
)
// BootConfig holds host-level settings needed to boot cluster VMs.
type BootConfig struct {
	User     string // user to run build/boot VMs as (resolved to uid/gid)
	Kernel   string // path to the kernel image booted by each VM
	Network  string // network CIDR/config for the bridge
	NatIface string // interface to NAT VM traffic through
	Backend  string // flynn-host backend (e.g. "libvirt-lxc")
}

// Cluster manages a set of test VMs that together form a Flynn cluster,
// including the network bridge, VM manager, and bootstrap credentials.
type Cluster struct {
	ID            string        `json:"id"`
	Instances     instances     `json:"instances"`
	BackoffPeriod time.Duration `json:"backoff_period"`
	ClusterDomain string        `json:"cluster_domain"`
	ControllerPin string        `json:"controller_pin"`
	ControllerKey string        `json:"controller_key"`
	RouterIP      string        `json:"router_ip"`

	// defaultInstances / releaseInstances partition Instances by ClusterType.
	defaultInstances []*Instance
	releaseInstances []*Instance

	// discMtx guards lazy creation of disc (see discoverdClient).
	discMtx sync.Mutex
	disc    *discoverd.Client

	bc     BootConfig
	vm     *VMManager
	out    io.Writer // destination for log/logf output
	bridge *Bridge   // network bridge shared by all VMs; created in setup
	rootFS string    // path of the built root filesystem image
}
// ControllerDomain returns the controller endpoint hostname for the cluster.
func (c *Cluster) ControllerDomain() string {
	return fmt.Sprintf("controller.%s", c.ClusterDomain)
}
// GitDomain returns the git endpoint hostname for the cluster.
func (c *Cluster) GitDomain() string {
	return fmt.Sprintf("git.%s", c.ClusterDomain)
}
// instances is a list of cluster VM instances addressable by ID.
type instances []*Instance

// Get returns the instance with the given ID, or an error when no instance
// matches.
func (i instances) Get(id string) (*Instance, error) {
	for idx := range i {
		if i[idx].ID == id {
			return i[idx], nil
		}
	}
	return nil, fmt.Errorf("no such host: %s", id)
}
// discoverdClient lazily creates and returns a shared discoverd client
// pointing at the given host IP (port 1111). The client is created only on
// the first call; subsequent calls return the cached client and ignore ip.
func (c *Cluster) discoverdClient(ip string) *discoverd.Client {
	c.discMtx.Lock()
	defer c.discMtx.Unlock()
	if c.disc == nil {
		c.disc = discoverd.NewClientWithURL(fmt.Sprintf("http://%s:1111", ip))
	}
	return c.disc
}
// Streams bundles the standard I/O streams attached to a command run on a
// cluster instance.
type Streams struct {
	Stdin  io.Reader
	Stdout io.Writer
	Stderr io.Writer
}
// New creates a Cluster with a random 8-character ID using the given boot
// configuration; log output is written to out.
func New(bc BootConfig, out io.Writer) *Cluster {
	c := new(Cluster)
	c.ID = random.String(8)
	c.bc = bc
	c.out = out
	return c
}
// BuildFlynn is a convenience wrapper that creates a throwaway cluster,
// builds Flynn (without running unit tests), and shuts the cluster down
// before returning the resulting root filesystem path.
func BuildFlynn(bc BootConfig, rootFS, commit string, merge bool, out io.Writer) (string, error) {
	c := New(bc, out)
	defer c.Shutdown()
	return c.BuildFlynn(rootFS, commit, merge, false)
}
// log writes a timestamped line ("++ HH:MM:SS.mmm ...") to the cluster's
// output writer.
func (c *Cluster) log(a ...interface{}) (int, error) {
	return fmt.Fprintln(c.out, append([]interface{}{"++", time.Now().Format("15:04:05.000")}, a...)...)
}

// logf writes a timestamped formatted message to the cluster's output
// writer; the format string f supplies its own trailing newline if desired.
func (c *Cluster) logf(f string, a ...interface{}) (int, error) {
	return fmt.Fprintf(c.out, strings.Join([]string{"++", time.Now().Format("15:04:05.000"), f}, " "), a...)
}
// BuildFlynn boots a dedicated build VM backed by a copy-on-write clone of
// rootFS, runs the Flynn build script for the given commit (optionally also
// the unit tests), shuts the VM down, and records and returns the path of
// the resulting drive image as the cluster's root filesystem.
func (c *Cluster) BuildFlynn(rootFS, commit string, merge bool, runTests bool) (string, error) {
	c.log("Building Flynn...")

	if err := c.setup(); err != nil {
		return "", err
	}

	uid, gid, err := lookupUser(c.bc.User)
	if err != nil {
		return "", err
	}

	build, err := c.vm.NewInstance(&VMConfig{
		Kernel: c.bc.Kernel,
		User:   uid,
		Group:  gid,
		Memory: "4096",
		Cores:  8,
		Drives: map[string]*VMDrive{
			"hda": {FS: rootFS, COW: true, Temp: false},
		},
	})
	if err != nil {
		// Fix: the original returned build.Drive("hda").FS here, but build
		// may be nil when NewInstance fails, causing a nil-pointer panic.
		return "", err
	}
	c.log("Booting build instance...")
	if err := build.Start(); err != nil {
		return build.Drive("hda").FS, fmt.Errorf("error starting build instance: %s", err)
	}

	c.log("Waiting for instance to boot...")
	if err := buildFlynn(build, commit, merge, c.out); err != nil {
		build.Kill()
		return build.Drive("hda").FS, fmt.Errorf("error running build script: %s", err)
	}

	if runTests {
		if err := runUnitTests(build, c.out); err != nil {
			build.Kill()
			return build.Drive("hda").FS, fmt.Errorf("unit tests failed: %s", err)
		}
	}

	if err := build.Shutdown(); err != nil {
		return build.Drive("hda").FS, fmt.Errorf("error while stopping build instance: %s", err)
	}
	c.rootFS = build.Drive("hda").FS
	return c.rootFS, nil
}
// BootResult describes a successfully booted cluster: the controller
// endpoint and credentials plus the instances that were started.
type BootResult struct {
	ControllerDomain string
	ControllerPin    string
	ControllerKey    string
	Instances        []*Instance
}
// Boot starts count VMs of the given type on the cluster's root filesystem,
// starts flynn-host on each, bootstraps layer 1, and returns the resulting
// connection details. On any failure it dumps instance logs to buildLog (if
// provided and instances exist) and, when killOnFailure is set, tears the
// cluster down.
func (c *Cluster) Boot(typ ClusterType, count int, buildLog *buildlog.Log, killOnFailure bool) (res *BootResult, err error) {
	if err := c.setup(); err != nil {
		return nil, err
	}
	defer func() {
		// err is the named return value, so this observes failures from any
		// of the returns below.
		if err != nil {
			if buildLog != nil && len(c.Instances) > 0 {
				c.DumpLogs(buildLog)
			}
			if killOnFailure {
				c.Shutdown()
			}
		}
	}()

	c.log("Booting", count, "VMs")
	// initial=true: these VMs form the initial cluster and join the peer list.
	instances, err := c.startVMs(typ, c.rootFS, count, true)
	if err != nil {
		return nil, err
	}
	for _, inst := range instances {
		if err := c.startFlynnHost(inst, instances); err != nil {
			return nil, err
		}
	}

	c.log("Bootstrapping layer 1...")
	if err := c.bootstrapLayer1(instances); err != nil {
		return nil, err
	}

	return &BootResult{
		ControllerDomain: c.ControllerDomain(),
		ControllerPin:    c.ControllerPin,
		ControllerKey:    c.ControllerKey,
		Instances:        instances,
	}, nil
}
// BridgeIP returns the IP address of the cluster's network bridge, or the
// empty string when no bridge has been created yet.
func (c *Cluster) BridgeIP() string {
	if b := c.bridge; b != nil {
		return b.IP()
	}
	return ""
}
// AddHost boots one additional default-type VM on the existing root
// filesystem and starts flynn-host on it, peering it with the cluster's
// existing default instances. Requires BuildFlynn to have run first.
func (c *Cluster) AddHost() (*Instance, error) {
	if c.rootFS == "" {
		return nil, errors.New("cluster not yet booted")
	}
	c.log("Booting 1 VM")
	// initial=false: the new VM does not join the initial peer list itself.
	instances, err := c.startVMs(ClusterTypeDefault, c.rootFS, 1, false)
	if err != nil {
		return nil, err
	}
	inst := instances[0]
	if err := c.startFlynnHost(inst, c.defaultInstances); err != nil {
		return nil, err
	}
	// err is nil here.
	return inst, err
}
// AddVanillaHost boots one VM on the given root filesystem without starting
// flynn-host on it and without assigning it to a cluster group.
func (c *Cluster) AddVanillaHost(rootFS string) (*Instance, error) {
	c.log("Booting 1 VM")
	instances, err := c.startVMs(ClusterTypeNone, rootFS, 1, false)
	// Fix: the original indexed instances[0] unconditionally; on error
	// startVMs returns a nil slice, which would panic.
	if err != nil {
		return nil, err
	}
	return instances[0], nil
}
// RemoveHost stops flynn-host on the instance but leaves it running so the logs
// are still available if we need to dump them later.
func (c *Cluster) RemoveHost(id string) error {
	inst, err := c.Instances.Get(id)
	if err != nil {
		return err
	}
	c.log("removing host", id)

	// ssh into the host and tell the flynn-host daemon to stop
	var cmd string
	switch c.bc.Backend {
	case "libvirt-lxc":
		// manually kill containers after stopping flynn-host due to https://github.com/flynn/flynn/issues/1177
		cmd = "sudo start-stop-daemon --stop --pidfile /var/run/flynn-host.pid --retry 15 && (virsh -c lxc:/// list --name | xargs -L 1 virsh -c lxc:/// destroy || true)"
	}
	// NOTE(review): for backends other than libvirt-lxc, cmd stays empty and
	// an empty command is run on the instance — confirm this is intentional.
	return inst.Run(cmd, nil)
}
// Size returns the number of instances currently tracked by the cluster.
func (c *Cluster) Size() int {
	return len(c.Instances)
}
// startVMs creates and boots count VMs backed by temporary copy-on-write
// clones of rootFS, appends them to the cluster's instance lists (grouped
// by typ), and returns them. When initial is true the first VM receives
// extra memory (it hosts the test binary) and every VM is marked as an
// initial cluster member.
func (c *Cluster) startVMs(typ ClusterType, rootFS string, count int, initial bool) ([]*Instance, error) {
	uid, gid, err := lookupUser(c.bc.User)
	if err != nil {
		return nil, err
	}

	instances := make([]*Instance, count)
	for i := 0; i < count; i++ {
		memory := "2048"
		if initial && i == 0 {
			// give the first instance more memory as that is where
			// the test binary runs, and the tests use a lot of memory
			memory = "8192"
		}
		inst, err := c.vm.NewInstance(&VMConfig{
			Kernel: c.bc.Kernel,
			User:   uid,
			Group:  gid,
			Memory: memory,
			Cores:  2,
			Drives: map[string]*VMDrive{
				"hda": {FS: rootFS, COW: true, Temp: true},
			},
		})
		if err != nil {
			return nil, fmt.Errorf("error creating instance %d: %s", i, err)
		}
		if err = inst.Start(); err != nil {
			return nil, fmt.Errorf("error starting instance %d: %s", i, err)
		}
		inst.initial = initial
		instances[i] = inst
		c.Instances = append(c.Instances, inst)
		// Track the instance in the per-type group as well.
		switch typ {
		case ClusterTypeDefault:
			c.defaultInstances = append(c.defaultInstances, inst)
		case ClusterTypeRelease:
			c.releaseInstances = append(c.releaseInstances, inst)
		}
	}
	return instances, nil
}
// startFlynnHost renders the backend-specific flynn-host startup script for
// inst — using only initial peer instances as the peer list — and executes
// it on the instance via bash.
func (c *Cluster) startFlynnHost(inst *Instance, peerInstances []*Instance) error {
	tmpl, ok := flynnHostScripts[c.bc.Backend]
	if !ok {
		return fmt.Errorf("unknown host backend: %s", c.bc.Backend)
	}
	// Only instances booted as part of the initial cluster join the peer list.
	peers := make([]string, 0, len(peerInstances))
	for _, inst := range peerInstances {
		if !inst.initial {
			continue
		}
		peers = append(peers, inst.IP)
	}
	var script bytes.Buffer
	data := hostScriptData{
		ID:    inst.ID,
		IP:    inst.IP,
		Peers: strings.Join(peers, ","),
	}
	// Fix: the original ignored the Execute error, which would silently run
	// an empty or truncated startup script on template failure.
	if err := tmpl.Execute(&script, data); err != nil {
		return fmt.Errorf("error generating flynn-host startup script: %s", err)
	}
	c.logf("Starting flynn-host on %s [id: %s]\n", inst.IP, inst.ID)
	return inst.Run("bash", &Streams{Stdin: &script, Stdout: c.out, Stderr: os.Stderr})
}
// setup validates the configured kernel path and lazily creates the network
// bridge (named "flynnbr." plus a random suffix) and the VM manager used to
// boot instances. The bridge is only created on the first call; the VM
// manager is (re)created on every call.
func (c *Cluster) setup() error {
	if _, err := os.Stat(c.bc.Kernel); os.IsNotExist(err) {
		return fmt.Errorf("cluster: not a kernel file: %s", c.bc.Kernel)
	}
	if c.bridge == nil {
		var err error
		name := "flynnbr." + random.String(5)
		c.logf("creating network bridge %s\n", name)
		c.bridge, err = createBridge(name, c.bc.Network, c.bc.NatIface)
		if err != nil {
			return fmt.Errorf("could not create network bridge: %s", err)
		}
	}
	c.vm = NewVMManager(c.bridge)
	return nil
}
func (c *Cluster) Run(command string, s *Streams) error {
return c.run(command, s, nil)
}
func (c *Cluster) RunWithEnv(command string, s *Streams, env map[string]string) error {
return c.run(command, s, env)
}
func (c *Cluster) run(command string, s *Streams, env map[string]string) error {
if len(c.Instances) == 0 {
return errors.New("no booted servers in cluster")
}
return c.Instances[0].RunWithEnv(command, s, env)
}
func (c *Cluster) CLIConfig() (*config.Config, error) {
conf := &config.Config{}
s := &config.Cluster{
Name: "default",
ControllerURL: "https://controller." + c.ClusterDomain,
GitURL: "https://git." + c.ClusterDomain,
DockerPushURL: "https://docker." + c.ClusterDomain,
Key: c.ControllerKey,
TLSPin: c.ControllerPin,
}
if err := conf.Add(s, true /*force*/); err != nil {
return nil, err
}
return conf, nil
}
func (c *Cluster) Shutdown() {
for i, inst := range c.Instances {
c.logf("killing instance %d [id: %s]\n", i, inst.ID)
if err := inst.Kill(); err != nil {
c.logf("error killing instance %d: %s\n", i, err)
}
}
if c.bridge != nil {
c.logf("deleting network bridge %s\n", c.bridge.name)
if err := deleteBridge(c.bridge); err != nil {
c.logf("error deleting network bridge %s: %s\n", c.bridge.name, err)
}
c.bridge = nil
}
}
var flynnBuildScript = template.Must(template.New("flynn-build").Parse(`
#!/bin/bash
set -e -x
export GOPATH=~/go
flynn=$GOPATH/src/github.com/flynn/flynn
if [ ! -d $flynn ]; then
git clone https://github.com/flynn/flynn $flynn
fi
cd $flynn
# Also fetch Github PR commits
if ! git config --get-all remote.origin.fetch | grep -q '^+refs/pull'; then
git config --add remote.origin.fetch '+refs/pull/*/head:refs/remotes/origin/pr/*'
fi
git fetch
git checkout --quiet {{ .Commit }}
{{ if .Merge }}
git config user.email "ci@flynn.io"
git config user.name "CI"
git merge origin/master
{{ end }}
test/scripts/wait-for-docker
# pull flynn/busybox before building to avoid the following Docker error when
# building images from scratch concurrently:
# "could not find image: no such id: flynn/busybox"
docker pull flynn/busybox
make
if [[ -f test/scripts/debug-info.sh ]]; then
sudo cp test/scripts/debug-info.sh /usr/local/bin/debug-info.sh
fi
sudo cp host/bin/flynn-* /usr/local/bin
sudo cp bootstrap/bin/manifest.json /etc/flynn-bootstrap.json
`[1:]))
type buildData struct {
Commit string
Merge bool
}
// buildFlynn runs the Flynn build script for the given commit on the build
// instance, streaming output to out. The build is aborted after 30 minutes.
func buildFlynn(inst *Instance, commit string, merge bool, out io.Writer) error {
	var b bytes.Buffer
	// the Execute error was previously ignored, which could silently run a
	// truncated build script
	if err := flynnBuildScript.Execute(&b, buildData{commit, merge}); err != nil {
		return fmt.Errorf("error generating build script: %s", err)
	}
	return inst.RunWithTimeout("bash", &Streams{Stdin: &b, Stdout: out, Stderr: out}, 30*time.Minute)
}
var flynnUnitTestScript = `
#!/bin/bash
set -e -x
export GOPATH=~/go
flynn=$GOPATH/src/github.com/flynn/flynn
cd $flynn
if [[ -f test/scripts/test-unit.sh ]]; then
timeout --signal=QUIT --kill-after=10 5m test/scripts/test-unit.sh
fi
`[1:]
func runUnitTests(inst *Instance, out io.Writer) error {
return inst.Run("bash", &Streams{Stdin: bytes.NewBufferString(flynnUnitTestScript), Stdout: out, Stderr: out})
}
type hostScriptData struct {
ID string
IP string
Peers string
}
var flynnHostScripts = map[string]*template.Template{
"libvirt-lxc": template.Must(template.New("flynn-host-libvirt").Parse(`
if [[ -f /usr/local/bin/debug-info.sh ]]; then
/usr/local/bin/debug-info.sh &>/tmp/debug-info.log &
fi
sudo start-stop-daemon \
--start \
--background \
--no-close \
--make-pidfile \
--pidfile /var/run/flynn-host.pid \
--exec /usr/bin/env \
-- \
flynn-host \
daemon \
--id {{ .ID }} \
--external-ip {{ .IP }} \
--force \
--backend libvirt-lxc \
--peer-ips {{ .Peers }} \
--max-job-concurrency 8 \
&>/tmp/flynn-host.log
`[1:])),
}
type bootstrapMsg struct {
Id string `json:"id"`
State string `json:"state"`
Data json.RawMessage `json:"data"`
Error string `json:"error"`
}
type controllerCert struct {
Pin string `json:"pin"`
}
func (c *Cluster) bootstrapLayer1(instances []*Instance) error {
inst := instances[0]
c.ClusterDomain = fmt.Sprintf("flynn-%s.local", random.String(16))
c.ControllerKey = random.String(16)
c.BackoffPeriod = 5 * time.Second
rd, wr := io.Pipe()
ips := make([]string, len(instances))
for i, inst := range instances {
ips[i] = inst.IP
}
var cmdErr error
go func() {
command := fmt.Sprintf(
"CLUSTER_DOMAIN=%s CONTROLLER_KEY=%s BACKOFF_PERIOD=%fs flynn-host bootstrap --json --min-hosts=%d --peer-ips=%s /etc/flynn-bootstrap.json",
c.ClusterDomain, c.ControllerKey, c.BackoffPeriod.Seconds(), len(instances), strings.Join(ips, ","),
)
cmdErr = inst.Run(command, &Streams{Stdout: wr, Stderr: c.out})
wr.Close()
}()
// grab the controller tls pin from the bootstrap output
var cert controllerCert
dec := json.NewDecoder(rd)
for {
var msg bootstrapMsg
if err := dec.Decode(&msg); err == io.EOF {
break
} else if err != nil {
return fmt.Errorf("failed to parse bootstrap JSON output: %s", err)
}
c.log("bootstrap ===>", msg.Id, msg.State)
if msg.State == "error" {
c.log(msg.Error)
}
if msg.Id == "controller-cert" && msg.State == "done" {
json.Unmarshal(msg.Data, &cert)
}
}
if cmdErr != nil {
return cmdErr
}
if cert.Pin == "" {
return errors.New("could not determine controller cert from bootstrap output")
}
c.ControllerPin = cert.Pin
// grab the router IP from discoverd
disc := discoverd.NewClientWithURL(fmt.Sprintf("http://%s:1111", inst.IP))
leader, err := disc.Service("router-api").Leader()
if err != nil {
return fmt.Errorf("could not detect router ip: %s", err)
}
c.RouterIP = leader.Host()
return nil
}
// lookupUser resolves name to its numeric uid and gid via the system user
// database.
func lookupUser(name string) (int, int, error) {
	u, err := user.Lookup(name)
	if err != nil {
		return 0, 0, err
	}
	// the Atoi errors were previously discarded, silently mapping a
	// malformed passwd entry to uid/gid 0 (root)
	uid, err := strconv.Atoi(u.Uid)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid uid %q for user %s: %s", u.Uid, name, err)
	}
	gid, err := strconv.Atoi(u.Gid)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid gid %q for user %s: %s", u.Gid, name, err)
	}
	return uid, gid, nil
}
// DumpLogs collects debugging output from every instance into buildLog:
// host-level logs for each instance, then per-job logs (with a
// tail-everything fallback) for the default and release instance groups.
func (c *Cluster) DumpLogs(buildLog *buildlog.Log) {
	// run executes cmds on inst over SSH, streaming output into a new
	// build log file, stopping at the first command that fails
	run := func(log string, inst *Instance, cmds ...string) error {
		out, err := buildLog.NewFileWithTimeout(log, 60*time.Second)
		if err != nil {
			return err
		}
		for _, cmd := range cmds {
			fmt.Fprintln(out, "HostID:", inst.ID, "-", cmd)
			fmt.Fprintln(out)
			err := inst.Run(cmd, &Streams{Stdout: out, Stderr: out})
			fmt.Fprintln(out)
			if err != nil {
				return err
			}
		}
		return nil
	}
	// best-effort host logs; errors are deliberately ignored so one broken
	// host doesn't stop collection from the others
	for _, inst := range c.Instances {
		run(
			fmt.Sprintf("host-logs-%s.log", inst.ID),
			inst,
			"ps faux",
			"cat /var/log/flynn/flynn-host.log",
			"cat /tmp/debug-info.log",
			"sudo cat /var/log/libvirt/libvirtd.log",
		)
	}
	printLogs := func(typ string, instances []*Instance) {
		// fallback dumps all flynn logs, used when per-job collection fails
		fallback := func(instances []*Instance) {
			for _, inst := range instances {
				run(fmt.Sprintf("%s-fallback-%s.log", typ, inst.ID), inst, "sudo bash -c 'tail -n +1 /var/log/flynn/*.log'")
			}
		}
		run(fmt.Sprintf("%s-jobs.log", typ), instances[0], "flynn-host ps -a")
		var out bytes.Buffer
		cmd := `flynn-host ps -aqf '{{ metadata "flynn-controller.app_name" }}:{{ metadata "flynn-controller.type" }}:{{ .Job.ID }}'`
		if err := instances[0].Run(cmd, &Streams{Stdout: &out, Stderr: &out}); err != nil {
			fallback(instances)
			return
		}
		// only fallback if all `flynn-host log` commands fail
		shouldFallback := true
		jobs := strings.Split(strings.TrimSpace(out.String()), "\n")
		for _, job := range jobs {
			fields := strings.Split(job, ":")
			if len(fields) < 3 {
				// empty ps output splits to [""], and indexing fields[2]
				// used to panic; skip malformed lines
				continue
			}
			jobID := fields[2]
			cmds := []string{
				fmt.Sprintf("timeout 10s flynn-host inspect %s", jobID),
				fmt.Sprintf("timeout 10s flynn-host log --init %s", jobID),
			}
			if err := run(fmt.Sprintf("%s-%s.log", typ, job), instances[0], cmds...); err != nil {
				continue
			}
			shouldFallback = false
		}
		if shouldFallback {
			fallback(instances)
		}
		// run the fallback on any stopped instances as their logs will
		// not appear in `flynn-host ps`
		stoppedInstances := make([]*Instance, 0, len(instances))
		for _, inst := range instances {
			if err := inst.Run("sudo kill -0 $(cat /var/run/flynn-host.pid)", nil); err != nil {
				stoppedInstances = append(stoppedInstances, inst)
			}
		}
		if len(stoppedInstances) > 0 {
			fallback(stoppedInstances)
		}
	}
	if len(c.defaultInstances) > 0 {
		printLogs("default", c.defaultInstances)
	}
	if len(c.releaseInstances) > 0 {
		printLogs("release", c.releaseInstances)
	}
}
test: Increase VM memory
RAM is cheap, and CI has lots of it...
Signed-off-by: Lewis Marshall <748e1641a368164906d4a0c0e3965345453dcc93@lmars.net>
package cluster
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/user"
"strconv"
"strings"
"sync"
"text/template"
"time"
"github.com/flynn/flynn/cli/config"
"github.com/flynn/flynn/discoverd/client"
"github.com/flynn/flynn/pkg/random"
"github.com/flynn/flynn/test/buildlog"
)
// ClusterType controls which instance group a booted VM is tracked in
// (see Cluster.startVMs).
type ClusterType uint8

const (
	// ClusterTypeDefault adds instances to defaultInstances.
	ClusterTypeDefault ClusterType = iota
	// ClusterTypeRelease adds instances to releaseInstances.
	ClusterTypeRelease
	// ClusterTypeNone tracks the instance in Instances only.
	ClusterTypeNone
)
// BootConfig holds host-level settings used to boot cluster VMs.
type BootConfig struct {
	User     string // host user whose uid/gid the VMs run as (see lookupUser)
	Kernel   string // path to the kernel image (existence checked in setup)
	Network  string // network range for the bridge (passed to createBridge)
	NatIface string // interface used for NAT from the bridge
	Backend  string // flynn-host backend, e.g. "libvirt-lxc"
}
// Cluster manages a set of VM instances forming a Flynn test cluster,
// together with its network bridge and bootstrap credentials.
type Cluster struct {
	ID            string        `json:"id"`
	Instances     instances     `json:"instances"`
	BackoffPeriod time.Duration `json:"backoff_period"`
	ClusterDomain string        `json:"cluster_domain"`
	ControllerPin string        `json:"controller_pin"`
	ControllerKey string        `json:"controller_key"`
	RouterIP      string        `json:"router_ip"`

	// instance groups by ClusterType (populated in startVMs)
	defaultInstances []*Instance
	releaseInstances []*Instance

	discMtx sync.Mutex        // guards lazy init of disc
	disc    *discoverd.Client // cached client, see discoverdClient

	bc     BootConfig
	vm     *VMManager
	out    io.Writer // destination for log/logf output
	bridge *Bridge   // created once in setup, deleted in Shutdown
	rootFS string    // root filesystem produced by BuildFlynn
}
// ControllerDomain returns the domain the cluster's controller is served on.
func (c *Cluster) ControllerDomain() string {
	return "controller." + c.ClusterDomain
}
// GitDomain returns the domain the cluster's git endpoint is served on.
func (c *Cluster) GitDomain() string {
	return "git." + c.ClusterDomain
}
// instances is a collection of cluster instances addressable by ID.
type instances []*Instance

// Get returns the instance whose ID equals id, or an error if no such
// host exists in the collection.
func (i instances) Get(id string) (*Instance, error) {
	for idx := range i {
		if i[idx].ID == id {
			return i[idx], nil
		}
	}
	return nil, fmt.Errorf("no such host: %s", id)
}
// discoverdClient lazily creates and caches a discoverd client pointed at
// the given host IP. The mutex makes the one-time initialization safe for
// concurrent callers; note the cached client keeps whichever IP the first
// call supplied — later ip arguments are ignored.
func (c *Cluster) discoverdClient(ip string) *discoverd.Client {
	c.discMtx.Lock()
	defer c.discMtx.Unlock()
	if c.disc == nil {
		c.disc = discoverd.NewClientWithURL(fmt.Sprintf("http://%s:1111", ip))
	}
	return c.disc
}
// Streams bundles the standard streams attached to a command run on an
// instance (see Instance.Run and friends).
type Streams struct {
	Stdin  io.Reader
	Stdout io.Writer
	Stderr io.Writer
}
// New returns a Cluster with a random 8-character ID that boots VMs using
// bc and writes log output to out.
func New(bc BootConfig, out io.Writer) *Cluster {
	return &Cluster{
		ID:  random.String(8),
		bc:  bc,
		out: out,
	}
}
// BuildFlynn builds the given commit on a throwaway cluster (shut down
// before returning) and returns the path of the resulting root filesystem.
func BuildFlynn(bc BootConfig, rootFS, commit string, merge bool, out io.Writer) (string, error) {
	c := New(bc, out)
	defer c.Shutdown()
	return c.BuildFlynn(rootFS, commit, merge, false)
}
// log writes a timestamped "++"-prefixed line to the cluster's output.
func (c *Cluster) log(a ...interface{}) (int, error) {
	return fmt.Fprintln(c.out, append([]interface{}{"++", time.Now().Format("15:04:05.000")}, a...)...)
}
// logf is like log but takes a printf-style format string; the caller is
// responsible for any trailing newline in f.
func (c *Cluster) logf(f string, a ...interface{}) (int, error) {
	return fmt.Fprintf(c.out, strings.Join([]string{"++", time.Now().Format("15:04:05.000"), f}, " "), a...)
}
// BuildFlynn boots a single large build instance, builds the given commit
// on it (optionally merging origin/master and running the unit tests) and
// returns the path of the resulting root filesystem, which is cached on
// the cluster for later Boot calls.
func (c *Cluster) BuildFlynn(rootFS, commit string, merge bool, runTests bool) (string, error) {
	c.log("Building Flynn...")
	if err := c.setup(); err != nil {
		return "", err
	}
	uid, gid, err := lookupUser(c.bc.User)
	if err != nil {
		return "", err
	}
	// the build instance gets extra memory/cores as builds are heavy
	build, err := c.vm.NewInstance(&VMConfig{
		Kernel: c.bc.Kernel,
		User:   uid,
		Group:  gid,
		Memory: "16384",
		Cores:  8,
		Drives: map[string]*VMDrive{
			// Temp is false so the built filesystem outlives the instance
			"hda": {FS: rootFS, COW: true, Temp: false},
		},
	})
	if err != nil {
		// NOTE(review): if NewInstance returns a nil instance on error,
		// build.Drive here would panic — confirm NewInstance's contract
		return build.Drive("hda").FS, err
	}
	c.log("Booting build instance...")
	if err := build.Start(); err != nil {
		return build.Drive("hda").FS, fmt.Errorf("error starting build instance: %s", err)
	}
	c.log("Waiting for instance to boot...")
	if err := buildFlynn(build, commit, merge, c.out); err != nil {
		build.Kill()
		return build.Drive("hda").FS, fmt.Errorf("error running build script: %s", err)
	}
	if runTests {
		if err := runUnitTests(build, c.out); err != nil {
			build.Kill()
			return build.Drive("hda").FS, fmt.Errorf("unit tests failed: %s", err)
		}
	}
	if err := build.Shutdown(); err != nil {
		return build.Drive("hda").FS, fmt.Errorf("error while stopping build instance: %s", err)
	}
	c.rootFS = build.Drive("hda").FS
	return c.rootFS, nil
}
// BootResult describes a successfully booted cluster: the controller's
// domain, TLS pin and auth key, plus the instances that were started.
type BootResult struct {
	ControllerDomain string
	ControllerPin    string
	ControllerKey    string
	Instances        []*Instance
}
// Boot starts count VMs of the given cluster type, runs flynn-host on each
// and bootstraps layer 1, returning the controller details and booted
// instances. On failure, instance logs are dumped to buildLog (if non-nil)
// and the cluster is shut down when killOnFailure is set.
func (c *Cluster) Boot(typ ClusterType, count int, buildLog *buildlog.Log, killOnFailure bool) (res *BootResult, err error) {
	if err := c.setup(); err != nil {
		return nil, err
	}
	// the named return err lets this deferred cleanup observe the failure
	// from any return below
	defer func() {
		if err != nil {
			if buildLog != nil && len(c.Instances) > 0 {
				c.DumpLogs(buildLog)
			}
			if killOnFailure {
				c.Shutdown()
			}
		}
	}()
	c.log("Booting", count, "VMs")
	instances, err := c.startVMs(typ, c.rootFS, count, true)
	if err != nil {
		return nil, err
	}
	for _, inst := range instances {
		if err := c.startFlynnHost(inst, instances); err != nil {
			return nil, err
		}
	}
	c.log("Bootstrapping layer 1...")
	if err := c.bootstrapLayer1(instances); err != nil {
		return nil, err
	}
	return &BootResult{
		ControllerDomain: c.ControllerDomain(),
		ControllerPin:    c.ControllerPin,
		ControllerKey:    c.ControllerKey,
		Instances:        instances,
	}, nil
}
// BridgeIP returns the IP address of the cluster's network bridge, or the
// empty string if no bridge has been created yet.
func (c *Cluster) BridgeIP() string {
	if c.bridge != nil {
		return c.bridge.IP()
	}
	return ""
}
// AddHost boots one extra default-type VM from the built root filesystem
// and starts flynn-host on it, peered with the existing default instances.
// The cluster must have been built (BuildFlynn) first.
func (c *Cluster) AddHost() (*Instance, error) {
	if c.rootFS == "" {
		return nil, errors.New("cluster not yet booted")
	}
	c.log("Booting 1 VM")
	instances, err := c.startVMs(ClusterTypeDefault, c.rootFS, 1, false)
	if err != nil {
		return nil, err
	}
	inst := instances[0]
	if err := c.startFlynnHost(inst, c.defaultInstances); err != nil {
		return nil, err
	}
	// err is necessarily nil here; return nil explicitly for clarity
	return inst, nil
}
// AddVanillaHost boots one VM from the given root filesystem without
// starting flynn-host or registering it with any instance group.
func (c *Cluster) AddVanillaHost(rootFS string) (*Instance, error) {
	c.log("Booting 1 VM")
	instances, err := c.startVMs(ClusterTypeNone, rootFS, 1, false)
	// startVMs returns a nil slice on error, so indexing instances[0]
	// unconditionally (as before) would panic
	if err != nil {
		return nil, err
	}
	return instances[0], nil
}
// RemoveHost stops flynn-host on the instance but leaves it running so the logs
// are still available if we need to dump them later.
func (c *Cluster) RemoveHost(id string) error {
	inst, err := c.Instances.Get(id)
	if err != nil {
		return err
	}
	c.log("removing host", id)
	// ssh into the host and tell the flynn-host daemon to stop
	var cmd string
	switch c.bc.Backend {
	case "libvirt-lxc":
		// manually kill containers after stopping flynn-host due to https://github.com/flynn/flynn/issues/1177
		cmd = "sudo start-stop-daemon --stop --pidfile /var/run/flynn-host.pid --retry 15 && (virsh -c lxc:/// list --name | xargs -L 1 virsh -c lxc:/// destroy || true)"
	default:
		// previously an empty command was run for unknown backends;
		// fail explicitly instead (matches startFlynnHost)
		return fmt.Errorf("unknown host backend: %s", c.bc.Backend)
	}
	return inst.Run(cmd, nil)
}
// Size returns the number of instances currently tracked by the cluster.
func (c *Cluster) Size() int {
	return len(c.Instances)
}
// startVMs boots count VMs from rootFS, registers each with the cluster
// and with the instance group selected by typ. When initial is true the
// instances are flagged as initial cluster members (used by
// startFlynnHost's peer list) and the first one gets extra memory.
func (c *Cluster) startVMs(typ ClusterType, rootFS string, count int, initial bool) ([]*Instance, error) {
	uid, gid, err := lookupUser(c.bc.User)
	if err != nil {
		return nil, err
	}
	instances := make([]*Instance, count)
	for i := 0; i < count; i++ {
		memory := "8192"
		if initial && i == 0 {
			// give the first instance more memory as that is where
			// the test binary runs, and the tests use a lot of memory
			memory = "16384"
		}
		inst, err := c.vm.NewInstance(&VMConfig{
			Kernel: c.bc.Kernel,
			User:   uid,
			Group:  gid,
			Memory: memory,
			Cores:  2,
			Drives: map[string]*VMDrive{
				// Temp drives are discarded when the instance dies
				"hda": {FS: rootFS, COW: true, Temp: true},
			},
		})
		if err != nil {
			return nil, fmt.Errorf("error creating instance %d: %s", i, err)
		}
		if err = inst.Start(); err != nil {
			return nil, fmt.Errorf("error starting instance %d: %s", i, err)
		}
		inst.initial = initial
		instances[i] = inst
		c.Instances = append(c.Instances, inst)
		switch typ {
		case ClusterTypeDefault:
			c.defaultInstances = append(c.defaultInstances, inst)
		case ClusterTypeRelease:
			c.releaseInstances = append(c.releaseInstances, inst)
		}
	}
	return instances, nil
}
// startFlynnHost boots the flynn-host daemon on inst via the backend's
// start script, peering it with the initial instances in peerInstances.
func (c *Cluster) startFlynnHost(inst *Instance, peerInstances []*Instance) error {
	tmpl, ok := flynnHostScripts[c.bc.Backend]
	if !ok {
		return fmt.Errorf("unknown host backend: %s", c.bc.Backend)
	}

	// only peer with initial instances so hosts added later don't
	// change the peer list
	peers := make([]string, 0, len(peerInstances))
	for _, peer := range peerInstances {
		if !peer.initial {
			continue
		}
		peers = append(peers, peer.IP)
	}

	var script bytes.Buffer
	data := hostScriptData{
		ID:    inst.ID,
		IP:    inst.IP,
		Peers: strings.Join(peers, ","),
	}
	// Execute can fail at run time even though the template parsed; the
	// error was previously ignored, which could run a truncated script
	if err := tmpl.Execute(&script, data); err != nil {
		return fmt.Errorf("error generating flynn-host start script: %s", err)
	}
	c.logf("Starting flynn-host on %s [id: %s]\n", inst.IP, inst.ID)
	return inst.Run("bash", &Streams{Stdin: &script, Stdout: c.out, Stderr: os.Stderr})
}
// setup validates the kernel path, lazily creates the network bridge and
// (re)initializes the VM manager. The bridge is only created once, so
// calling setup repeatedly is safe.
func (c *Cluster) setup() error {
	if _, err := os.Stat(c.bc.Kernel); os.IsNotExist(err) {
		return fmt.Errorf("cluster: not a kernel file: %s", c.bc.Kernel)
	}
	if c.bridge == nil {
		var err error
		name := "flynnbr." + random.String(5)
		c.logf("creating network bridge %s\n", name)
		c.bridge, err = createBridge(name, c.bc.Network, c.bc.NatIface)
		if err != nil {
			return fmt.Errorf("could not create network bridge: %s", err)
		}
	}
	c.vm = NewVMManager(c.bridge)
	return nil
}
// Run executes command on the cluster's first instance with no extra
// environment.
func (c *Cluster) Run(command string, s *Streams) error {
	return c.run(command, s, nil)
}
// RunWithEnv executes command on the cluster's first instance with the
// given environment variables.
func (c *Cluster) RunWithEnv(command string, s *Streams, env map[string]string) error {
	return c.run(command, s, env)
}
// run executes command on the first booted instance, which is where the
// test binary runs; it fails if no instance has been booted yet.
func (c *Cluster) run(command string, s *Streams, env map[string]string) error {
	if len(c.Instances) == 0 {
		return errors.New("no booted servers in cluster")
	}
	return c.Instances[0].RunWithEnv(command, s, env)
}
// CLIConfig builds a flynn CLI configuration with a single "default"
// cluster entry pointing at this cluster's controller, git and docker
// endpoints.
func (c *Cluster) CLIConfig() (*config.Config, error) {
	conf := &config.Config{}
	s := &config.Cluster{
		Name:          "default",
		ControllerURL: "https://controller." + c.ClusterDomain,
		GitURL:        "https://git." + c.ClusterDomain,
		DockerPushURL: "https://docker." + c.ClusterDomain,
		Key:           c.ControllerKey,
		TLSPin:        c.ControllerPin,
	}
	if err := conf.Add(s, true /*force*/); err != nil {
		return nil, err
	}
	return conf, nil
}
// Shutdown kills every instance and deletes the network bridge. Errors
// are logged rather than returned so cleanup always runs to completion.
func (c *Cluster) Shutdown() {
	for i, inst := range c.Instances {
		c.logf("killing instance %d [id: %s]\n", i, inst.ID)
		if err := inst.Kill(); err != nil {
			c.logf("error killing instance %d: %s\n", i, err)
		}
	}
	if c.bridge != nil {
		c.logf("deleting network bridge %s\n", c.bridge.name)
		if err := deleteBridge(c.bridge); err != nil {
			c.logf("error deleting network bridge %s: %s\n", c.bridge.name, err)
		}
		c.bridge = nil
	}
}
// flynnBuildScript is the bash script run on the build instance: it clones
// or updates the Flynn repo, checks out the requested commit (optionally
// merging origin/master), builds Flynn and installs the binaries and
// bootstrap manifest. The `[1:]` slice strips the leading newline after
// the opening backtick.
var flynnBuildScript = template.Must(template.New("flynn-build").Parse(`
#!/bin/bash
set -e -x
export GOPATH=~/go
flynn=$GOPATH/src/github.com/flynn/flynn
if [ ! -d $flynn ]; then
git clone https://github.com/flynn/flynn $flynn
fi
cd $flynn
# Also fetch Github PR commits
if ! git config --get-all remote.origin.fetch | grep -q '^+refs/pull'; then
git config --add remote.origin.fetch '+refs/pull/*/head:refs/remotes/origin/pr/*'
fi
git fetch
git checkout --quiet {{ .Commit }}
{{ if .Merge }}
git config user.email "ci@flynn.io"
git config user.name "CI"
git merge origin/master
{{ end }}
test/scripts/wait-for-docker
# pull flynn/busybox before building to avoid the following Docker error when
# building images from scratch concurrently:
# "could not find image: no such id: flynn/busybox"
docker pull flynn/busybox
make
if [[ -f test/scripts/debug-info.sh ]]; then
sudo cp test/scripts/debug-info.sh /usr/local/bin/debug-info.sh
fi
sudo cp host/bin/flynn-* /usr/local/bin
sudo cp bootstrap/bin/manifest.json /etc/flynn-bootstrap.json
`[1:]))
// buildData is the template data for flynnBuildScript.
type buildData struct {
	Commit string // commit to check out
	Merge  bool   // whether to merge origin/master after checkout
}
// buildFlynn runs the Flynn build script for the given commit on the build
// instance, streaming output to out. The build is aborted after 30 minutes.
func buildFlynn(inst *Instance, commit string, merge bool, out io.Writer) error {
	var b bytes.Buffer
	// the Execute error was previously ignored, which could silently run a
	// truncated build script
	if err := flynnBuildScript.Execute(&b, buildData{commit, merge}); err != nil {
		return fmt.Errorf("error generating build script: %s", err)
	}
	return inst.RunWithTimeout("bash", &Streams{Stdin: &b, Stdout: out, Stderr: out}, 30*time.Minute)
}
// flynnUnitTestScript runs the in-repo unit test script (if present) with
// a 5 minute timeout. `[1:]` strips the leading newline.
var flynnUnitTestScript = `
#!/bin/bash
set -e -x
export GOPATH=~/go
flynn=$GOPATH/src/github.com/flynn/flynn
cd $flynn
if [[ -f test/scripts/test-unit.sh ]]; then
timeout --signal=QUIT --kill-after=10 5m test/scripts/test-unit.sh
fi
`[1:]
// runUnitTests runs flynnUnitTestScript on inst, streaming output to out.
func runUnitTests(inst *Instance, out io.Writer) error {
	return inst.Run("bash", &Streams{Stdin: bytes.NewBufferString(flynnUnitTestScript), Stdout: out, Stderr: out})
}
// hostScriptData is the template data for the flynnHostScripts templates.
type hostScriptData struct {
	ID    string // host ID passed to `flynn-host daemon --id`
	IP    string // external IP of the instance
	Peers string // comma-separated peer IPs
}
// flynnHostScripts maps a backend name to the script that daemonizes
// flynn-host on an instance (see startFlynnHost). `[1:]` strips the
// leading newline after the opening backtick.
var flynnHostScripts = map[string]*template.Template{
	"libvirt-lxc": template.Must(template.New("flynn-host-libvirt").Parse(`
if [[ -f /usr/local/bin/debug-info.sh ]]; then
/usr/local/bin/debug-info.sh &>/tmp/debug-info.log &
fi
sudo start-stop-daemon \
--start \
--background \
--no-close \
--make-pidfile \
--pidfile /var/run/flynn-host.pid \
--exec /usr/bin/env \
-- \
flynn-host \
daemon \
--id {{ .ID }} \
--external-ip {{ .IP }} \
--force \
--backend libvirt-lxc \
--peer-ips {{ .Peers }} \
--max-job-concurrency 8 \
&>/tmp/flynn-host.log
`[1:])),
}
// bootstrapMsg is a single JSON progress message emitted by
// `flynn-host bootstrap --json` (parsed in bootstrapLayer1).
type bootstrapMsg struct {
	Id    string          `json:"id"`
	State string          `json:"state"`
	Data  json.RawMessage `json:"data"`
	Error string          `json:"error"`
}
// controllerCert holds the TLS pin from the "controller-cert" bootstrap
// step's data payload.
type controllerCert struct {
	Pin string `json:"pin"`
}
// bootstrapLayer1 runs `flynn-host bootstrap` on the first instance,
// parsing the JSON progress stream to extract the controller TLS pin, and
// then resolves the router IP via discoverd. It populates ClusterDomain,
// ControllerKey, BackoffPeriod, ControllerPin and RouterIP.
func (c *Cluster) bootstrapLayer1(instances []*Instance) error {
	inst := instances[0]
	c.ClusterDomain = fmt.Sprintf("flynn-%s.local", random.String(16))
	c.ControllerKey = random.String(16)
	c.BackoffPeriod = 5 * time.Second
	rd, wr := io.Pipe()
	ips := make([]string, len(instances))
	for i, inst := range instances {
		ips[i] = inst.IP
	}
	// run bootstrap in a goroutine, streaming its JSON output through the
	// pipe to the decoder below.
	// NOTE(review): cmdErr is written in the goroutine and read after the
	// decode loop; this relies on the decoder only seeing EOF after
	// wr.Close() — confirm with the race detector.
	var cmdErr error
	go func() {
		command := fmt.Sprintf(
			"CLUSTER_DOMAIN=%s CONTROLLER_KEY=%s BACKOFF_PERIOD=%fs flynn-host bootstrap --json --min-hosts=%d --peer-ips=%s /etc/flynn-bootstrap.json",
			c.ClusterDomain, c.ControllerKey, c.BackoffPeriod.Seconds(), len(instances), strings.Join(ips, ","),
		)
		cmdErr = inst.Run(command, &Streams{Stdout: wr, Stderr: c.out})
		wr.Close()
	}()
	// grab the controller tls pin from the bootstrap output
	var cert controllerCert
	dec := json.NewDecoder(rd)
	for {
		var msg bootstrapMsg
		if err := dec.Decode(&msg); err == io.EOF {
			break
		} else if err != nil {
			return fmt.Errorf("failed to parse bootstrap JSON output: %s", err)
		}
		c.log("bootstrap ===>", msg.Id, msg.State)
		if msg.State == "error" {
			c.log(msg.Error)
		}
		if msg.Id == "controller-cert" && msg.State == "done" {
			// an unmarshal failure here is caught by the empty-pin check below
			json.Unmarshal(msg.Data, &cert)
		}
	}
	if cmdErr != nil {
		return cmdErr
	}
	if cert.Pin == "" {
		return errors.New("could not determine controller cert from bootstrap output")
	}
	c.ControllerPin = cert.Pin
	// grab the router IP from discoverd
	disc := discoverd.NewClientWithURL(fmt.Sprintf("http://%s:1111", inst.IP))
	leader, err := disc.Service("router-api").Leader()
	if err != nil {
		return fmt.Errorf("could not detect router ip: %s", err)
	}
	c.RouterIP = leader.Host()
	return nil
}
// lookupUser resolves name to its numeric uid and gid via the system user
// database.
func lookupUser(name string) (int, int, error) {
	u, err := user.Lookup(name)
	if err != nil {
		return 0, 0, err
	}
	// the Atoi errors were previously discarded, silently mapping a
	// malformed passwd entry to uid/gid 0 (root)
	uid, err := strconv.Atoi(u.Uid)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid uid %q for user %s: %s", u.Uid, name, err)
	}
	gid, err := strconv.Atoi(u.Gid)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid gid %q for user %s: %s", u.Gid, name, err)
	}
	return uid, gid, nil
}
// DumpLogs collects debugging output from every instance into buildLog:
// host-level logs for each instance, then per-job logs (with a
// tail-everything fallback) for the default and release instance groups.
func (c *Cluster) DumpLogs(buildLog *buildlog.Log) {
	// run executes cmds on inst over SSH, streaming output into a new
	// build log file, stopping at the first command that fails
	run := func(log string, inst *Instance, cmds ...string) error {
		out, err := buildLog.NewFileWithTimeout(log, 60*time.Second)
		if err != nil {
			return err
		}
		for _, cmd := range cmds {
			fmt.Fprintln(out, "HostID:", inst.ID, "-", cmd)
			fmt.Fprintln(out)
			err := inst.Run(cmd, &Streams{Stdout: out, Stderr: out})
			fmt.Fprintln(out)
			if err != nil {
				return err
			}
		}
		return nil
	}
	// best-effort host logs; errors are deliberately ignored so one broken
	// host doesn't stop collection from the others
	for _, inst := range c.Instances {
		run(
			fmt.Sprintf("host-logs-%s.log", inst.ID),
			inst,
			"ps faux",
			"cat /var/log/flynn/flynn-host.log",
			"cat /tmp/debug-info.log",
			"sudo cat /var/log/libvirt/libvirtd.log",
		)
	}
	printLogs := func(typ string, instances []*Instance) {
		// fallback dumps all flynn logs, used when per-job collection fails
		fallback := func(instances []*Instance) {
			for _, inst := range instances {
				run(fmt.Sprintf("%s-fallback-%s.log", typ, inst.ID), inst, "sudo bash -c 'tail -n +1 /var/log/flynn/*.log'")
			}
		}
		run(fmt.Sprintf("%s-jobs.log", typ), instances[0], "flynn-host ps -a")
		var out bytes.Buffer
		cmd := `flynn-host ps -aqf '{{ metadata "flynn-controller.app_name" }}:{{ metadata "flynn-controller.type" }}:{{ .Job.ID }}'`
		if err := instances[0].Run(cmd, &Streams{Stdout: &out, Stderr: &out}); err != nil {
			fallback(instances)
			return
		}
		// only fallback if all `flynn-host log` commands fail
		shouldFallback := true
		jobs := strings.Split(strings.TrimSpace(out.String()), "\n")
		for _, job := range jobs {
			fields := strings.Split(job, ":")
			if len(fields) < 3 {
				// empty ps output splits to [""], and indexing fields[2]
				// used to panic; skip malformed lines
				continue
			}
			jobID := fields[2]
			cmds := []string{
				fmt.Sprintf("timeout 10s flynn-host inspect %s", jobID),
				fmt.Sprintf("timeout 10s flynn-host log --init %s", jobID),
			}
			if err := run(fmt.Sprintf("%s-%s.log", typ, job), instances[0], cmds...); err != nil {
				continue
			}
			shouldFallback = false
		}
		if shouldFallback {
			fallback(instances)
		}
		// run the fallback on any stopped instances as their logs will
		// not appear in `flynn-host ps`
		stoppedInstances := make([]*Instance, 0, len(instances))
		for _, inst := range instances {
			if err := inst.Run("sudo kill -0 $(cat /var/run/flynn-host.pid)", nil); err != nil {
				stoppedInstances = append(stoppedInstances, inst)
			}
		}
		if len(stoppedInstances) > 0 {
			fallback(stoppedInstances)
		}
	}
	if len(c.defaultInstances) > 0 {
		printLogs("default", c.defaultInstances)
	}
	if len(c.releaseInstances) > 0 {
		printLogs("release", c.releaseInstances)
	}
}
|
package cluster
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"os/user"
"strconv"
"strings"
"sync"
"text/template"
"time"
"github.com/flynn/flynn/cli/config"
"github.com/flynn/flynn/discoverd/client"
"github.com/flynn/flynn/pkg/iotool"
"github.com/flynn/flynn/pkg/random"
)
type ClusterType uint8
const (
ClusterTypeDefault ClusterType = iota
ClusterTypeRelease
ClusterTypeNone
)
type BootConfig struct {
User string
Kernel string
Network string
NatIface string
Backend string
}
type Cluster struct {
ID string `json:"id"`
Instances instances `json:"instances"`
BackoffPeriod time.Duration `json:"backoff_period"`
ClusterDomain string `json:"cluster_domain"`
ControllerPin string `json:"controller_pin"`
ControllerKey string `json:"controller_key"`
RouterIP string `json:"router_ip"`
defaultInstances []*Instance
releaseInstances []*Instance
discMtx sync.Mutex
disc *discoverd.Client
bc BootConfig
vm *VMManager
out io.Writer
bridge *Bridge
rootFS string
}
func (c *Cluster) ControllerDomain() string {
return "controller." + c.ClusterDomain
}
type instances []*Instance
func (i instances) Get(id string) (*Instance, error) {
for _, inst := range i {
if inst.ID == id {
return inst, nil
}
}
return nil, fmt.Errorf("no such host: %s", id)
}
func (c *Cluster) discoverdClient(ip string) *discoverd.Client {
c.discMtx.Lock()
defer c.discMtx.Unlock()
if c.disc == nil {
c.disc = discoverd.NewClientWithURL(fmt.Sprintf("http://%s:1111", ip))
}
return c.disc
}
type Streams struct {
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
}
func New(bc BootConfig, out io.Writer) *Cluster {
return &Cluster{
ID: random.String(8),
bc: bc,
out: out,
}
}
func BuildFlynn(bc BootConfig, rootFS, commit string, merge bool, out io.Writer) (string, error) {
c := New(bc, out)
defer c.Shutdown()
return c.BuildFlynn(rootFS, commit, merge, false)
}
func (c *Cluster) log(a ...interface{}) (int, error) {
return fmt.Fprintln(c.out, append([]interface{}{"++", time.Now().Format("15:04:05.000")}, a...)...)
}
func (c *Cluster) logf(f string, a ...interface{}) (int, error) {
return fmt.Fprintf(c.out, strings.Join([]string{"++", time.Now().Format("15:04:05.000"), f}, " "), a...)
}
func (c *Cluster) BuildFlynn(rootFS, commit string, merge bool, runTests bool) (string, error) {
c.log("Building Flynn...")
if err := c.setup(); err != nil {
return "", err
}
uid, gid, err := lookupUser(c.bc.User)
if err != nil {
return "", err
}
build, err := c.vm.NewInstance(&VMConfig{
Kernel: c.bc.Kernel,
User: uid,
Group: gid,
Memory: "2048",
Cores: 8,
Drives: map[string]*VMDrive{
"hda": {FS: rootFS, COW: true, Temp: false},
},
})
if err != nil {
return build.Drive("hda").FS, err
}
c.log("Booting build instance...")
if err := build.Start(); err != nil {
return build.Drive("hda").FS, fmt.Errorf("error starting build instance: %s", err)
}
c.log("Waiting for instance to boot...")
if err := buildFlynn(build, commit, merge, c.out); err != nil {
build.Kill()
return build.Drive("hda").FS, fmt.Errorf("error running build script: %s", err)
}
if runTests {
if err := runUnitTests(build, c.out); err != nil {
build.Kill()
return build.Drive("hda").FS, fmt.Errorf("unit tests failed: %s", err)
}
}
if err := build.Shutdown(); err != nil {
return build.Drive("hda").FS, fmt.Errorf("error while stopping build instance: %s", err)
}
c.rootFS = build.Drive("hda").FS
return c.rootFS, nil
}
type BootResult struct {
ControllerDomain string
ControllerPin string
ControllerKey string
Instances []*Instance
}
func (c *Cluster) Boot(typ ClusterType, count int, dumpLogs io.Writer, killOnFailure bool) (res *BootResult, err error) {
if err := c.setup(); err != nil {
return nil, err
}
defer func() {
if err != nil {
if dumpLogs != nil && len(c.Instances) > 0 {
c.DumpLogs(dumpLogs)
}
if killOnFailure {
c.Shutdown()
}
}
}()
c.log("Booting", count, "VMs")
instances, err := c.startVMs(typ, c.rootFS, count, true)
if err != nil {
return nil, err
}
for _, inst := range instances {
if err := c.startFlynnHost(inst, instances); err != nil {
return nil, err
}
}
c.log("Bootstrapping layer 1...")
if err := c.bootstrapLayer1(instances); err != nil {
return nil, err
}
return &BootResult{
ControllerDomain: c.ControllerDomain(),
ControllerPin: c.ControllerPin,
ControllerKey: c.ControllerKey,
Instances: instances,
}, nil
}
func (c *Cluster) BridgeIP() string {
if c.bridge == nil {
return ""
}
return c.bridge.IP()
}
func (c *Cluster) AddHost() (*Instance, error) {
if c.rootFS == "" {
return nil, errors.New("cluster not yet booted")
}
c.log("Booting 1 VM")
instances, err := c.startVMs(ClusterTypeDefault, c.rootFS, 1, false)
if err != nil {
return nil, err
}
inst := instances[0]
if err := c.startFlynnHost(inst, c.defaultInstances); err != nil {
return nil, err
}
return inst, err
}
// AddVanillaHost boots one VM from the given root filesystem without
// starting flynn-host or registering it with any instance group.
func (c *Cluster) AddVanillaHost(rootFS string) (*Instance, error) {
	c.log("Booting 1 VM")
	instances, err := c.startVMs(ClusterTypeNone, rootFS, 1, false)
	// startVMs returns a nil slice on error, so indexing instances[0]
	// unconditionally (as before) would panic
	if err != nil {
		return nil, err
	}
	return instances[0], nil
}
// RemoveHost stops flynn-host on the instance but leaves it running so the logs
// are still available if we need to dump them later.
func (c *Cluster) RemoveHost(id string) error {
	inst, err := c.Instances.Get(id)
	if err != nil {
		return err
	}
	c.log("removing host", id)
	// Clean shutdown requires waiting for that host to unadvertise on discoverd.
	// Specifically: Wait for router-api services to disappear to indicate host
	// removal (rather than using StreamHostEvents), so that other
	// tests won't try and connect to this host via service discovery.
	ip := c.defaultInstances[0].IP
	events := make(chan *discoverd.Event)
	stream, err := c.discoverdClient(ip).Service("router-api").Watch(events)
	if err != nil {
		return err
	}
	defer stream.Close()
	// ssh into the host and tell the flynn-host daemon to stop
	var cmd string
	switch c.bc.Backend {
	case "libvirt-lxc":
		cmd = "sudo start-stop-daemon --stop --pidfile /var/run/flynn-host.pid --retry 15"
	}
	if err := inst.Run(cmd, nil); err != nil {
		return err
	}
	// wait for the router-api service on the removed host to go down,
	// bounding the wait with a 20 second timeout.
	// NOTE(review): any down event on the watch unblocks this loop — it is
	// not checked that the event belongs to the host being removed; confirm
	// this is acceptable for the tests.
loop:
	for {
		select {
		case event := <-events:
			if event.Kind == discoverd.EventKindDown {
				break loop
			}
		case <-time.After(20 * time.Second):
			return fmt.Errorf("timed out waiting for host removal")
		}
	}
	return nil
}
func (c *Cluster) Size() int {
return len(c.Instances)
}
func (c *Cluster) startVMs(typ ClusterType, rootFS string, count int, initial bool) ([]*Instance, error) {
uid, gid, err := lookupUser(c.bc.User)
if err != nil {
return nil, err
}
instances := make([]*Instance, count)
for i := 0; i < count; i++ {
inst, err := c.vm.NewInstance(&VMConfig{
Kernel: c.bc.Kernel,
User: uid,
Group: gid,
Memory: "2048",
Cores: 2,
Drives: map[string]*VMDrive{
"hda": {FS: rootFS, COW: true, Temp: true},
},
})
if err != nil {
return nil, fmt.Errorf("error creating instance %d: %s", i, err)
}
if err = inst.Start(); err != nil {
return nil, fmt.Errorf("error starting instance %d: %s", i, err)
}
inst.initial = initial
instances[i] = inst
c.Instances = append(c.Instances, inst)
switch typ {
case ClusterTypeDefault:
c.defaultInstances = append(c.defaultInstances, inst)
case ClusterTypeRelease:
c.releaseInstances = append(c.releaseInstances, inst)
}
}
return instances, nil
}
// startFlynnHost boots the flynn-host daemon on inst via the backend's
// start script. Initial instances from peerInstances form the etcd peer
// list; non-initial instances join as etcd proxies.
func (c *Cluster) startFlynnHost(inst *Instance, peerInstances []*Instance) error {
	tmpl, ok := flynnHostScripts[c.bc.Backend]
	if !ok {
		return fmt.Errorf("unknown host backend: %s", c.bc.Backend)
	}

	// only peer with initial instances so hosts added later don't
	// change the peer list
	peers := make([]string, 0, len(peerInstances))
	for _, peer := range peerInstances {
		if !peer.initial {
			continue
		}
		peers = append(peers, fmt.Sprintf("%s=http://%s:2380", peer.ID, peer.IP))
	}

	var script bytes.Buffer
	data := hostScriptData{
		ID:        inst.ID,
		IP:        inst.IP,
		Peers:     strings.Join(peers, ","),
		EtcdProxy: !inst.initial,
	}
	// Execute can fail at run time even though the template parsed; the
	// error was previously ignored, which could run a truncated script
	if err := tmpl.Execute(&script, data); err != nil {
		return fmt.Errorf("error generating flynn-host start script: %s", err)
	}
	c.logf("Starting flynn-host on %s [id: %s]\n", inst.IP, inst.ID)
	return inst.Run("bash", &Streams{Stdin: &script, Stdout: c.out, Stderr: os.Stderr})
}
func (c *Cluster) setup() error {
if _, err := os.Stat(c.bc.Kernel); os.IsNotExist(err) {
return fmt.Errorf("cluster: not a kernel file: %s", c.bc.Kernel)
}
if c.bridge == nil {
var err error
name := "flynnbr." + random.String(5)
c.logf("creating network bridge %s\n", name)
c.bridge, err = createBridge(name, c.bc.Network, c.bc.NatIface)
if err != nil {
return fmt.Errorf("could not create network bridge: %s", err)
}
}
c.vm = NewVMManager(c.bridge)
return nil
}
// Run executes command on the first instance in the cluster with no extra
// environment variables.
func (c *Cluster) Run(command string, s *Streams) error {
	return c.run(command, s, nil)
}

// RunWithEnv executes command on the first instance in the cluster with the
// given environment variables set.
func (c *Cluster) RunWithEnv(command string, s *Streams, env map[string]string) error {
	return c.run(command, s, env)
}

// run is the shared implementation for Run/RunWithEnv; it requires at least
// one booted instance and always targets Instances[0].
func (c *Cluster) run(command string, s *Streams, env map[string]string) error {
	if len(c.Instances) == 0 {
		return errors.New("no booted servers in cluster")
	}
	return c.Instances[0].RunWithEnv(command, s, env)
}
// CLIConfig builds a flynn CLI configuration pointing at this cluster's
// controller (cluster name "default"), using the controller key and TLS pin
// captured during bootstrap.
func (c *Cluster) CLIConfig() (*config.Config, error) {
	conf := &config.Config{}
	s := &config.Cluster{
		Name:    "default",
		URL:     "https://" + c.ControllerDomain(),
		Key:     c.ControllerKey,
		GitHost: c.ClusterDomain + ":2222",
		TLSPin:  c.ControllerPin,
	}
	if err := conf.Add(s, true /*force*/); err != nil {
		return nil, err
	}
	return conf, nil
}
// Shutdown kills every instance in the cluster and tears down the network
// bridge. Errors are logged but not returned so teardown always runs to
// completion.
func (c *Cluster) Shutdown() {
	for i, inst := range c.Instances {
		c.logf("killing instance %d [id: %s]\n", i, inst.ID)
		if err := inst.Kill(); err != nil {
			c.logf("error killing instance %d: %s\n", i, err)
		}
	}
	if c.bridge != nil {
		c.logf("deleting network bridge %s\n", c.bridge.name)
		if err := deleteBridge(c.bridge); err != nil {
			c.logf("error deleting network bridge %s: %s\n", c.bridge.name, err)
		}
		// nil out the bridge so a second Shutdown is a no-op for it.
		c.bridge = nil
	}
}
var flynnBuildScript = template.Must(template.New("flynn-build").Parse(`
#!/bin/bash
set -e -x
export GOPATH=~/go
flynn=$GOPATH/src/github.com/flynn/flynn
if [ ! -d $flynn ]; then
git clone https://github.com/flynn/flynn $flynn
fi
cd $flynn
# Also fetch Github PR commits
if ! git config --get-all remote.origin.fetch | grep -q '^+refs/pull'; then
git config --add remote.origin.fetch '+refs/pull/*/head:refs/remotes/origin/pr/*'
fi
git fetch
git checkout --quiet {{ .Commit }}
{{ if .Merge }}
git config user.email "ci@flynn.io"
git config user.name "CI"
git merge origin/master
{{ end }}
test/scripts/wait-for-docker
make
if [[ -f test/scripts/debug-info.sh ]]; then
sudo cp test/scripts/debug-info.sh /usr/local/bin/debug-info.sh
fi
sudo cp host/bin/flynn-* /usr/local/bin
sudo cp host/bin/manifest.json /etc/flynn-host.json
sudo cp bootstrap/bin/manifest.json /etc/flynn-bootstrap.json
`[1:]))
type buildData struct {
Commit string
Merge bool
}
// buildFlynn renders the build script for the given commit (optionally merged
// with origin/master) and executes it on the build instance, streaming the
// build output to out.
func buildFlynn(inst *Instance, commit string, merge bool, out io.Writer) error {
	var b bytes.Buffer
	// The original ignored this error; a template failure would silently run
	// an empty script and report a successful build.
	if err := flynnBuildScript.Execute(&b, buildData{commit, merge}); err != nil {
		return fmt.Errorf("error rendering build script: %s", err)
	}
	return inst.Run("bash", &Streams{Stdin: &b, Stdout: out, Stderr: out})
}
var flynnUnitTestScript = `
#!/bin/bash
set -e -x
export GOPATH=~/go
flynn=$GOPATH/src/github.com/flynn/flynn
cd $flynn
if [[ -f test/scripts/test-unit.sh ]]; then
timeout --signal=QUIT --kill-after=10 5m test/scripts/test-unit.sh
fi
`[1:]
func runUnitTests(inst *Instance, out io.Writer) error {
return inst.Run("bash", &Streams{Stdin: bytes.NewBufferString(flynnUnitTestScript), Stdout: out, Stderr: out})
}
type hostScriptData struct {
ID string
IP string
Peers string
EtcdProxy bool
}
var flynnHostScripts = map[string]*template.Template{
"libvirt-lxc": template.Must(template.New("flynn-host-libvirt").Parse(`
if [[ -f /usr/local/bin/debug-info.sh ]]; then
/usr/local/bin/debug-info.sh &>/tmp/debug-info.log &
fi
sudo start-stop-daemon \
--start \
--background \
--no-close \
--make-pidfile \
--pidfile /var/run/flynn-host.pid \
--exec /usr/bin/env \
-- \
ETCD_NAME={{ .ID }} \
ETCD_INITIAL_CLUSTER={{ .Peers }} \
ETCD_INITIAL_CLUSTER_STATE=new \
{{ if .EtcdProxy }} ETCD_PROXY=on {{ end }} \
flynn-host \
daemon \
--id {{ .ID }} \
--manifest /etc/flynn-host.json \
--external {{ .IP }} \
--force \
--backend libvirt-lxc \
&>/tmp/flynn-host.log
`[1:])),
}
type bootstrapMsg struct {
Id string `json:"id"`
State string `json:"state"`
Data json.RawMessage `json:"data"`
Error string `json:"error"`
}
type controllerCert struct {
Pin string `json:"pin"`
}
// bootstrapLayer1 runs "flynn-host bootstrap" on the first instance, parsing
// its JSON event stream to capture the controller TLS pin, then resolves the
// router IP via discoverd and installs local DNS entries for the cluster and
// controller domains.
func (c *Cluster) bootstrapLayer1(instances []*Instance) error {
	inst := instances[0]
	c.ClusterDomain = fmt.Sprintf("flynn-%s.local", random.String(16))
	c.ControllerKey = random.String(16)
	c.BackoffPeriod = 5 * time.Second
	rd, wr := io.Pipe()
	var cmdErr error
	go func() {
		command := fmt.Sprintf(
			"DISCOVERD=%s:1111 CLUSTER_DOMAIN=%s CONTROLLER_KEY=%s BACKOFF_PERIOD=%fs flynn-host bootstrap --json --min-hosts=%d /etc/flynn-bootstrap.json",
			inst.IP, c.ClusterDomain, c.ControllerKey, c.BackoffPeriod.Seconds(), len(instances),
		)
		cmdErr = inst.Run(command, &Streams{Stdout: wr, Stderr: os.Stderr})
		// Closing the pipe delivers EOF to the decoder loop below.
		wr.Close()
	}()
	// grab the controller tls pin from the bootstrap output
	var cert controllerCert
	dec := json.NewDecoder(rd)
	for {
		var msg bootstrapMsg
		if err := dec.Decode(&msg); err == io.EOF {
			break
		} else if err != nil {
			return fmt.Errorf("failed to parse bootstrap JSON output: %s", err)
		}
		c.log("bootstrap ===>", msg.Id, msg.State)
		if msg.State == "error" {
			c.log(msg.Error)
		}
		if msg.Id == "controller-cert" && msg.State == "done" {
			// NOTE(review): Unmarshal error is ignored; a malformed cert
			// message falls through to the cert.Pin == "" check below.
			json.Unmarshal(msg.Data, &cert)
		}
	}
	// cmdErr is only read after the decoder saw EOF, i.e. after the goroutine
	// assigned it and closed the pipe.
	if cmdErr != nil {
		return cmdErr
	}
	if cert.Pin == "" {
		return errors.New("could not determine controller cert from bootstrap output")
	}
	c.ControllerPin = cert.Pin
	// grab the router IP from discoverd
	disc := discoverd.NewClientWithURL(fmt.Sprintf("http://%s:1111", inst.IP))
	leader, err := disc.Service("router-api").Leader()
	if err != nil {
		return fmt.Errorf("could not detect router ip: %s", err)
	}
	if err = setLocalDNS([]string{c.ClusterDomain, c.ControllerDomain()}, leader.Host()); err != nil {
		return fmt.Errorf("could not set cluster DNS entries: %s", err)
	}
	c.RouterIP = leader.Host()
	return nil
}
// setLocalDNS points the given domains at ip in /etc/hosts: if a line for ip
// already exists it is rewritten in place (sed -i), otherwise a new entry is
// appended. Requires permission to write /etc/hosts.
func setLocalDNS(domains []string, ip string) error {
	command := fmt.Sprintf(
		`grep -q "^%[1]s" /etc/hosts && sed "s/^%[1]s.*/%[1]s %s/" -i /etc/hosts || echo %[1]s %s >> /etc/hosts`,
		ip, strings.Join(domains, " "),
	)
	cmd := exec.Command("bash", "-c", command)
	return cmd.Run()
}
// lookupUser resolves a system user name to its numeric uid and gid.
func lookupUser(name string) (int, int, error) {
	u, err := user.Lookup(name)
	if err != nil {
		return 0, 0, err
	}
	// The original discarded the Atoi errors, which would silently yield
	// uid/gid 0 (root) on a malformed passwd entry.
	uid, err := strconv.Atoi(u.Uid)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid uid %q for user %s: %s", u.Uid, name, err)
	}
	gid, err := strconv.Atoi(u.Gid)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid gid %q for user %s: %s", u.Gid, name, err)
	}
	return uid, gid, nil
}
func (c *Cluster) DumpLogs(w io.Writer) {
tw := iotool.NewTimeoutWriter(w, 60*time.Second)
c.dumpLogs(tw)
tw.Finished()
}
// dumpLogs writes diagnostic output for every instance to w: process lists,
// flynn-host and debug logs, then per-job logs fetched via the flynn-host
// CLI, falling back to a raw tail of the log directory when that fails.
func (c *Cluster) dumpLogs(w io.Writer) {
	streams := &Streams{Stdout: w, Stderr: w}
	// run executes cmd on inst, framing its output with a banner so the
	// combined dump stays readable.
	run := func(inst *Instance, cmd string) error {
		fmt.Fprint(w, "\n\n***** ***** ***** ***** ***** ***** ***** ***** ***** *****\n\n")
		fmt.Fprintln(w, "HostID:", inst.ID, "-", cmd)
		fmt.Fprintln(w)
		err := inst.Run(cmd, streams)
		fmt.Fprintln(w)
		return err
	}
	fmt.Fprint(w, "\n\n***** ***** ***** DUMPING ALL LOGS ***** ***** *****\n\n")
	for _, inst := range c.Instances {
		run(inst, "ps faux")
		run(inst, "cat /tmp/flynn-host.log")
		run(inst, "cat /tmp/debug-info.log")
	}
	// printLogs dumps per-job logs, issuing all CLI commands through the
	// first instance of the group.
	// NOTE(review): instances[0] is indexed unconditionally, so this assumes
	// a non-empty group — releaseInstances is guarded below, defaultInstances
	// is not.
	printLogs := func(instances []*Instance) {
		// fallback dumps raw log files when the flynn-host CLI path fails.
		fallback := func() {
			fmt.Fprintf(w, "\n*** Error getting job logs via flynn-host, falling back to tail log dump\n\n")
			for _, inst := range instances {
				run(inst, "sudo bash -c 'tail -n +1 /tmp/flynn-host-logs/**/*.log'")
			}
		}
		run(instances[0], "flynn-host ps -a")
		var out bytes.Buffer
		if err := instances[0].Run("flynn-host ps -a -q", &Streams{Stdout: &out, Stderr: w}); err != nil {
			io.Copy(w, &out)
			fallback()
			return
		}
		ids := strings.Split(strings.TrimSpace(out.String()), "\n")
		for _, id := range ids {
			if err := run(instances[0], fmt.Sprintf("flynn-host inspect %s", id)); err != nil {
				fallback()
				return
			}
			run(instances[0], fmt.Sprintf("flynn-host log --init %s", id))
		}
	}
	printLogs(c.defaultInstances)
	if len(c.releaseInstances) > 0 {
		printLogs(c.releaseInstances)
	}
}
test: Manually kill containers after stopping flynn-host
Signed-off-by: Lewis Marshall <748e1641a368164906d4a0c0e3965345453dcc93@lmars.net>
package cluster
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"os/user"
"strconv"
"strings"
"sync"
"text/template"
"time"
"github.com/flynn/flynn/cli/config"
"github.com/flynn/flynn/discoverd/client"
"github.com/flynn/flynn/pkg/iotool"
"github.com/flynn/flynn/pkg/random"
)
// ClusterType distinguishes what role newly booted VMs play in the cluster.
type ClusterType uint8

const (
	ClusterTypeDefault ClusterType = iota // regular cluster host
	ClusterTypeRelease                    // host reserved for release tests
	ClusterTypeNone                       // plain VM; flynn-host is not started
)

// BootConfig carries the static settings needed to boot cluster VMs.
type BootConfig struct {
	User     string // local user whose uid/gid the VMs run as
	Kernel   string // path to the kernel image
	Network  string // network range for the bridge
	NatIface string // interface to NAT VM traffic through
	Backend  string // flynn-host backend (e.g. "libvirt-lxc")
}

// Cluster manages a set of local VMs running flynn-host for integration
// testing, along with the credentials captured while bootstrapping layer 1.
type Cluster struct {
	ID            string        `json:"id"`
	Instances     instances     `json:"instances"`
	BackoffPeriod time.Duration `json:"backoff_period"`
	ClusterDomain string        `json:"cluster_domain"`
	ControllerPin string        `json:"controller_pin"`
	ControllerKey string        `json:"controller_key"`
	RouterIP      string        `json:"router_ip"`
	// Instances partitioned by ClusterType at startVMs time.
	defaultInstances []*Instance
	releaseInstances []*Instance
	// discMtx guards lazy creation of disc in discoverdClient.
	discMtx sync.Mutex
	disc    *discoverd.Client
	bc      BootConfig
	vm      *VMManager
	out     io.Writer // destination for log/logf output
	bridge  *Bridge
	rootFS  string // root filesystem image produced by BuildFlynn
}
// ControllerDomain returns the controller's hostname within the cluster
// domain.
func (c *Cluster) ControllerDomain() string {
	return "controller." + c.ClusterDomain
}

// instances is a list of cluster VMs addressable by host ID.
type instances []*Instance

// Get returns the instance with the given host ID, or an error if no such
// host exists.
func (i instances) Get(id string) (*Instance, error) {
	for _, inst := range i {
		if inst.ID == id {
			return inst, nil
		}
	}
	return nil, fmt.Errorf("no such host: %s", id)
}
// discoverdClient lazily creates and caches a discoverd client pointing at
// the given host IP. Note the cached client is reused for every later call,
// regardless of the ip argument passed then.
func (c *Cluster) discoverdClient(ip string) *discoverd.Client {
	c.discMtx.Lock()
	defer c.discMtx.Unlock()
	if c.disc == nil {
		c.disc = discoverd.NewClientWithURL(fmt.Sprintf("http://%s:1111", ip))
	}
	return c.disc
}
// Streams bundles the standard I/O streams attached to a command run on an
// instance or cluster.
type Streams struct {
	Stdin  io.Reader
	Stdout io.Writer
	Stderr io.Writer
}

// New creates a Cluster with a random ID that boots VMs according to bc and
// writes progress logs to out.
func New(bc BootConfig, out io.Writer) *Cluster {
	return &Cluster{
		ID:  random.String(8),
		bc:  bc,
		out: out,
	}
}
func BuildFlynn(bc BootConfig, rootFS, commit string, merge bool, out io.Writer) (string, error) {
c := New(bc, out)
defer c.Shutdown()
return c.BuildFlynn(rootFS, commit, merge, false)
}
func (c *Cluster) log(a ...interface{}) (int, error) {
return fmt.Fprintln(c.out, append([]interface{}{"++", time.Now().Format("15:04:05.000")}, a...)...)
}
func (c *Cluster) logf(f string, a ...interface{}) (int, error) {
return fmt.Fprintf(c.out, strings.Join([]string{"++", time.Now().Format("15:04:05.000"), f}, " "), a...)
}
func (c *Cluster) BuildFlynn(rootFS, commit string, merge bool, runTests bool) (string, error) {
c.log("Building Flynn...")
if err := c.setup(); err != nil {
return "", err
}
uid, gid, err := lookupUser(c.bc.User)
if err != nil {
return "", err
}
build, err := c.vm.NewInstance(&VMConfig{
Kernel: c.bc.Kernel,
User: uid,
Group: gid,
Memory: "2048",
Cores: 8,
Drives: map[string]*VMDrive{
"hda": {FS: rootFS, COW: true, Temp: false},
},
})
if err != nil {
return build.Drive("hda").FS, err
}
c.log("Booting build instance...")
if err := build.Start(); err != nil {
return build.Drive("hda").FS, fmt.Errorf("error starting build instance: %s", err)
}
c.log("Waiting for instance to boot...")
if err := buildFlynn(build, commit, merge, c.out); err != nil {
build.Kill()
return build.Drive("hda").FS, fmt.Errorf("error running build script: %s", err)
}
if runTests {
if err := runUnitTests(build, c.out); err != nil {
build.Kill()
return build.Drive("hda").FS, fmt.Errorf("unit tests failed: %s", err)
}
}
if err := build.Shutdown(); err != nil {
return build.Drive("hda").FS, fmt.Errorf("error while stopping build instance: %s", err)
}
c.rootFS = build.Drive("hda").FS
return c.rootFS, nil
}
type BootResult struct {
ControllerDomain string
ControllerPin string
ControllerKey string
Instances []*Instance
}
func (c *Cluster) Boot(typ ClusterType, count int, dumpLogs io.Writer, killOnFailure bool) (res *BootResult, err error) {
if err := c.setup(); err != nil {
return nil, err
}
defer func() {
if err != nil {
if dumpLogs != nil && len(c.Instances) > 0 {
c.DumpLogs(dumpLogs)
}
if killOnFailure {
c.Shutdown()
}
}
}()
c.log("Booting", count, "VMs")
instances, err := c.startVMs(typ, c.rootFS, count, true)
if err != nil {
return nil, err
}
for _, inst := range instances {
if err := c.startFlynnHost(inst, instances); err != nil {
return nil, err
}
}
c.log("Bootstrapping layer 1...")
if err := c.bootstrapLayer1(instances); err != nil {
return nil, err
}
return &BootResult{
ControllerDomain: c.ControllerDomain(),
ControllerPin: c.ControllerPin,
ControllerKey: c.ControllerKey,
Instances: instances,
}, nil
}
func (c *Cluster) BridgeIP() string {
if c.bridge == nil {
return ""
}
return c.bridge.IP()
}
func (c *Cluster) AddHost() (*Instance, error) {
if c.rootFS == "" {
return nil, errors.New("cluster not yet booted")
}
c.log("Booting 1 VM")
instances, err := c.startVMs(ClusterTypeDefault, c.rootFS, 1, false)
if err != nil {
return nil, err
}
inst := instances[0]
if err := c.startFlynnHost(inst, c.defaultInstances); err != nil {
return nil, err
}
return inst, err
}
// AddVanillaHost boots a single VM from the given rootFS without starting
// flynn-host on it, returning the new instance.
func (c *Cluster) AddVanillaHost(rootFS string) (*Instance, error) {
	c.log("Booting 1 VM")
	instances, err := c.startVMs(ClusterTypeNone, rootFS, 1, false)
	if err != nil {
		// The original indexed instances[0] unconditionally, which panics on
		// the nil slice returned when startVMs fails.
		return nil, err
	}
	return instances[0], nil
}
// RemoveHost stops flynn-host on the instance but leaves it running so the logs
// are still available if we need to dump them later.
func (c *Cluster) RemoveHost(id string) error {
	inst, err := c.Instances.Get(id)
	if err != nil {
		return err
	}
	c.log("removing host", id)
	// Clean shutdown requires waiting for that host to unadvertise on discoverd.
	// Specifically: Wait for router-api services to disappear to indicate host
	// removal (rather than using StreamHostEvents), so that other
	// tests won't try and connect to this host via service discovery.
	ip := c.defaultInstances[0].IP
	events := make(chan *discoverd.Event)
	stream, err := c.discoverdClient(ip).Service("router-api").Watch(events)
	if err != nil {
		return err
	}
	defer stream.Close()
	// ssh into the host and tell the flynn-host daemon to stop
	var cmd string
	switch c.bc.Backend {
	case "libvirt-lxc":
		// manually kill containers after stopping flynn-host due to https://github.com/flynn/flynn/issues/1177
		cmd = "sudo start-stop-daemon --stop --pidfile /var/run/flynn-host.pid --retry 15 && (virsh -c lxc:/// list --name | xargs -L 1 virsh -c lxc:/// destroy || true)"
	}
	// NOTE(review): cmd stays empty for any other backend; confirm that
	// inst.Run("") is a harmless no-op in that case.
	if err := inst.Run(cmd, nil); err != nil {
		return err
	}
	// Wait (bounded at 20s) for the host's router-api registration to go
	// down, signalling that service discovery no longer advertises it.
loop:
	for {
		select {
		case event := <-events:
			if event.Kind == discoverd.EventKindDown {
				break loop
			}
		case <-time.After(20 * time.Second):
			return fmt.Errorf("timed out waiting for host removal")
		}
	}
	return nil
}
func (c *Cluster) Size() int {
return len(c.Instances)
}
func (c *Cluster) startVMs(typ ClusterType, rootFS string, count int, initial bool) ([]*Instance, error) {
uid, gid, err := lookupUser(c.bc.User)
if err != nil {
return nil, err
}
instances := make([]*Instance, count)
for i := 0; i < count; i++ {
inst, err := c.vm.NewInstance(&VMConfig{
Kernel: c.bc.Kernel,
User: uid,
Group: gid,
Memory: "2048",
Cores: 2,
Drives: map[string]*VMDrive{
"hda": {FS: rootFS, COW: true, Temp: true},
},
})
if err != nil {
return nil, fmt.Errorf("error creating instance %d: %s", i, err)
}
if err = inst.Start(); err != nil {
return nil, fmt.Errorf("error starting instance %d: %s", i, err)
}
inst.initial = initial
instances[i] = inst
c.Instances = append(c.Instances, inst)
switch typ {
case ClusterTypeDefault:
c.defaultInstances = append(c.defaultInstances, inst)
case ClusterTypeRelease:
c.releaseInstances = append(c.releaseInstances, inst)
}
}
return instances, nil
}
// startFlynnHost renders the backend-specific boot script and runs it on
// inst, starting the flynn-host daemon there. Returns an error for unknown
// backends, template failures, or a failed remote run.
func (c *Cluster) startFlynnHost(inst *Instance, peerInstances []*Instance) error {
	tmpl, ok := flynnHostScripts[c.bc.Backend]
	if !ok {
		return fmt.Errorf("unknown host backend: %s", c.bc.Backend)
	}
	// Only initial (bootstrap-time) hosts join etcd as peers; hosts added
	// later run etcd in proxy mode (see EtcdProxy below).
	peers := make([]string, 0, len(peerInstances))
	for _, peer := range peerInstances {
		// Renamed from "inst" to avoid shadowing the method parameter.
		if !peer.initial {
			continue
		}
		peers = append(peers, fmt.Sprintf("%s=http://%s:2380", peer.ID, peer.IP))
	}
	var script bytes.Buffer
	data := hostScriptData{
		ID:        inst.ID,
		IP:        inst.IP,
		Peers:     strings.Join(peers, ","),
		EtcdProxy: !inst.initial,
	}
	// The original discarded this error; a failing template would silently
	// run an empty or truncated script on the host.
	if err := tmpl.Execute(&script, data); err != nil {
		return fmt.Errorf("error rendering host script: %s", err)
	}
	c.logf("Starting flynn-host on %s [id: %s]\n", inst.IP, inst.ID)
	return inst.Run("bash", &Streams{Stdin: &script, Stdout: c.out, Stderr: os.Stderr})
}
func (c *Cluster) setup() error {
if _, err := os.Stat(c.bc.Kernel); os.IsNotExist(err) {
return fmt.Errorf("cluster: not a kernel file: %s", c.bc.Kernel)
}
if c.bridge == nil {
var err error
name := "flynnbr." + random.String(5)
c.logf("creating network bridge %s\n", name)
c.bridge, err = createBridge(name, c.bc.Network, c.bc.NatIface)
if err != nil {
return fmt.Errorf("could not create network bridge: %s", err)
}
}
c.vm = NewVMManager(c.bridge)
return nil
}
func (c *Cluster) Run(command string, s *Streams) error {
return c.run(command, s, nil)
}
func (c *Cluster) RunWithEnv(command string, s *Streams, env map[string]string) error {
return c.run(command, s, env)
}
func (c *Cluster) run(command string, s *Streams, env map[string]string) error {
if len(c.Instances) == 0 {
return errors.New("no booted servers in cluster")
}
return c.Instances[0].RunWithEnv(command, s, env)
}
func (c *Cluster) CLIConfig() (*config.Config, error) {
conf := &config.Config{}
s := &config.Cluster{
Name: "default",
URL: "https://" + c.ControllerDomain(),
Key: c.ControllerKey,
GitHost: c.ClusterDomain + ":2222",
TLSPin: c.ControllerPin,
}
if err := conf.Add(s, true /*force*/); err != nil {
return nil, err
}
return conf, nil
}
func (c *Cluster) Shutdown() {
for i, inst := range c.Instances {
c.logf("killing instance %d [id: %s]\n", i, inst.ID)
if err := inst.Kill(); err != nil {
c.logf("error killing instance %d: %s\n", i, err)
}
}
if c.bridge != nil {
c.logf("deleting network bridge %s\n", c.bridge.name)
if err := deleteBridge(c.bridge); err != nil {
c.logf("error deleting network bridge %s: %s\n", c.bridge.name, err)
}
c.bridge = nil
}
}
var flynnBuildScript = template.Must(template.New("flynn-build").Parse(`
#!/bin/bash
set -e -x
export GOPATH=~/go
flynn=$GOPATH/src/github.com/flynn/flynn
if [ ! -d $flynn ]; then
git clone https://github.com/flynn/flynn $flynn
fi
cd $flynn
# Also fetch Github PR commits
if ! git config --get-all remote.origin.fetch | grep -q '^+refs/pull'; then
git config --add remote.origin.fetch '+refs/pull/*/head:refs/remotes/origin/pr/*'
fi
git fetch
git checkout --quiet {{ .Commit }}
{{ if .Merge }}
git config user.email "ci@flynn.io"
git config user.name "CI"
git merge origin/master
{{ end }}
test/scripts/wait-for-docker
make
if [[ -f test/scripts/debug-info.sh ]]; then
sudo cp test/scripts/debug-info.sh /usr/local/bin/debug-info.sh
fi
sudo cp host/bin/flynn-* /usr/local/bin
sudo cp host/bin/manifest.json /etc/flynn-host.json
sudo cp bootstrap/bin/manifest.json /etc/flynn-bootstrap.json
`[1:]))
type buildData struct {
Commit string
Merge bool
}
// buildFlynn renders the build script for the given commit (optionally merged
// with origin/master) and executes it on the build instance, streaming the
// build output to out.
func buildFlynn(inst *Instance, commit string, merge bool, out io.Writer) error {
	var b bytes.Buffer
	// The original ignored this error; a template failure would silently run
	// an empty script and report a successful build.
	if err := flynnBuildScript.Execute(&b, buildData{commit, merge}); err != nil {
		return fmt.Errorf("error rendering build script: %s", err)
	}
	return inst.Run("bash", &Streams{Stdin: &b, Stdout: out, Stderr: out})
}
var flynnUnitTestScript = `
#!/bin/bash
set -e -x
export GOPATH=~/go
flynn=$GOPATH/src/github.com/flynn/flynn
cd $flynn
if [[ -f test/scripts/test-unit.sh ]]; then
timeout --signal=QUIT --kill-after=10 5m test/scripts/test-unit.sh
fi
`[1:]
func runUnitTests(inst *Instance, out io.Writer) error {
return inst.Run("bash", &Streams{Stdin: bytes.NewBufferString(flynnUnitTestScript), Stdout: out, Stderr: out})
}
type hostScriptData struct {
ID string
IP string
Peers string
EtcdProxy bool
}
var flynnHostScripts = map[string]*template.Template{
"libvirt-lxc": template.Must(template.New("flynn-host-libvirt").Parse(`
if [[ -f /usr/local/bin/debug-info.sh ]]; then
/usr/local/bin/debug-info.sh &>/tmp/debug-info.log &
fi
sudo start-stop-daemon \
--start \
--background \
--no-close \
--make-pidfile \
--pidfile /var/run/flynn-host.pid \
--exec /usr/bin/env \
-- \
ETCD_NAME={{ .ID }} \
ETCD_INITIAL_CLUSTER={{ .Peers }} \
ETCD_INITIAL_CLUSTER_STATE=new \
{{ if .EtcdProxy }} ETCD_PROXY=on {{ end }} \
flynn-host \
daemon \
--id {{ .ID }} \
--manifest /etc/flynn-host.json \
--external {{ .IP }} \
--force \
--backend libvirt-lxc \
&>/tmp/flynn-host.log
`[1:])),
}
type bootstrapMsg struct {
Id string `json:"id"`
State string `json:"state"`
Data json.RawMessage `json:"data"`
Error string `json:"error"`
}
type controllerCert struct {
Pin string `json:"pin"`
}
func (c *Cluster) bootstrapLayer1(instances []*Instance) error {
inst := instances[0]
c.ClusterDomain = fmt.Sprintf("flynn-%s.local", random.String(16))
c.ControllerKey = random.String(16)
c.BackoffPeriod = 5 * time.Second
rd, wr := io.Pipe()
var cmdErr error
go func() {
command := fmt.Sprintf(
"DISCOVERD=%s:1111 CLUSTER_DOMAIN=%s CONTROLLER_KEY=%s BACKOFF_PERIOD=%fs flynn-host bootstrap --json --min-hosts=%d /etc/flynn-bootstrap.json",
inst.IP, c.ClusterDomain, c.ControllerKey, c.BackoffPeriod.Seconds(), len(instances),
)
cmdErr = inst.Run(command, &Streams{Stdout: wr, Stderr: os.Stderr})
wr.Close()
}()
// grab the controller tls pin from the bootstrap output
var cert controllerCert
dec := json.NewDecoder(rd)
for {
var msg bootstrapMsg
if err := dec.Decode(&msg); err == io.EOF {
break
} else if err != nil {
return fmt.Errorf("failed to parse bootstrap JSON output: %s", err)
}
c.log("bootstrap ===>", msg.Id, msg.State)
if msg.State == "error" {
c.log(msg.Error)
}
if msg.Id == "controller-cert" && msg.State == "done" {
json.Unmarshal(msg.Data, &cert)
}
}
if cmdErr != nil {
return cmdErr
}
if cert.Pin == "" {
return errors.New("could not determine controller cert from bootstrap output")
}
c.ControllerPin = cert.Pin
// grab the router IP from discoverd
disc := discoverd.NewClientWithURL(fmt.Sprintf("http://%s:1111", inst.IP))
leader, err := disc.Service("router-api").Leader()
if err != nil {
return fmt.Errorf("could not detect router ip: %s", err)
}
if err = setLocalDNS([]string{c.ClusterDomain, c.ControllerDomain()}, leader.Host()); err != nil {
return fmt.Errorf("could not set cluster DNS entries: %s", err)
}
c.RouterIP = leader.Host()
return nil
}
// setLocalDNS maps the given domains to ip in /etc/hosts: an existing entry
// for ip is rewritten in place, otherwise a fresh line is appended. Needs
// write access to /etc/hosts.
func setLocalDNS(domains []string, ip string) error {
	hostsEntry := strings.Join(domains, " ")
	script := fmt.Sprintf(
		`grep -q "^%[1]s" /etc/hosts && sed "s/^%[1]s.*/%[1]s %s/" -i /etc/hosts || echo %[1]s %s >> /etc/hosts`,
		ip, hostsEntry,
	)
	return exec.Command("bash", "-c", script).Run()
}
// lookupUser resolves a system user name to its numeric uid and gid.
func lookupUser(name string) (int, int, error) {
	u, err := user.Lookup(name)
	if err != nil {
		return 0, 0, err
	}
	// The original discarded the Atoi errors, which would silently yield
	// uid/gid 0 (root) on a malformed passwd entry.
	uid, err := strconv.Atoi(u.Uid)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid uid %q for user %s: %s", u.Uid, name, err)
	}
	gid, err := strconv.Atoi(u.Gid)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid gid %q for user %s: %s", u.Gid, name, err)
	}
	return uid, gid, nil
}
func (c *Cluster) DumpLogs(w io.Writer) {
tw := iotool.NewTimeoutWriter(w, 60*time.Second)
c.dumpLogs(tw)
tw.Finished()
}
func (c *Cluster) dumpLogs(w io.Writer) {
streams := &Streams{Stdout: w, Stderr: w}
run := func(inst *Instance, cmd string) error {
fmt.Fprint(w, "\n\n***** ***** ***** ***** ***** ***** ***** ***** ***** *****\n\n")
fmt.Fprintln(w, "HostID:", inst.ID, "-", cmd)
fmt.Fprintln(w)
err := inst.Run(cmd, streams)
fmt.Fprintln(w)
return err
}
fmt.Fprint(w, "\n\n***** ***** ***** DUMPING ALL LOGS ***** ***** *****\n\n")
for _, inst := range c.Instances {
run(inst, "ps faux")
run(inst, "cat /tmp/flynn-host.log")
run(inst, "cat /tmp/debug-info.log")
}
printLogs := func(instances []*Instance) {
fallback := func() {
fmt.Fprintf(w, "\n*** Error getting job logs via flynn-host, falling back to tail log dump\n\n")
for _, inst := range instances {
run(inst, "sudo bash -c 'tail -n +1 /tmp/flynn-host-logs/**/*.log'")
}
}
run(instances[0], "flynn-host ps -a")
var out bytes.Buffer
if err := instances[0].Run("flynn-host ps -a -q", &Streams{Stdout: &out, Stderr: w}); err != nil {
io.Copy(w, &out)
fallback()
return
}
ids := strings.Split(strings.TrimSpace(out.String()), "\n")
for _, id := range ids {
if err := run(instances[0], fmt.Sprintf("flynn-host inspect %s", id)); err != nil {
fallback()
return
}
run(instances[0], fmt.Sprintf("flynn-host log --init %s", id))
}
}
printLogs(c.defaultInstances)
if len(c.releaseInstances) > 0 {
printLogs(c.releaseInstances)
}
}
|
/*
Package sse implements the primary inner workings of the SSE Reporter.
The primary function is Run(), which starts a scheduler after initialization and registration of the
reporter with the mothership.
*/
package sse
import (
"bytes"
"collector"
"encoding/json"
"fmt"
"helper"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"strings"
"time"
)
var (
mothership_url = "http://mothership.serverstatusmonitoring.com"
register_uri = "/register-service"
collector_uri = "/collector"
status_uri = "/status"
collect_frequency_in_seconds = 1 // When to collect a snapshot and store in cache
report_frequency_in_seconds = 5 // When to report all snapshots in cache
version = "1.0.0" // The version of SSE this is
hostname = ""
ipAddress = ""
log_file = "/var/log/sphire-sse.log"
configuration_file = "/etc/sse/sse.conf"
configuration = new(Configuration)
server = new(Server)
CPU collector.CPU = collector.CPU{}
Disks collector.Disks = collector.Disks{}
Memory collector.Memory = collector.Memory{}
Network collector.Network = collector.Network{}
System collector.System = collector.System{}
httpClient = &http.Client{}
)
/*
Configuration struct is a direct map to the configuration located in the configuration JSON file.
*/
type Configuration struct {
Identification struct {
AccountID string `json:"account_id"`
OrganizationID string `json:"organization_id"`
OrganizationName string `json:"organization_name"`
MachineNickname string `json:"machine_nickname"`
} `json:"identification"`
}
/*
StatusBody struct is a direct map to the status reply from the mothership
*/
type StatusBody struct {
Status string `json:"status"`
}
/*
Snapshot struct is a collection of other structs which are relayed from the different segments
of the collector package.
*/
type Snapshot struct {
CPU *collector.CPU `json:"cpu"`
Disks *collector.Disks `json:"disks"`
Memory *collector.Memory `json:"memory"`
Network *collector.Network `json:"network"`
System *collector.System `json:"system"`
Time time.Time `json:"system_time"`
}
/*
Server struct implements identifying data about the server.
*/
type Server struct {
IpAddress string `json:"ip_address"`
Hostname string `json:"hostname"`
OperatingSystem struct {
// grepped from cat /etc/issue
Distributor string `json:"distributor_id"`
// cat /proc/version_signature
VersionSignature string `json:"version_signature"`
// cat /proc/version
Version string `json:"version"`
} `json:"operating_system"`
Hardware struct {
// grepped from lscpu
Architecture string `json:"architecture"`
CPUOpMode string `json:"cpu_op_mode"`
CPUCount string `json:"cpu_count"`
CPUFamily string `json:"cpu_family"`
CPUModel string `json:"cpu_model"`
CPUMhz string `json:"cpu_mhz"`
} `json:"hardware"`
}
/*
Cache struct implements multiple Snapshot structs. This is cleared after it is reported to the mothership.
Also includes the program Version and AccountId - the latter of which is gleaned from the configuration.
*/
type Cache struct {
Node []*Snapshot `json:"node"`
Server *Server `json:"server"`
AccountId string `json:"account_id"`
Version string `json:"version"`
OrganizationID string `json:"organization_id"`
OrganizationName string `json:"organization_name"`
MachineNickname string `json:"machine_nickname"`
}
/*
Run Program entry point which initializes, registers and runs the main scheduler of the
program. Also handles initialization of the global logger.
*/
func Run() {
	// Define the global logger; all subsequent log output goes to log_file.
	logger, err := os.OpenFile(log_file, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Println(helper.Trace("Unable to secure log: "+log_file, "ERROR"))
		os.Exit(1)
	}
	defer logger.Close()
	log.SetOutput(logger)
	log.Println(helper.Trace("**** Starting program ****", "OK"))
	// Abort early when the mothership cannot be reached at all.
	status := checkStatus()
	if status == false {
		log.Println(helper.Trace("Mothership unreachable. Check your internet connection.", "ERROR"))
		os.Exit(1)
	}
	// Perform system initialization
	_, err = Initialize()
	if err != nil {
		log.Println(helper.Trace("Exiting.", "ERROR"))
		os.Exit(1)
	}
	// Perform the system registration
	log.Println(helper.Trace("Performing registration.", "OK"))
	body, err := Register()
	if err != nil {
		log.Println(helper.Trace("Unable to register this machine"+string(body), "ERROR"))
		os.Exit(1)
	}
	// Collect a snapshot every collect_frequency_in_seconds seconds; after
	// report_frequency_in_seconds collections, ship the cache asynchronously
	// and reset it.
	var counter int = 0
	var snapshot Snapshot = Snapshot{}
	var cache Cache = Cache{
		AccountId:        configuration.Identification.AccountID,
		OrganizationID:   configuration.Identification.OrganizationID,
		OrganizationName: configuration.Identification.OrganizationName,
		MachineNickname:  configuration.Identification.MachineNickname,
		Version:          version,
		Server:           server}
	ticker := time.NewTicker(time.Duration(collect_frequency_in_seconds) * time.Second)
	for {
		<-ticker.C // send the updated time back via the channel
		// fill in the Snapshot struct and add to the cache
		cache.Node = append(cache.Node, snapshot.Collector())
		counter++
		if counter > 0 && counter%report_frequency_in_seconds == 0 {
			// NOTE(review): cache.Node is cleared right after handing cache
			// to the Sender goroutine, which may race with its marshalling —
			// confirm Sender snapshots the data before this loop continues.
			go cache.Sender()
			cache.Node = nil // Clear the Node Cache
			counter = 0
		}
	}
}
/*
Initialize gathers all the data for correct program initialization: the
server's external IP address, hostname, the JSON configuration file, and
static system facts (OS identification files and CPU details from lscpu),
populating the package-level `configuration` and `server` variables.

Returns (true, nil) on success, or (false, err) when the IP address or the
configuration file could not be obtained. Missing system-fact sources are not
fatal and simply yield empty strings.
*/
func Initialize() (bool, error) {
	var (
		architecture string
		cpuOpMode    string
		cpuCount     string
		cpuFamily    string
		cpuModel     string
		cpuMhz       string
	)

	// Attempt to get the server IP address.
	var err error
	ipAddress, err = helper.GetServerExternalIPAddress()
	if err != nil {
		log.Println(helper.Trace("Initialization failed, IP Address unattainable.", "ERROR"))
		return false, err
	}

	// Get the hostname, falling back to the hostname(1) command.
	var errHostname error
	hostname, errHostname = os.Hostname()
	if errHostname != nil {
		if out, errExec := exec.Command("hostname").Output(); errExec == nil {
			hostname = string(out)
		}
	}

	// Load and parse the configuration file. The original ignored the Open
	// error and never closed the file; report the former, defer-close the
	// latter.
	file, err := os.Open(configuration_file)
	if err != nil {
		log.Println(helper.Trace("Initialization failed - could not load configuration.", "ERROR"))
		return false, err
	}
	defer file.Close()
	if err := json.NewDecoder(file).Decode(configuration); err != nil {
		log.Println(helper.Trace("Initialization failed - could not load configuration.", "ERROR"))
		return false, err
	}

	// Gather static system facts; failures leave the value empty.
	distributor, errDistributor := exec.Command("cat", "/etc/issue").Output()
	if errDistributor != nil {
		distributor = []byte{}
	}
	versionSignature, errVerSig := exec.Command("cat", "/proc/version_signature").Output()
	if errVerSig != nil {
		versionSignature = []byte{}
	}
	// Named kernelVersion to avoid shadowing the package-level `version`.
	kernelVersion, errVer := exec.Command("cat", "/proc/version").Output()
	if errVer != nil {
		kernelVersion = []byte{}
	}

	// Parse "key: value" lines from lscpu into the CPU fields. SplitN keeps
	// values that themselves contain ':' intact (the original Split dropped
	// everything after a second colon) and the length check replaces the
	// fragile index<max guard against out-of-range access.
	var hardware []string
	if out, errHwd := exec.Command("lscpu").Output(); errHwd == nil {
		hardware = strings.Split(string(out), "\n")
	}
	for _, line := range hardware {
		splitLine := strings.SplitN(line, ":", 2)
		if len(splitLine) != 2 {
			continue // blank or malformed line
		}
		key := strings.TrimSpace(splitLine[0])
		value := strings.TrimSpace(splitLine[1])
		switch key {
		case "Architecture":
			architecture = value
		case "CPU op-mode(s)":
			cpuOpMode = value
		case "CPU(s)":
			cpuCount = value
		case "CPU family":
			cpuFamily = value
		case "Model":
			cpuModel = value
		case "CPU MHz":
			cpuMhz = value
		}
	}

	server = &Server{
		IpAddress: ipAddress,
		Hostname:  hostname,
		OperatingSystem: struct {
			Distributor      string `json:"distributor_id"`
			VersionSignature string `json:"version_signature"`
			Version          string `json:"version"`
		}{
			Distributor:      string(distributor),
			VersionSignature: string(versionSignature),
			Version:          string(kernelVersion),
		},
		Hardware: struct {
			Architecture string `json:"architecture"`
			CPUOpMode    string `json:"cpu_op_mode"`
			CPUCount     string `json:"cpu_count"`
			CPUFamily    string `json:"cpu_family"`
			CPUModel     string `json:"cpu_model"`
			CPUMhz       string `json:"cpu_mhz"`
		}{
			Architecture: architecture,
			CPUOpMode:    cpuOpMode,
			CPUCount:     cpuCount,
			CPUFamily:    cpuFamily,
			CPUModel:     cpuModel,
			CPUMhz:       cpuMhz,
		},
	}

	log.Println(helper.Trace("Initialization complete.", "OK"))
	return true, nil
}
/*
Register performs a registration of this instance with the mothership by
POSTing the current configuration and runtime settings to the register
endpoint. It returns the raw response body so callers can include it in error
logs.
*/
func Register() (string, error) {
	log.Println(helper.Trace("Starting registration.", "OK"))

	// Assemble the registration payload.
	registrationObject := map[string]interface{}{
		"configuration":     configuration,
		"mothership_url":    mothership_url,
		"register_uri":      register_uri,
		"version":           version,
		"collect_frequency": collect_frequency_in_seconds,
		"report_frequency":  report_frequency_in_seconds,
		"hostname":          hostname,
		"ip_address":        ipAddress,
		"log_file":          log_file,
		"config_file":       configuration_file,
	}
	jsonStr, err := json.Marshal(registrationObject)
	if err != nil {
		return "", err
	}

	req, err := http.NewRequest("POST", mothership_url+register_uri+"/"+version, bytes.NewBuffer(jsonStr))
	if err != nil {
		// The original set headers on req before checking this error, which
		// panics on the nil request NewRequest returns on failure.
		return "", err
	}
	req.Header.Set("X-Custom-Header", "REG")
	req.Header.Set("Content-Type", "application/json")

	// Reuse the shared package-level client instead of allocating a new one
	// per registration.
	resp, err := httpClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}

	// A status of "upgrade" indicates a newer SSE version is available;
	// unmarshal errors are deliberately ignored (best-effort check).
	var status StatusBody
	_ = json.Unmarshal(body, &status)
	if status.Status == "upgrade" {
		fmt.Println("There is a new version available. Please consider upgrading.")
		log.Println(helper.Trace("There is a new version available. Please consider upgrading.", "OK"))
	}

	log.Println(helper.Trace("Registration complete.", "OK"))
	return string(body), nil
}
/*
Collector fills the receiver with a snapshot of the system taken at the time
of the call and returns the same receiver so calls can be chained.
*/
func (s *Snapshot) Collector() *Snapshot {
	// Stamp the snapshot first, then gather each subsystem reading.
	s.Time = time.Now().Local()
	s.CPU = CPU.Collect()
	s.Memory = Memory.Collect()
	s.Disks = Disks.Collect()
	s.Network = Network.Collect()
	s.System = System.Collect()
	return s
}
/*
Sender sends the data in Cache to the mothership as JSON.
Returns true on success; on any failure it logs the error and returns false.
The caller is responsible for clearing the cache afterwards.
*/
func (c *Cache) Sender() bool {
	payload, err := json.Marshal(c)
	if err != nil {
		log.Println(helper.Trace("Unable to complete request", "ERROR"))
		return false
	}
	req, err := http.NewRequest("POST", mothership_url+collector_uri, bytes.NewBuffer(payload))
	if err != nil {
		// Previously unchecked: a nil request would panic on Header.Set below.
		log.Println(helper.Trace("Unable to complete request", "ERROR"))
		return false
	}
	req.Header.Set("X-Custom-Header", "SND")
	req.Header.Set("Content-Type", "application/json")
	resp, err := httpClient.Do(req)
	if err != nil {
		log.Println(helper.Trace("Unable to complete request", "ERROR"))
		return false
	}
	defer resp.Body.Close()
	read_body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Println(helper.Trace("Unable to complete request"+string(read_body), "ERROR"))
		return false
	}
	return true
}
/*
checkStatus checks the status of the mothership
*/
func checkStatus() bool {
var status_body StatusBody
resp, err := http.Get(mothership_url + status_uri)
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
json.Unmarshal(body, &status_body)
if err == nil && status_body.Status == "ok" {
return true
} else {
log.Println(helper.Trace("Unable to complete status request", "ERROR"))
return false
}
}
Update report time; print to stdout on error.
/*
Package sse implements the primary inner workings of the SSE Reporter.
The primary function is Run(), which starts a scheduler after initialization and registration of the
reporter with the mothership.
*/
package sse
import (
"bytes"
"collector"
"encoding/json"
"fmt"
"helper"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"strings"
"time"
)
var (
	// Mothership endpoints.
	mothership_url = "http://mothership.serverstatusmonitoring.com"
	register_uri   = "/register-service"
	collector_uri  = "/collector"
	status_uri     = "/status"
	// Scheduler cadence.
	collect_frequency_in_seconds = 1  // When to collect a snapshot and store in cache
	report_frequency_in_seconds  = 60 // When to report all snapshots in cache
	version                      = "1.0.0" // The version of SSE this is
	// Host identity, populated during Initialize().
	hostname  = ""
	ipAddress = ""
	// Filesystem locations.
	log_file           = "/var/log/sphire-sse.log"
	configuration_file = "/etc/sse/sse.conf"
	// Parsed configuration and collected server description.
	configuration = new(Configuration)
	server        = new(Server)
	// Per-subsystem collectors from the collector package.
	CPU     collector.CPU     = collector.CPU{}
	Disks   collector.Disks   = collector.Disks{}
	Memory  collector.Memory  = collector.Memory{}
	Network collector.Network = collector.Network{}
	System  collector.System  = collector.System{}
	// Shared HTTP client, reused across reports.
	httpClient = &http.Client{}
)
/*
Configuration struct is a direct map to the configuration located in the configuration JSON file.
*/
type Configuration struct {
	// Identification carries the account/organization identity this
	// reporter presents to the mothership when registering and reporting.
	Identification struct {
		AccountID        string `json:"account_id"`
		OrganizationID   string `json:"organization_id"`
		OrganizationName string `json:"organization_name"`
		MachineNickname  string `json:"machine_nickname"`
	} `json:"identification"`
}
/*
StatusBody struct is a direct map to the status reply from the mothership.
*/
type StatusBody struct {
	// Status is "ok" when the mothership is healthy; "upgrade" signals a
	// newer reporter version is available (see Register).
	Status string `json:"status"`
}
/*
Snapshot struct is a collection of other structs which are relayed from the different segments
of the collector package. One Snapshot is produced per collection tick (see Run).
*/
type Snapshot struct {
	CPU     *collector.CPU     `json:"cpu"`
	Disks   *collector.Disks   `json:"disks"`
	Memory  *collector.Memory  `json:"memory"`
	Network *collector.Network `json:"network"`
	System  *collector.System  `json:"system"`
	// Time is the local wall-clock time at which the snapshot was taken.
	Time time.Time `json:"system_time"`
}
/*
Server struct implements identifying data about the server.
Populated once during Initialize() and attached to every Cache report.
*/
type Server struct {
	IpAddress string `json:"ip_address"`
	Hostname  string `json:"hostname"`
	OperatingSystem struct {
		// grepped from cat /etc/issue
		Distributor string `json:"distributor_id"`
		// cat /proc/version_signature
		VersionSignature string `json:"version_signature"`
		// cat /proc/version
		Version string `json:"version"`
	} `json:"operating_system"`
	Hardware struct {
		// grepped from lscpu
		Architecture string `json:"architecture"`
		CPUOpMode    string `json:"cpu_op_mode"`
		CPUCount     string `json:"cpu_count"`
		CPUFamily    string `json:"cpu_family"`
		CPUModel     string `json:"cpu_model"`
		CPUMhz       string `json:"cpu_mhz"`
	} `json:"hardware"`
}
/*
Cache struct implements multiple Snapshot structs. This is cleared after it is reported to the mothership.
Also includes the program Version and AccountId - the latter of which is gleaned from the configuration.
*/
type Cache struct {
	// Node accumulates one Snapshot per collection tick until reported.
	Node   []*Snapshot `json:"node"`
	Server *Server     `json:"server"`
	// Identity fields copied from Configuration.Identification at startup.
	AccountId        string `json:"account_id"`
	Version          string `json:"version"`
	OrganizationID   string `json:"organization_id"`
	OrganizationName string `json:"organization_name"`
	MachineNickname  string `json:"machine_nickname"`
}
/*
Run Program entry point which initializes, registers and runs the main scheduler of the
program. Also handles initialization of the global logger. Exits the process on any
startup failure; once the collection loop starts it never returns.
*/
func Run() {
	// Define the global logger.
	logger, err := os.OpenFile(log_file, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Println(helper.Trace("Unable to secure log: "+log_file, "ERROR"))
		fmt.Println("Unable to secure log: "+log_file, "ERROR")
		os.Exit(1)
	}
	defer logger.Close()
	log.SetOutput(logger)
	log.Println(helper.Trace("**** Starting program ****", "OK"))
	if !checkStatus() {
		log.Println(helper.Trace("Mothership unreachable. Check your internet connection.", "ERROR"))
		fmt.Println("Mothership unreachable. Check your internet connection.", "ERROR")
		os.Exit(1)
	}
	// Perform system initialization.
	if _, err = Initialize(); err != nil {
		log.Println(helper.Trace("Exiting.", "ERROR"))
		fmt.Println("Exiting.", "ERROR")
		os.Exit(1)
	}
	// Perform the system registration.
	log.Println(helper.Trace("Performing registration.", "OK"))
	body, err := Register()
	if err != nil {
		log.Println(helper.Trace("Unable to register this machine"+body, "ERROR"))
		fmt.Println("Unable to register this machine"+body, "ERROR")
		os.Exit(1)
	}
	counter := 0
	cache := Cache{
		AccountId:        configuration.Identification.AccountID,
		OrganizationID:   configuration.Identification.OrganizationID,
		OrganizationName: configuration.Identification.OrganizationName,
		MachineNickname:  configuration.Identification.MachineNickname,
		Version:          version,
		Server:           server,
	}
	ticker := time.NewTicker(time.Duration(collect_frequency_in_seconds) * time.Second)
	defer ticker.Stop()
	for {
		<-ticker.C // send the updated time back via the channel
		// Allocate a fresh Snapshot each tick. The previous code reused a
		// single Snapshot, so every cached pointer aliased the same struct
		// and only the most recent sample was ever reported.
		cache.Node = append(cache.Node, new(Snapshot).Collector())
		counter++
		if counter%report_frequency_in_seconds == 0 {
			// Hand the goroutine its own copy of the cache so clearing
			// cache.Node below cannot race with the in-flight send.
			toSend := cache
			go toSend.Sender()
			cache.Node = nil // Clear the Node Cache
			counter = 0
		}
	}
}
/*
Initialize attempts to gather all the data for correct program initialization. Loads config, etc.
returns bool and error - if ever false, error will be set, otherwise if bool is true, error is nil.
Side effects: populates the package-level ipAddress, hostname, configuration and server values.
*/
func Initialize() (bool, error) {
	// Attempt to get the server IP address.
	var err error
	ipAddress, err = helper.GetServerExternalIPAddress()
	if err != nil {
		log.Println(helper.Trace("Initialization failed, IP Address unattainable.", "ERROR"))
		fmt.Println("Initialization failed, IP Address unattainable.", "ERROR")
		return false, err
	}
	// Get the hostname; fall back to the hostname(1) binary if the syscall fails.
	var err_hstnm error
	hostname, err_hstnm = os.Hostname()
	if err_hstnm != nil {
		hostname_bt, err_hstnm_exec := exec.Command("hostname").Output()
		if err_hstnm_exec == nil {
			hostname = string(hostname_bt)
		}
	}
	// Load and parse the configuration file. The error is reported at the end
	// so the OS/hardware details below are still collected first. The handle
	// is closed here — the previous code leaked it.
	file, err_open := os.Open(configuration_file)
	if err_open != nil {
		err = err_open
	} else {
		err = json.NewDecoder(file).Decode(configuration)
		file.Close()
	}
	// Get data about the server and store it in the struct.
	distributor, err_distributor := exec.Command("cat", "/etc/issue").Output()
	if err_distributor != nil {
		distributor = []byte{}
	}
	versionSignature, err_versig := exec.Command("cat", "/proc/version_signature").Output()
	if err_versig != nil {
		versionSignature = []byte{}
	}
	// Named osVersion to avoid shadowing the package-level version string.
	osVersion, err_ver := exec.Command("cat", "/proc/version").Output()
	if err_ver != nil {
		osVersion = []byte{}
	}
	var architecture string
	var cpuOpMode string
	var cpuCount string
	var cpuFamily string
	var cpuModel string
	var cpuMhz string
	if hardware_out, err_hwd := exec.Command("lscpu").Output(); err_hwd == nil {
		for _, line := range strings.Split(string(hardware_out), "\n") {
			// Guard on the split length, not the line index: any line without
			// a ":" (including the trailing empty line) is skipped safely.
			split_line := strings.SplitN(line, ":", 2)
			if len(split_line) != 2 {
				continue
			}
			key := strings.TrimSpace(split_line[0])
			value := strings.TrimSpace(split_line[1])
			switch key {
			case "Architecture":
				architecture = value
			case "CPU op-mode(s)":
				cpuOpMode = value
			case "CPU(s)":
				cpuCount = value
			case "CPU family":
				cpuFamily = value
			case "Model":
				cpuModel = value
			case "CPU MHz":
				cpuMhz = value
			}
		}
	}
	server = &Server{
		IpAddress: ipAddress,
		Hostname:  hostname,
		OperatingSystem: struct {
			Distributor      string `json:"distributor_id"`
			VersionSignature string `json:"version_signature"`
			Version          string `json:"version"`
		}{
			Distributor:      string(distributor),
			VersionSignature: string(versionSignature),
			Version:          string(osVersion),
		},
		Hardware: struct {
			Architecture string `json:"architecture"`
			CPUOpMode    string `json:"cpu_op_mode"`
			CPUCount     string `json:"cpu_count"`
			CPUFamily    string `json:"cpu_family"`
			CPUModel     string `json:"cpu_model"`
			CPUMhz       string `json:"cpu_mhz"`
		}{
			Architecture: architecture,
			CPUOpMode:    cpuOpMode,
			CPUCount:     cpuCount,
			CPUFamily:    cpuFamily,
			CPUModel:     cpuModel,
			CPUMhz:       cpuMhz,
		},
	}
	if err != nil {
		log.Println(helper.Trace("Initialization failed - could not load configuration.", "ERROR"))
		fmt.Println("Initialization failed - could not load configuration.", "ERROR")
		return false, err
	}
	log.Println(helper.Trace("Initialization complete.", "OK"))
	return true, nil
}
/*
Register performs a registration of this instance with the mothership.
It POSTs the local configuration and host identity as JSON to the register
endpoint and returns the raw response body so callers can log it on failure.
*/
func Register() (string, error) {
	log.Println(helper.Trace("Starting registration.", "OK"))
	// Everything the mothership needs to identify this reporter instance.
	registrationObject := map[string]interface{}{
		"configuration":     configuration,
		"mothership_url":    mothership_url,
		"register_uri":      register_uri,
		"version":           version,
		"collect_frequency": collect_frequency_in_seconds,
		"report_frequency":  report_frequency_in_seconds,
		"hostname":          hostname,
		"ip_address":        ipAddress,
		"log_file":          log_file,
		"config_file":       configuration_file,
	}
	jsonStr, err := json.Marshal(registrationObject)
	if err != nil {
		return "", err
	}
	req, err := http.NewRequest("POST", mothership_url+register_uri+"/"+version, bytes.NewBuffer(jsonStr))
	if err != nil {
		// Previously unchecked: a nil request would panic on Header.Set below.
		return "", err
	}
	req.Header.Set("X-Custom-Header", "REG")
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	// Best-effort decode: an unparseable body is still returned to the caller.
	var status StatusBody
	_ = json.Unmarshal(body, &status)
	if status.Status == "upgrade" {
		log.Println(helper.Trace("There is a new version available. Please consider upgrading.", "OK"))
		fmt.Println("There is a new version available. Please consider upgrading.")
	}
	log.Println(helper.Trace("Registration complete.", "OK"))
	return string(body), nil
}
/*
Collector fills the receiver with a snapshot of the system taken at the time
of the call and returns the same receiver so calls can be chained.
*/
func (s *Snapshot) Collector() *Snapshot {
	// Stamp the snapshot first, then gather each subsystem reading.
	s.Time = time.Now().Local()
	s.CPU = CPU.Collect()
	s.Memory = Memory.Collect()
	s.Disks = Disks.Collect()
	s.Network = Network.Collect()
	s.System = System.Collect()
	return s
}
/*
Sender sends the data in Cache to the mothership as JSON.
Returns true on success; on any failure it logs the error and returns false.
The caller is responsible for clearing the cache afterwards.
*/
func (c *Cache) Sender() bool {
	payload, err := json.Marshal(c)
	if err != nil {
		log.Println(helper.Trace("Unable to complete request", "ERROR"))
		return false
	}
	req, err := http.NewRequest("POST", mothership_url+collector_uri, bytes.NewBuffer(payload))
	if err != nil {
		// Previously unchecked: a nil request would panic on Header.Set below.
		log.Println(helper.Trace("Unable to complete request", "ERROR"))
		return false
	}
	req.Header.Set("X-Custom-Header", "SND")
	req.Header.Set("Content-Type", "application/json")
	resp, err := httpClient.Do(req)
	if err != nil {
		log.Println(helper.Trace("Unable to complete request", "ERROR"))
		return false
	}
	defer resp.Body.Close()
	read_body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Println(helper.Trace("Unable to complete request"+string(read_body), "ERROR"))
		fmt.Println("Unable to complete request"+string(read_body), "ERROR")
		return false
	}
	return true
}
/*
checkStatus checks the status of the mothership
*/
func checkStatus() bool {
var status_body StatusBody
resp, err := http.Get(mothership_url + status_uri)
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
json.Unmarshal(body, &status_body)
if err == nil && status_body.Status == "ok" {
return true
} else {
log.Println(helper.Trace("Unable to complete status request", "ERROR"))
fmt.Println("Unable to complete status request", "ERROR")
return false
}
}
|
package testdata
import (
"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/tendermint/tendermint/account"
ctypes "github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/tendermint/tendermint/rpc/core/types"
stypes "github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/tendermint/tendermint/state/types"
"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/tendermint/tendermint/types"
edb "github.com/eris-ltd/eris-db/erisdb"
ep "github.com/eris-ltd/eris-db/erisdb/pipe"
)
var testDataJson = `{
"chain_data": {
"priv_validator": {
"address": "37236DF251AB70022B1DA351F08A20FB52443E37",
"pub_key": [
1,
"CB3688B7561D488A2A4834E1AEE9398BEF94844D8BDBBCA980C11E3654A45906"
],
"priv_key": [
1,
"6B72D45EB65F619F11CE580C8CAED9E0BADC774E9C9C334687A65DCBAD2C4151CB3688B7561D488A2A4834E1AEE9398BEF94844D8BDBBCA980C11E3654A45906"
],
"last_height": 0,
"last_round": 0,
"last_step": 0
},
"genesis": {
"chain_id": "my_tests",
"accounts": [
{
"address": "F81CB9ED0A868BD961C4F5BBC0E39B763B89FCB6",
"amount": 690000000000
},
{
"address": "0000000000000000000000000000000000000002",
"amount": 565000000000
},
{
"address": "9E54C9ECA9A3FD5D4496696818DA17A9E17F69DA",
"amount": 525000000000
},
{
"address": "0000000000000000000000000000000000000004",
"amount": 110000000000
},
{
"address": "37236DF251AB70022B1DA351F08A20FB52443E37",
"amount": 110000000000
}
],
"validators": [
{
"pub_key": [
1,
"CB3688B7561D488A2A4834E1AEE9398BEF94844D8BDBBCA980C11E3654A45906"
],
"amount": 5000000000,
"unbond_to": [
{
"address": "93E243AC8A01F723DE353A4FA1ED911529CCB6E5",
"amount": 5000000000
}
]
}
]
}
},
"GetAccount": {
"input": {
"address": "9FC1ECFCAE2A554D4D1A000D0D80F748E66359E3"
},
"output": {
"address": "9FC1ECFCAE2A554D4D1A000D0D80F748E66359E3",
"pub_key": null,
"sequence": 0,
"balance": 0,
"code": "",
"storage_root": "",
"permissions": {
"base": {
"perms": 0,
"set": 0
},
"roles": []
}
}
},
"GetAccounts": {
"input": {
"filters": []
},
"output": {
"accounts": [
{
"address": "0000000000000000000000000000000000000000",
"pub_key": null,
"sequence": 0,
"balance": 1337,
"code": "",
"storage_root": "",
"permissions": {
"base": {
"perms": 2302,
"set": 16383
},
"roles": [
]
}
},
{
"address": "0000000000000000000000000000000000000002",
"pub_key": null,
"sequence": 0,
"balance": 565000000000,
"code": "",
"storage_root": "",
"permissions": {
"base": {
"perms": 0,
"set": 0
},
"roles": []
}
},
{
"address": "0000000000000000000000000000000000000004",
"pub_key": null,
"sequence": 0,
"balance": 110000000000,
"code": "",
"storage_root": "",
"permissions": {
"base": {
"perms": 0,
"set": 0
},
"roles": []
}
},
{
"address": "37236DF251AB70022B1DA351F08A20FB52443E37",
"pub_key": null,
"sequence": 0,
"balance": 110000000000,
"code": "",
"storage_root": "",
"permissions": {
"base": {
"perms": 0,
"set": 0
},
"roles": []
}
},
{
"address": "9E54C9ECA9A3FD5D4496696818DA17A9E17F69DA",
"pub_key": null,
"sequence": 0,
"balance": 525000000000,
"code": "",
"storage_root": "",
"permissions": {
"base": {
"perms": 0,
"set": 0
},
"roles": []
}
},
{
"address": "F81CB9ED0A868BD961C4F5BBC0E39B763B89FCB6",
"pub_key": null,
"sequence": 0,
"balance": 690000000000,
"code": "",
"storage_root": "",
"permissions": {
"base": {
"perms": 0,
"set": 0
},
"roles": []
}
}
]
}
},
"GetStorage": {
"input": {
"address": "9FC1ECFCAE2A554D4D1A000D0D80F748E66359E3"
},
"output": {
"storage_root": "",
"storage_items": []
}
},
"GetStorageAt": {
"input": {
"address": "9FC1ECFCAE2A554D4D1A000D0D80F748E66359E3",
"key": "00"
},
"output": {
"key": "00",
"value": ""
}
},
"GenPrivAccount": {
"output": {
"address": "",
"pub_key": [
1,
"0000000000000000000000000000000000000000000000000000000000000000"
],
"priv_key": [
1,
"00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
]
}
},
"GetBlockchainInfo": {
"output": {
"chain_id": "my_tests",
"genesis_hash": "0A8C453DB67BE52D32F9451212E8CE0E172AE56C",
"latest_block_height": 0,
"latest_block": null
}
},
"GetChainId": {
"output": {
"chain_id": "my_tests"
}
},
"GetGenesisHash": {
"output": {
"hash": "0A8C453DB67BE52D32F9451212E8CE0E172AE56C"
}
},
"GetLatestBlockHeight": {
"output": {
"height": 0
}
},
"GetLatestBlock": {
"output": {}
},
"GetBlock": {
"input": {"height": 0},
"output": null
},
"GetBlocks": {
"input": {
"filters": []
},
"output": {
"min_height": 0,
"max_height": 0,
"block_metas": []
}
},
"GetConsensusState": {
"output": {
"height": 1,
"round": 0,
"step": 1,
"start_time": "",
"commit_time": "0001-01-01 00:00:00 +0000 UTC",
"validators": [
{
"address": "37236DF251AB70022B1DA351F08A20FB52443E37",
"pub_key": [
1,
"CB3688B7561D488A2A4834E1AEE9398BEF94844D8BDBBCA980C11E3654A45906"
],
"bond_height": 0,
"unbond_height": 0,
"last_commit_height": 0,
"voting_power": 5000000000,
"accum": 0
}
],
"proposal": null
}
},
"GetValidators": {
"output": {
"block_height": 0,
"bonded_validators": [
{
"address": "37236DF251AB70022B1DA351F08A20FB52443E37",
"pub_key": [
1,
"CB3688B7561D488A2A4834E1AEE9398BEF94844D8BDBBCA980C11E3654A45906"
],
"bond_height": 0,
"unbond_height": 0,
"last_commit_height": 0,
"voting_power": 5000000000,
"accum": 0
}
],
"unbonding_validators": []
}
},
"GetNetworkInfo": {
"output": {
"client_version": "0.5.0",
"moniker": "__MONIKER__",
"listening": false,
"listeners": [],
"peers": []
}
},
"GetClientVersion": {
"output": {
"client_version": "0.5.0"
}
},
"GetMoniker": {
"output": {
"moniker": "__MONIKER__"
}
},
"IsListening": {
"output": {
"listening": false
}
},
"GetListeners": {
"output": {
"listeners": []
}
},
"GetPeers": {
"output": []
},
"GetPeer": {
"input": {"address": "127.0.0.1:30000"},
"output": {
"is_outbound": false,
"node_info": null
}
},
"Transact": {
"input": {
"address": "9FC1ECFCAE2A554D4D1A000D0D80F748E66359E3",
"priv_key": "6B72D45EB65F619F11CE580C8CAED9E0BADC774E9C9C334687A65DCBAD2C4151CB3688B7561D488A2A4834E1AEE9398BEF94844D8BDBBCA980C11E3654A45906",
"data": "",
"fee": 0,
"gas_limit": 1000000
},
"output": {
"tx_hash": "240E5BDCC0E4F7C1F29A66CA20E3F7A0D6F7EF51",
"creates_contract": 0,
"contract_addr": ""
}
},
"TransactCreate": {
"input": {
"address": "",
"priv_key": "6B72D45EB65F619F11CE580C8CAED9E0BADC774E9C9C334687A65DCBAD2C4151CB3688B7561D488A2A4834E1AEE9398BEF94844D8BDBBCA980C11E3654A45906",
"data": "5B33600060006101000A81548173FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF021916908302179055505B6102828061003B6000396000F3006000357C01000000000000000000000000000000000000000000000000000000009004806337F428411461004557806340C10F191461005A578063D0679D341461006E57005B610050600435610244565B8060005260206000F35B610068600435602435610082565B60006000F35B61007C600435602435610123565B60006000F35B600060009054906101000A900473FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1673FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF163373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1614156100DD576100E2565B61011F565B80600160005060008473FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020019081526020016000206000828282505401925050819055505B5050565B80600160005060003373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF168152602001908152602001600020600050541061015E57610163565B610240565B80600160005060003373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF16815260200190815260200160002060008282825054039250508190555080600160005060008473FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020019081526020016000206000828282505401925050819055507F93EB3C629EB575EDAF0252E4F9FC0C5CCADA50496F8C1D32F0F93A65A8257EB560003373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020018373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020018281526020016000A15B5050565B6000600160005060008373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF16815260200190815260200160002060005054905061027D565B91905056",
"fee": 0,
"gas_limit": 1000000
},
"output": {
"tx_hash": "BD5D35871770DB04726843A4C07A26CDE69EB860",
"creates_contract": 1,
"contract_addr": "576439CD5C22EB6F3AE1AC1EC5101C5CE1E120D8"
}
},
"GetUnconfirmedTxs": {
"output": {
"txs": [
[
2,
{
"input": {
"address": "37236DF251AB70022B1DA351F08A20FB52443E37",
"amount": 1,
"sequence": 1,
"signature": [
1,
"2FE1C5EA3B0A05560073D7BF145C0997803113D27618CBCD71985806255E6492C7DC574AF373D3807068164AF4FE51D8CDA7DCC995E088375B83AEA3F8F6F204"
],
"pub_key": [
1,
"CB3688B7561D488A2A4834E1AEE9398BEF94844D8BDBBCA980C11E3654A45906"
]
},
"address": "",
"gas_limit": 1000000,
"fee": 0,
"data": "5B33600060006101000A81548173FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF021916908302179055505B6102828061003B6000396000F3006000357C01000000000000000000000000000000000000000000000000000000009004806337F428411461004557806340C10F191461005A578063D0679D341461006E57005B610050600435610244565B8060005260206000F35B610068600435602435610082565B60006000F35B61007C600435602435610123565B60006000F35B600060009054906101000A900473FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1673FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF163373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1614156100DD576100E2565B61011F565B80600160005060008473FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020019081526020016000206000828282505401925050819055505B5050565B80600160005060003373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF168152602001908152602001600020600050541061015E57610163565B610240565B80600160005060003373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF16815260200190815260200160002060008282825054039250508190555080600160005060008473FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020019081526020016000206000828282505401925050819055507F93EB3C629EB575EDAF0252E4F9FC0C5CCADA50496F8C1D32F0F93A65A8257EB560003373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020018373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020018281526020016000A15B5050565B6000600160005060008373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF16815260200190815260200160002060005054905061027D565B91905056"
}
],
[
2,
{
"input": {
"address": "37236DF251AB70022B1DA351F08A20FB52443E37",
"amount": 1,
"sequence": 3,
"signature": [
1,
"425A4D50350EEB597C48F82924E83F24640F9ECB3886A2B85D0073911AE02FC06F3D0FD480D59140B1D2DA669A9BD0227B31026EF3E0AAD534DCF50784984B01"
],
"pub_key": null
},
"address": "9FC1ECFCAE2A554D4D1A000D0D80F748E66359E3",
"gas_limit": 1000000,
"fee": 0,
"data": ""
}
]
]
}
},
"CallCode": {
"input": {
"from": "DEADBEEF",
"code": "5B33600060006101000A81548173FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF021916908302179055505B6102828061003B6000396000F3006000357C01000000000000000000000000000000000000000000000000000000009004806337F428411461004557806340C10F191461005A578063D0679D341461006E57005B610050600435610244565B8060005260206000F35B610068600435602435610082565B60006000F35B61007C600435602435610123565B60006000F35B600060009054906101000A900473FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1673FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF163373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1614156100DD576100E2565B61011F565B80600160005060008473FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020019081526020016000206000828282505401925050819055505B5050565B80600160005060003373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF168152602001908152602001600020600050541061015E57610163565B610240565B80600160005060003373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF16815260200190815260200160002060008282825054039250508190555080600160005060008473FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020019081526020016000206000828282505401925050819055507F93EB3C629EB575EDAF0252E4F9FC0C5CCADA50496F8C1D32F0F93A65A8257EB560003373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020018373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020018281526020016000A15B5050565B6000600160005060008373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF16815260200190815260200160002060005054905061027D565B91905056",
"data": ""
},
"output": {
"return": "6000357c01000000000000000000000000000000000000000000000000000000009004806337f428411461004557806340c10f191461005a578063d0679d341461006e57005b610050600435610244565b8060005260206000f35b610068600435602435610082565b60006000f35b61007c600435602435610123565b60006000f35b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614156100dd576100e2565b61011f565b80600160005060008473ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828282505401925050819055505b5050565b80600160005060003373ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600050541061015e57610163565b610240565b80600160005060003373ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282825054039250508190555080600160005060008473ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828282505401925050819055507f93eb3c629eb575edaf0252e4f9fc0c5ccada50496f8c1d32f0f93a65a8257eb560003373ffffffffffffffffffffffffffffffffffffffff1681526020018373ffffffffffffffffffffffffffffffffffffffff1681526020018281526020016000a15b5050565b6000600160005060008373ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060005054905061027d565b91905056",
"gas_used": 0
}
},
"Call": {
"input": {"address": "9FC1ECFCAE2A554D4D1A000D0D80F748E66359E3", "from": "DEADBEEF", "data": ""},
"output": {
"return": "6000357c01000000000000000000000000000000000000000000000000000000009004806337f428411461004557806340c10f191461005a578063d0679d341461006e57005b610050600435610244565b8060005260206000f35b610068600435602435610082565b60006000f35b61007c600435602435610123565b60006000f35b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614156100dd576100e2565b61011f565b80600160005060008473ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828282505401925050819055505b5050565b80600160005060003373ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600050541061015e57610163565b610240565b80600160005060003373ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282825054039250508190555080600160005060008473ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828282505401925050819055507f93eb3c629eb575edaf0252e4f9fc0c5ccada50496f8c1d32f0f93a65a8257eb560003373ffffffffffffffffffffffffffffffffffffffff1681526020018373ffffffffffffffffffffffffffffffffffffffff1681526020018281526020016000a15b5050565b6000600160005060008373ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060005054905061027d565b91905056",
"gas_used": 0
}
},
"EventSubscribe": {
"input": {
"event_id": "testId"
},
"output": {
"sub_id": "1234123412341234123412341234123412341234123412341234123412341234"
}
},
"EventUnsubscribe": {
"input": {
"event_sub": "1234123412341234123412341234123412341234123412341234123412341234"
},
"output": {
"result": true
}
},
"EventPoll": {
"input": {
"event_sub": "1234123412341234123412341234123412341234123412341234123412341234"
},
"output": {
"events": [
{
"address": "0000000000000000000000009FC1ECFCAE2A554D4D1A000D0D80F748E66359E3",
"topics": [
"0FC28FCE5E54AC6458756FC24DC51A931CA7AD21440CFCA44933AE774ED5F70C",
"0000000000000000000000000000000000000000000000000000000000000005",
"0000000000000000000000000000000000000000000000000000000000000019",
"000000000000000000000000000000000000000000000000000000000000001E"
],
"data": "41646465642074776F206E756D62657273000000000000000000000000000000",
"height": 1
}
]
}
},
"TransactNameReg": {
"input": {
"priv_key": "6B72D45EB65F619F11CE580C8CAED9E0BADC774E9C9C334687A65DCBAD2C4151CB3688B7561D488A2A4834E1AEE9398BEF94844D8BDBBCA980C11E3654A45906",
"name": "testKey",
"data": "testValue",
"amount": 10000,
"fee": 0
},
"output": {
"tx_hash": "98B0D5162C7CB86FF94BE2C00469107B7CA51CF3",
"creates_contract": 0,
"contract_addr": ""
}
},
"GetNameRegEntry": {
"input": {
"name": "testKey"
},
"output": {
"name": "testKey",
"owner": "37236DF251AB70022B1DA351F08A20FB52443E37",
"data": "testData",
"expires": 250 }
},
"GetNameRegEntries": {
"input": {
"filters": []
},
"output": [11, {
"block_height": 1,
"names":[ {
"name": "testKey",
"owner": "37236DF251AB70022B1DA351F08A20FB52443E37",
"data": "testData",
"expires": 250
} ]
}]
}
}`
var serverDuration uint = 100
type (
ChainData struct {
PrivValidator *types.PrivValidator `json:"priv_validator"`
Genesis *stypes.GenesisDoc `json:"genesis"`
}
GetAccountData struct {
Input *edb.AddressParam `json:"input"`
Output *account.Account `json:"output"`
}
GetAccountsData struct {
Input *edb.AccountsParam `json:"input"`
Output *ep.AccountList `json:"output"`
}
GetStorageData struct {
Input *edb.AddressParam `json:"input"`
Output *ep.Storage `json:"output"`
}
GetStorageAtData struct {
Input *edb.StorageAtParam `json:"input"`
Output *ep.StorageItem `json:"output"`
}
GenPrivAccountData struct {
Output *account.PrivAccount `json:"output"`
}
GetBlockchainInfoData struct {
Output *ep.BlockchainInfo `json:"output"`
}
GetChainIdData struct {
Output *ep.ChainId `json:"output"`
}
GetGenesisHashData struct {
Output *ep.GenesisHash `json:"output"`
}
GetLatestBlockHeightData struct {
Output *ep.LatestBlockHeight `json:"output"`
}
GetLatestBlockData struct {
Output *types.Block `json:"output"`
}
GetBlockData struct {
Input *edb.HeightParam `json:"input"`
Output *types.Block `json:"output"`
}
GetBlocksData struct {
Input *edb.BlocksParam `json:"input"`
Output *ep.Blocks `json:"output"`
}
GetConsensusStateData struct {
Output *ep.ConsensusState `json:"output"`
}
GetValidatorsData struct {
Output *ep.ValidatorList `json:"output"`
}
GetNetworkInfoData struct {
Output *ep.NetworkInfo `json:"output"`
}
GetClientVersionData struct {
Output *ep.ClientVersion `json:"output"`
}
GetMonikerData struct {
Output *ep.Moniker `json:"output"`
}
IsListeningData struct {
Output *ep.Listening `json:"output"`
}
GetListenersData struct {
Output *ep.Listeners `json:"output"`
}
GetPeersData struct {
Output []*ep.Peer `json:"output"`
}
GetPeerData struct {
Input *edb.PeerParam `json:"input"`
Output *ep.Peer `json:"output"`
}
TransactData struct {
Input *edb.TransactParam `json:"input"`
Output *ep.Receipt `json:"output"`
}
TransactCreateData struct {
Input *edb.TransactParam `json:"input"`
Output *ep.Receipt `json:"output"`
}
GetUnconfirmedTxsData struct {
Output *ep.UnconfirmedTxs `json:"output"`
}
CallCodeData struct {
Input *edb.CallCodeParam `json:"input"`
Output *ep.Call `json:"output"`
}
CallData struct {
Input *edb.CallParam `json:"input"`
Output *ep.Call `json:"output"`
}
EventSubscribeData struct {
Input *edb.EventIdParam `json:"input"`
Output *ep.EventSub `json:"output"`
}
EventUnsubscribeData struct {
Input *edb.SubIdParam `json:"input"`
Output *ep.EventUnsub `json:"output"`
}
TransactNameRegData struct {
Input *edb.TransactNameRegParam `json:"input"`
Output *ep.Receipt `json:"output"`
}
GetNameRegEntryData struct {
Input *edb.NameRegEntryParam `json:"input"`
Output *types.NameRegEntry `json:"output"`
}
GetNameRegEntriesData struct {
Input *edb.FilterListParam `json:"input"`
Output *ctypes.ResultListNames `json:"output"`
}
/*
EventPollData struct {
Input *edb.SubIdParam `json:"input"`
Output *ep.PollResponse `json:"output"`
}
*/
TestData struct {
ChainData *ChainData `json:"chain_data"`
GetAccount *GetAccountData
GetAccounts *GetAccountsData
GetStorage *GetStorageData
GetStorageAt *GetStorageAtData
GenPrivAccount *GenPrivAccountData
GetBlockchainInfo *GetBlockchainInfoData
GetChainId *GetChainIdData
GetGenesisHash *GetGenesisHashData
GetLatestBlockHeight *GetLatestBlockHeightData
GetLatestBlock *GetLatestBlockData
GetBlock *GetBlockData
GetBlocks *GetBlocksData
GetConsensusState *GetConsensusStateData
GetValidators *GetValidatorsData
GetNetworkInfo *GetNetworkInfoData
GetClientVersion *GetClientVersionData
GetMoniker *GetMonikerData
IsListening *IsListeningData
GetListeners *GetListenersData
GetPeers *GetPeersData
Transact *TransactData
TransactCreate *TransactCreateData
TransactNameReg *TransactNameRegData
GetUnconfirmedTxs *GetUnconfirmedTxsData
CallCode *CallCodeData
Call *CallData
EventSubscribe *EventSubscribeData
EventUnsubscribe *EventUnsubscribeData
GetNameRegEntry *GetNameRegEntryData
GetNameRegEntries *GetNameRegEntriesData
// GetPeer *GetPeerData
// EventPoll *EventPollData
}
)
// LoadTestData decodes the embedded testDataJson fixture into a TestData
// value using the eris-db tendermint codec.
// It panics on decode failure: the fixture is compile-time constant data,
// so a failure here is a programmer error, not a runtime condition.
func LoadTestData() *TestData {
	codec := edb.NewTCodec()
	testData := &TestData{}
	err := codec.DecodeBytes(testData, []byte(testDataJson))
	if err != nil {
		panic(err)
	}
	return testData
}
update tendermint version in testdata
package testdata
import (
"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/tendermint/tendermint/account"
ctypes "github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/tendermint/tendermint/rpc/core/types"
stypes "github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/tendermint/tendermint/state/types"
"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/tendermint/tendermint/types"
edb "github.com/eris-ltd/eris-db/erisdb"
ep "github.com/eris-ltd/eris-db/erisdb/pipe"
)
var testDataJson = `{
"chain_data": {
"priv_validator": {
"address": "37236DF251AB70022B1DA351F08A20FB52443E37",
"pub_key": [
1,
"CB3688B7561D488A2A4834E1AEE9398BEF94844D8BDBBCA980C11E3654A45906"
],
"priv_key": [
1,
"6B72D45EB65F619F11CE580C8CAED9E0BADC774E9C9C334687A65DCBAD2C4151CB3688B7561D488A2A4834E1AEE9398BEF94844D8BDBBCA980C11E3654A45906"
],
"last_height": 0,
"last_round": 0,
"last_step": 0
},
"genesis": {
"chain_id": "my_tests",
"accounts": [
{
"address": "F81CB9ED0A868BD961C4F5BBC0E39B763B89FCB6",
"amount": 690000000000
},
{
"address": "0000000000000000000000000000000000000002",
"amount": 565000000000
},
{
"address": "9E54C9ECA9A3FD5D4496696818DA17A9E17F69DA",
"amount": 525000000000
},
{
"address": "0000000000000000000000000000000000000004",
"amount": 110000000000
},
{
"address": "37236DF251AB70022B1DA351F08A20FB52443E37",
"amount": 110000000000
}
],
"validators": [
{
"pub_key": [
1,
"CB3688B7561D488A2A4834E1AEE9398BEF94844D8BDBBCA980C11E3654A45906"
],
"amount": 5000000000,
"unbond_to": [
{
"address": "93E243AC8A01F723DE353A4FA1ED911529CCB6E5",
"amount": 5000000000
}
]
}
]
}
},
"GetAccount": {
"input": {
"address": "9FC1ECFCAE2A554D4D1A000D0D80F748E66359E3"
},
"output": {
"address": "9FC1ECFCAE2A554D4D1A000D0D80F748E66359E3",
"pub_key": null,
"sequence": 0,
"balance": 0,
"code": "",
"storage_root": "",
"permissions": {
"base": {
"perms": 0,
"set": 0
},
"roles": []
}
}
},
"GetAccounts": {
"input": {
"filters": []
},
"output": {
"accounts": [
{
"address": "0000000000000000000000000000000000000000",
"pub_key": null,
"sequence": 0,
"balance": 1337,
"code": "",
"storage_root": "",
"permissions": {
"base": {
"perms": 2302,
"set": 16383
},
"roles": [
]
}
},
{
"address": "0000000000000000000000000000000000000002",
"pub_key": null,
"sequence": 0,
"balance": 565000000000,
"code": "",
"storage_root": "",
"permissions": {
"base": {
"perms": 0,
"set": 0
},
"roles": []
}
},
{
"address": "0000000000000000000000000000000000000004",
"pub_key": null,
"sequence": 0,
"balance": 110000000000,
"code": "",
"storage_root": "",
"permissions": {
"base": {
"perms": 0,
"set": 0
},
"roles": []
}
},
{
"address": "37236DF251AB70022B1DA351F08A20FB52443E37",
"pub_key": null,
"sequence": 0,
"balance": 110000000000,
"code": "",
"storage_root": "",
"permissions": {
"base": {
"perms": 0,
"set": 0
},
"roles": []
}
},
{
"address": "9E54C9ECA9A3FD5D4496696818DA17A9E17F69DA",
"pub_key": null,
"sequence": 0,
"balance": 525000000000,
"code": "",
"storage_root": "",
"permissions": {
"base": {
"perms": 0,
"set": 0
},
"roles": []
}
},
{
"address": "F81CB9ED0A868BD961C4F5BBC0E39B763B89FCB6",
"pub_key": null,
"sequence": 0,
"balance": 690000000000,
"code": "",
"storage_root": "",
"permissions": {
"base": {
"perms": 0,
"set": 0
},
"roles": []
}
}
]
}
},
"GetStorage": {
"input": {
"address": "9FC1ECFCAE2A554D4D1A000D0D80F748E66359E3"
},
"output": {
"storage_root": "",
"storage_items": []
}
},
"GetStorageAt": {
"input": {
"address": "9FC1ECFCAE2A554D4D1A000D0D80F748E66359E3",
"key": "00"
},
"output": {
"key": "00",
"value": ""
}
},
"GenPrivAccount": {
"output": {
"address": "",
"pub_key": [
1,
"0000000000000000000000000000000000000000000000000000000000000000"
],
"priv_key": [
1,
"00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
]
}
},
"GetBlockchainInfo": {
"output": {
"chain_id": "my_tests",
"genesis_hash": "0A8C453DB67BE52D32F9451212E8CE0E172AE56C",
"latest_block_height": 0,
"latest_block": null
}
},
"GetChainId": {
"output": {
"chain_id": "my_tests"
}
},
"GetGenesisHash": {
"output": {
"hash": "0A8C453DB67BE52D32F9451212E8CE0E172AE56C"
}
},
"GetLatestBlockHeight": {
"output": {
"height": 0
}
},
"GetLatestBlock": {
"output": {}
},
"GetBlock": {
"input": {"height": 0},
"output": null
},
"GetBlocks": {
"input": {
"filters": []
},
"output": {
"min_height": 0,
"max_height": 0,
"block_metas": []
}
},
"GetConsensusState": {
"output": {
"height": 1,
"round": 0,
"step": 1,
"start_time": "",
"commit_time": "0001-01-01 00:00:00 +0000 UTC",
"validators": [
{
"address": "37236DF251AB70022B1DA351F08A20FB52443E37",
"pub_key": [
1,
"CB3688B7561D488A2A4834E1AEE9398BEF94844D8BDBBCA980C11E3654A45906"
],
"bond_height": 0,
"unbond_height": 0,
"last_commit_height": 0,
"voting_power": 5000000000,
"accum": 0
}
],
"proposal": null
}
},
"GetValidators": {
"output": {
"block_height": 0,
"bonded_validators": [
{
"address": "37236DF251AB70022B1DA351F08A20FB52443E37",
"pub_key": [
1,
"CB3688B7561D488A2A4834E1AEE9398BEF94844D8BDBBCA980C11E3654A45906"
],
"bond_height": 0,
"unbond_height": 0,
"last_commit_height": 0,
"voting_power": 5000000000,
"accum": 0
}
],
"unbonding_validators": []
}
},
"GetNetworkInfo": {
"output": {
"client_version": "0.5.2",
"moniker": "__MONIKER__",
"listening": false,
"listeners": [],
"peers": []
}
},
"GetClientVersion": {
"output": {
"client_version": "0.5.2"
}
},
"GetMoniker": {
"output": {
"moniker": "__MONIKER__"
}
},
"IsListening": {
"output": {
"listening": false
}
},
"GetListeners": {
"output": {
"listeners": []
}
},
"GetPeers": {
"output": []
},
"GetPeer": {
"input": {"address": "127.0.0.1:30000"},
"output": {
"is_outbound": false,
"node_info": null
}
},
"Transact": {
"input": {
"address": "9FC1ECFCAE2A554D4D1A000D0D80F748E66359E3",
"priv_key": "6B72D45EB65F619F11CE580C8CAED9E0BADC774E9C9C334687A65DCBAD2C4151CB3688B7561D488A2A4834E1AEE9398BEF94844D8BDBBCA980C11E3654A45906",
"data": "",
"fee": 0,
"gas_limit": 1000000
},
"output": {
"tx_hash": "240E5BDCC0E4F7C1F29A66CA20E3F7A0D6F7EF51",
"creates_contract": 0,
"contract_addr": ""
}
},
"TransactCreate": {
"input": {
"address": "",
"priv_key": "6B72D45EB65F619F11CE580C8CAED9E0BADC774E9C9C334687A65DCBAD2C4151CB3688B7561D488A2A4834E1AEE9398BEF94844D8BDBBCA980C11E3654A45906",
"data": "5B33600060006101000A81548173FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF021916908302179055505B6102828061003B6000396000F3006000357C01000000000000000000000000000000000000000000000000000000009004806337F428411461004557806340C10F191461005A578063D0679D341461006E57005B610050600435610244565B8060005260206000F35B610068600435602435610082565B60006000F35B61007C600435602435610123565B60006000F35B600060009054906101000A900473FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1673FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF163373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1614156100DD576100E2565B61011F565B80600160005060008473FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020019081526020016000206000828282505401925050819055505B5050565B80600160005060003373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF168152602001908152602001600020600050541061015E57610163565B610240565B80600160005060003373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF16815260200190815260200160002060008282825054039250508190555080600160005060008473FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020019081526020016000206000828282505401925050819055507F93EB3C629EB575EDAF0252E4F9FC0C5CCADA50496F8C1D32F0F93A65A8257EB560003373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020018373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020018281526020016000A15B5050565B6000600160005060008373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF16815260200190815260200160002060005054905061027D565B91905056",
"fee": 0,
"gas_limit": 1000000
},
"output": {
"tx_hash": "BD5D35871770DB04726843A4C07A26CDE69EB860",
"creates_contract": 1,
"contract_addr": "576439CD5C22EB6F3AE1AC1EC5101C5CE1E120D8"
}
},
"GetUnconfirmedTxs": {
"output": {
"txs": [
[
2,
{
"input": {
"address": "37236DF251AB70022B1DA351F08A20FB52443E37",
"amount": 1,
"sequence": 1,
"signature": [
1,
"2FE1C5EA3B0A05560073D7BF145C0997803113D27618CBCD71985806255E6492C7DC574AF373D3807068164AF4FE51D8CDA7DCC995E088375B83AEA3F8F6F204"
],
"pub_key": [
1,
"CB3688B7561D488A2A4834E1AEE9398BEF94844D8BDBBCA980C11E3654A45906"
]
},
"address": "",
"gas_limit": 1000000,
"fee": 0,
"data": "5B33600060006101000A81548173FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF021916908302179055505B6102828061003B6000396000F3006000357C01000000000000000000000000000000000000000000000000000000009004806337F428411461004557806340C10F191461005A578063D0679D341461006E57005B610050600435610244565B8060005260206000F35B610068600435602435610082565B60006000F35B61007C600435602435610123565B60006000F35B600060009054906101000A900473FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1673FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF163373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1614156100DD576100E2565B61011F565B80600160005060008473FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020019081526020016000206000828282505401925050819055505B5050565B80600160005060003373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF168152602001908152602001600020600050541061015E57610163565B610240565B80600160005060003373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF16815260200190815260200160002060008282825054039250508190555080600160005060008473FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020019081526020016000206000828282505401925050819055507F93EB3C629EB575EDAF0252E4F9FC0C5CCADA50496F8C1D32F0F93A65A8257EB560003373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020018373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020018281526020016000A15B5050565B6000600160005060008373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF16815260200190815260200160002060005054905061027D565B91905056"
}
],
[
2,
{
"input": {
"address": "37236DF251AB70022B1DA351F08A20FB52443E37",
"amount": 1,
"sequence": 3,
"signature": [
1,
"425A4D50350EEB597C48F82924E83F24640F9ECB3886A2B85D0073911AE02FC06F3D0FD480D59140B1D2DA669A9BD0227B31026EF3E0AAD534DCF50784984B01"
],
"pub_key": null
},
"address": "9FC1ECFCAE2A554D4D1A000D0D80F748E66359E3",
"gas_limit": 1000000,
"fee": 0,
"data": ""
}
]
]
}
},
"CallCode": {
"input": {
"from": "DEADBEEF",
"code": "5B33600060006101000A81548173FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF021916908302179055505B6102828061003B6000396000F3006000357C01000000000000000000000000000000000000000000000000000000009004806337F428411461004557806340C10F191461005A578063D0679D341461006E57005B610050600435610244565B8060005260206000F35B610068600435602435610082565B60006000F35B61007C600435602435610123565B60006000F35B600060009054906101000A900473FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1673FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF163373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1614156100DD576100E2565B61011F565B80600160005060008473FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020019081526020016000206000828282505401925050819055505B5050565B80600160005060003373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF168152602001908152602001600020600050541061015E57610163565B610240565B80600160005060003373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF16815260200190815260200160002060008282825054039250508190555080600160005060008473FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020019081526020016000206000828282505401925050819055507F93EB3C629EB575EDAF0252E4F9FC0C5CCADA50496F8C1D32F0F93A65A8257EB560003373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020018373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1681526020018281526020016000A15B5050565B6000600160005060008373FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF16815260200190815260200160002060005054905061027D565B91905056",
"data": ""
},
"output": {
"return": "6000357c01000000000000000000000000000000000000000000000000000000009004806337f428411461004557806340c10f191461005a578063d0679d341461006e57005b610050600435610244565b8060005260206000f35b610068600435602435610082565b60006000f35b61007c600435602435610123565b60006000f35b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614156100dd576100e2565b61011f565b80600160005060008473ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828282505401925050819055505b5050565b80600160005060003373ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600050541061015e57610163565b610240565b80600160005060003373ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282825054039250508190555080600160005060008473ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828282505401925050819055507f93eb3c629eb575edaf0252e4f9fc0c5ccada50496f8c1d32f0f93a65a8257eb560003373ffffffffffffffffffffffffffffffffffffffff1681526020018373ffffffffffffffffffffffffffffffffffffffff1681526020018281526020016000a15b5050565b6000600160005060008373ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060005054905061027d565b91905056",
"gas_used": 0
}
},
"Call": {
"input": {"address": "9FC1ECFCAE2A554D4D1A000D0D80F748E66359E3", "from": "DEADBEEF", "data": ""},
"output": {
"return": "6000357c01000000000000000000000000000000000000000000000000000000009004806337f428411461004557806340c10f191461005a578063d0679d341461006e57005b610050600435610244565b8060005260206000f35b610068600435602435610082565b60006000f35b61007c600435602435610123565b60006000f35b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614156100dd576100e2565b61011f565b80600160005060008473ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828282505401925050819055505b5050565b80600160005060003373ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600050541061015e57610163565b610240565b80600160005060003373ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282825054039250508190555080600160005060008473ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828282505401925050819055507f93eb3c629eb575edaf0252e4f9fc0c5ccada50496f8c1d32f0f93a65a8257eb560003373ffffffffffffffffffffffffffffffffffffffff1681526020018373ffffffffffffffffffffffffffffffffffffffff1681526020018281526020016000a15b5050565b6000600160005060008373ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060005054905061027d565b91905056",
"gas_used": 0
}
},
"EventSubscribe": {
"input": {
"event_id": "testId"
},
"output": {
"sub_id": "1234123412341234123412341234123412341234123412341234123412341234"
}
},
"EventUnsubscribe": {
"input": {
"event_sub": "1234123412341234123412341234123412341234123412341234123412341234"
},
"output": {
"result": true
}
},
"EventPoll": {
"input": {
"event_sub": "1234123412341234123412341234123412341234123412341234123412341234"
},
"output": {
"events": [
{
"address": "0000000000000000000000009FC1ECFCAE2A554D4D1A000D0D80F748E66359E3",
"topics": [
"0FC28FCE5E54AC6458756FC24DC51A931CA7AD21440CFCA44933AE774ED5F70C",
"0000000000000000000000000000000000000000000000000000000000000005",
"0000000000000000000000000000000000000000000000000000000000000019",
"000000000000000000000000000000000000000000000000000000000000001E"
],
"data": "41646465642074776F206E756D62657273000000000000000000000000000000",
"height": 1
}
]
}
},
"TransactNameReg": {
"input": {
"priv_key": "6B72D45EB65F619F11CE580C8CAED9E0BADC774E9C9C334687A65DCBAD2C4151CB3688B7561D488A2A4834E1AEE9398BEF94844D8BDBBCA980C11E3654A45906",
"name": "testKey",
"data": "testValue",
"amount": 10000,
"fee": 0
},
"output": {
"tx_hash": "98B0D5162C7CB86FF94BE2C00469107B7CA51CF3",
"creates_contract": 0,
"contract_addr": ""
}
},
"GetNameRegEntry": {
"input": {
"name": "testKey"
},
"output": {
"name": "testKey",
"owner": "37236DF251AB70022B1DA351F08A20FB52443E37",
"data": "testData",
"expires": 250 }
},
"GetNameRegEntries": {
"input": {
"filters": []
},
"output": [11, {
"block_height": 1,
"names":[ {
"name": "testKey",
"owner": "37236DF251AB70022B1DA351F08A20FB52443E37",
"data": "testData",
"expires": 250
} ]
}]
}
}`
// serverDuration is a package-level timing knob.
// NOTE(review): it is not referenced anywhere in this file — confirm its
// consumers (units are not specified here) before changing or removing it.
var serverDuration uint = 100
// The types below mirror the shape of testDataJson: one *Data struct per
// RPC method, each pairing the method's input parameters (where the method
// takes any) with its expected output, plus TestData, which aggregates one
// field per method keyed by the JSON object names in the fixture.
type (
	// ChainData carries the chain fixture: the validator key material and
	// the genesis document.
	ChainData struct {
		PrivValidator *types.PrivValidator `json:"priv_validator"`
		Genesis       *stypes.GenesisDoc   `json:"genesis"`
	}
	// GetAccountData is the fixture for the get-account RPC.
	GetAccountData struct {
		Input  *edb.AddressParam `json:"input"`
		Output *account.Account  `json:"output"`
	}
	GetAccountsData struct {
		Input  *edb.AccountsParam `json:"input"`
		Output *ep.AccountList    `json:"output"`
	}
	GetStorageData struct {
		Input  *edb.AddressParam `json:"input"`
		Output *ep.Storage       `json:"output"`
	}
	GetStorageAtData struct {
		Input  *edb.StorageAtParam `json:"input"`
		Output *ep.StorageItem     `json:"output"`
	}
	// Output-only fixtures: these RPCs take no input parameters.
	GenPrivAccountData struct {
		Output *account.PrivAccount `json:"output"`
	}
	GetBlockchainInfoData struct {
		Output *ep.BlockchainInfo `json:"output"`
	}
	GetChainIdData struct {
		Output *ep.ChainId `json:"output"`
	}
	GetGenesisHashData struct {
		Output *ep.GenesisHash `json:"output"`
	}
	GetLatestBlockHeightData struct {
		Output *ep.LatestBlockHeight `json:"output"`
	}
	GetLatestBlockData struct {
		Output *types.Block `json:"output"`
	}
	GetBlockData struct {
		Input  *edb.HeightParam `json:"input"`
		Output *types.Block     `json:"output"`
	}
	GetBlocksData struct {
		Input  *edb.BlocksParam `json:"input"`
		Output *ep.Blocks       `json:"output"`
	}
	GetConsensusStateData struct {
		Output *ep.ConsensusState `json:"output"`
	}
	GetValidatorsData struct {
		Output *ep.ValidatorList `json:"output"`
	}
	GetNetworkInfoData struct {
		Output *ep.NetworkInfo `json:"output"`
	}
	GetClientVersionData struct {
		Output *ep.ClientVersion `json:"output"`
	}
	GetMonikerData struct {
		Output *ep.Moniker `json:"output"`
	}
	IsListeningData struct {
		Output *ep.Listening `json:"output"`
	}
	GetListenersData struct {
		Output *ep.Listeners `json:"output"`
	}
	GetPeersData struct {
		Output []*ep.Peer `json:"output"`
	}
	GetPeerData struct {
		Input  *edb.PeerParam `json:"input"`
		Output *ep.Peer       `json:"output"`
	}
	TransactData struct {
		Input  *edb.TransactParam `json:"input"`
		Output *ep.Receipt        `json:"output"`
	}
	TransactCreateData struct {
		Input  *edb.TransactParam `json:"input"`
		Output *ep.Receipt        `json:"output"`
	}
	GetUnconfirmedTxsData struct {
		Output *ep.UnconfirmedTxs `json:"output"`
	}
	CallCodeData struct {
		Input  *edb.CallCodeParam `json:"input"`
		Output *ep.Call           `json:"output"`
	}
	CallData struct {
		Input  *edb.CallParam `json:"input"`
		Output *ep.Call       `json:"output"`
	}
	EventSubscribeData struct {
		Input  *edb.EventIdParam `json:"input"`
		Output *ep.EventSub      `json:"output"`
	}
	EventUnsubscribeData struct {
		Input  *edb.SubIdParam `json:"input"`
		Output *ep.EventUnsub  `json:"output"`
	}
	TransactNameRegData struct {
		Input  *edb.TransactNameRegParam `json:"input"`
		Output *ep.Receipt               `json:"output"`
	}
	GetNameRegEntryData struct {
		Input  *edb.NameRegEntryParam `json:"input"`
		Output *types.NameRegEntry    `json:"output"`
	}
	GetNameRegEntriesData struct {
		Input  *edb.FilterListParam     `json:"input"`
		Output *ctypes.ResultListNames  `json:"output"`
	}
	// EventPollData is kept commented out alongside the disabled
	// EventPoll field of TestData below; re-enable both together.
	/*
		EventPollData struct {
			Input *edb.SubIdParam `json:"input"`
			Output *ep.PollResponse `json:"output"`
		}
	*/
	// TestData is the root fixture object decoded from testDataJson.
	// Only ChainData carries an explicit JSON tag; every other field
	// matches its JSON key by name.
	TestData struct {
		ChainData *ChainData `json:"chain_data"`
		GetAccount *GetAccountData
		GetAccounts *GetAccountsData
		GetStorage *GetStorageData
		GetStorageAt *GetStorageAtData
		GenPrivAccount *GenPrivAccountData
		GetBlockchainInfo *GetBlockchainInfoData
		GetChainId *GetChainIdData
		GetGenesisHash *GetGenesisHashData
		GetLatestBlockHeight *GetLatestBlockHeightData
		GetLatestBlock *GetLatestBlockData
		GetBlock *GetBlockData
		GetBlocks *GetBlocksData
		GetConsensusState *GetConsensusStateData
		GetValidators *GetValidatorsData
		GetNetworkInfo *GetNetworkInfoData
		GetClientVersion *GetClientVersionData
		GetMoniker *GetMonikerData
		IsListening *IsListeningData
		GetListeners *GetListenersData
		GetPeers *GetPeersData
		Transact *TransactData
		TransactCreate *TransactCreateData
		TransactNameReg *TransactNameRegData
		GetUnconfirmedTxs *GetUnconfirmedTxsData
		CallCode *CallCodeData
		Call *CallData
		EventSubscribe *EventSubscribeData
		EventUnsubscribe *EventUnsubscribeData
		GetNameRegEntry *GetNameRegEntryData
		GetNameRegEntries *GetNameRegEntriesData
		// GetPeer *GetPeerData
		// EventPoll *EventPollData
	}
)
// LoadTestData decodes the embedded testDataJson fixture into a TestData
// value via the eris-db tendermint codec, panicking on any decode error
// (the fixture is constant, so failure indicates a programmer mistake).
func LoadTestData() *TestData {
	data := &TestData{}
	if err := edb.NewTCodec().DecodeBytes(data, []byte(testDataJson)); err != nil {
		panic(err)
	}
	return data
}
|
package jvm
import (
"github.com/stretchr/testify/assert"
"testing"
)
func TestEmptyValueAsString(t *testing.T) {
cst := JConst{}
assert.Equal(t, cst.valueAsString(), "")
}
func TestValueAsString(t *testing.T) {
cst := JConst{value: []byte{'h', 'e', 'l', 'l', 'o'}}
assert.Equal(t, cst.valueAsString(), "hello")
}
func TestDumpValueNilArg(t *testing.T) {
cst := JConst{value: []byte{'a'}}
err := cst.dumpValue(nil)
assert.NotNil(t, err)
}
// TestDumpValueTooSmallArgType verifies that dumpValue returns an error
// when the destination variable's type is too small to hold the
// constant's raw value (4 bytes dumped into an int16).
// Bug fixed: the test body was empty, so the case silently passed
// without exercising dumpValue at all.
func TestDumpValueTooSmallArgType(t *testing.T) {
	cst := JConst{value: []byte{1, 1, 1, 1}}
	var res int16
	err := cst.dumpValue(&res)
	assert.NotNil(t, err)
}
empty test filled
package jvm
import (
"github.com/stretchr/testify/assert"
"testing"
)
// TestEmptyValueAsString checks that a zero-value JConst renders as the
// empty string.
func TestEmptyValueAsString(t *testing.T) {
	var cst JConst
	assert.Equal(t, cst.valueAsString(), "")
}
// TestValueAsString checks that the raw bytes of a JConst are rendered
// verbatim as a string.
func TestValueAsString(t *testing.T) {
	cst := JConst{value: []byte("hello")}
	assert.Equal(t, cst.valueAsString(), "hello")
}
// TestDumpValueNilArg checks that dumpValue rejects a nil destination
// with a non-nil error.
func TestDumpValueNilArg(t *testing.T) {
	cst := JConst{value: []byte{'a'}}
	assert.NotNil(t, cst.dumpValue(nil))
}
// TestDumpValueTooSmallArgType checks that dumpValue errors out when the
// destination type (int16) cannot hold the constant's 4-byte value.
func TestDumpValueTooSmallArgType(t *testing.T) {
	cst := JConst{value: []byte{1, 1, 1, 1}}
	var target int16
	assert.NotNil(t, cst.dumpValue(&target))
}
|
// Tideland Go REST Server Library - JSON Web Token - Unit Tests
//
// Copyright (C) 2016 Frank Mueller / Tideland / Oldenburg / Germany
//
// All rights reserved. Use of this source code is governed
// by the new BSD license.
package jwt_test
//--------------------
// IMPORTS
//--------------------
import (
	"testing"

	"github.com/tideland/golib/audit"
	"github.com/tideland/gorest/jwt"
)
//--------------------
// TESTS
//--------------------
// TestClaimsBasic tests the low level operations on claims: Contains,
// Get, Set, and Delete, first on an uninitialised (nil) Claims value and
// then on a freshly created one.
//
// Bugs fixed: "mothing, ok = ..." referenced an undeclared identifier
// (compile error; should be "nothing"), and the final assertion was the
// unconditional "assert.True(false)" where the intent — after the key was
// deleted twice above — is to assert the key is absent.
func TestClaimsBasic(t *testing.T) {
	assert := audit.NewTestingAssertion(t, true)
	// First with uninitialised claims: every operation must be a safe
	// no-op / negative answer on the nil value.
	var claims jwt.Claims
	ok := claims.Contains("foo")
	assert.False(ok)
	nothing, ok := claims.Get("foo")
	assert.Nil(nothing)
	assert.False(ok)
	old := claims.Set("foo", "bar")
	assert.Nil(old)
	old = claims.Delete("foo")
	assert.Nil(old)
	// Now initialise it.
	claims = jwt.NewClaims()
	ok = claims.Contains("foo")
	assert.False(ok)
	nothing, ok = claims.Get("foo")
	assert.Nil(nothing)
	assert.False(ok)
	// Set and Delete return the previous value (nil on first set).
	old = claims.Set("foo", "bar")
	assert.Nil(old)
	ok = claims.Contains("foo")
	assert.True(ok)
	foo, ok := claims.Get("foo")
	assert.Equal(foo, "bar")
	assert.True(ok)
	old = claims.Set("foo", "yadda")
	assert.Equal(old, "bar")
	// Finally delete it.
	old = claims.Delete("foo")
	assert.Equal(old, "yadda")
	old = claims.Delete("foo")
	assert.Nil(old)
	ok = claims.Contains("foo")
	assert.False(ok)
}
// EOF
More claims testing
// Tideland Go REST Server Library - JSON Web Token - Unit Tests
//
// Copyright (C) 2016 Frank Mueller / Tideland / Oldenburg / Germany
//
// All rights reserved. Use of this source code is governed
// by the new BSD license.
package jwt_test
//--------------------
// IMPORTS
//--------------------
import (
"testing"
"github.com/tideland/golib/audit"
"github.com/tideland/gorest/jwt"
)
//--------------------
// TESTS
//--------------------
// TestClaimsBasic tests the low level operations
// on claims: Contains, Get, Set, and Delete, first on an uninitialised
// (nil) Claims value and then on one created with jwt.NewClaims().
func TestClaimsBasic(t *testing.T) {
	assert := audit.NewTestingAssertion(t, true)
	assert.Logf("testing claims basic functions handling")
	// First with uninitialised claims: every operation must be a safe
	// no-op / negative answer on the nil value.
	var claims jwt.Claims
	ok := claims.Contains("foo")
	assert.False(ok)
	nothing, ok := claims.Get("foo")
	assert.Nil(nothing)
	assert.False(ok)
	old := claims.Set("foo", "bar")
	assert.Nil(old)
	old = claims.Delete("foo")
	assert.Nil(old)
	// Now initialise it.
	claims = jwt.NewClaims()
	ok = claims.Contains("foo")
	assert.False(ok)
	nothing, ok = claims.Get("foo")
	assert.Nil(nothing)
	assert.False(ok)
	// Set and Delete return the previous value (nil on first set).
	old = claims.Set("foo", "bar")
	assert.Nil(old)
	ok = claims.Contains("foo")
	assert.True(ok)
	foo, ok := claims.Get("foo")
	assert.Equal(foo, "bar")
	assert.True(ok)
	old = claims.Set("foo", "yadda")
	assert.Equal(old, "bar")
	// Finally delete it; a second delete yields nil and the key is gone.
	old = claims.Delete("foo")
	assert.Equal(old, "yadda")
	old = claims.Delete("foo")
	assert.Nil(old)
	ok = claims.Contains("foo")
	assert.False(ok)
}
// TestClaimsString tests the string operations on claims: GetString on a
// value stored as a string, and on a value stored as an int (which is
// returned in its string representation).
func TestClaimsString(t *testing.T) {
	assert := audit.NewTestingAssertion(t, true)
	assert.Logf("testing claims string handling")
	claims := jwt.NewClaims()
	old := claims.Set("foo", "bar")
	assert.Nil(old)
	foo, ok := claims.GetString("foo")
	assert.Equal(foo, "bar")
	assert.True(ok)
	// An int value comes back as its decimal string form.
	claims.Set("foo", 4711)
	foo, ok = claims.GetString("foo")
	assert.Equal(foo, "4711")
	assert.True(ok)
}
// EOF
|
// Copyright 2015 bs authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package log
import (
"bytes"
"encoding/json"
"fmt"
"github.com/tsuru/tsuru/app"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"runtime"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/fsouza/go-dockerclient"
dTesting "github.com/fsouza/go-dockerclient/testing"
"golang.org/x/net/websocket"
"gopkg.in/check.v1"
)
var _ = check.Suite(&S{})
// Test hooks the gocheck suites registered in this package into the
// standard "go test" runner.
func Test(t *testing.T) {
	check.TestingT(t)
}
// S is the gocheck test suite for the log forwarder. It holds the fake
// Docker server started per test and the ID of the container created in
// SetUpTest, which test messages reference via "docker/<id>".
type S struct {
	dockerServer *dTesting.DockerServer
	id           string
}
// SetUpTest starts a fresh fake Docker server and creates one container
// in it whose env vars (TSURU_APPNAME=coolappname, TSURU_PROCESSNAME=procx)
// the forwarder resolves when rewriting log lines. The container ID is
// kept in s.id for the tests to reference.
func (s *S) SetUpTest(c *check.C) {
	var err error
	s.dockerServer, err = dTesting.NewServer("127.0.0.1:0", nil, nil)
	c.Assert(err, check.IsNil)
	dockerClient, err := docker.NewClient(s.dockerServer.URL())
	c.Assert(err, check.IsNil)
	// The image must exist on the fake server before CreateContainer.
	err = dockerClient.PullImage(docker.PullImageOptions{Repository: "myimg"}, docker.AuthConfiguration{})
	c.Assert(err, check.IsNil)
	config := docker.Config{
		Image: "myimg",
		Cmd:   []string{"mycmd"},
		Env:   []string{"ENV1=val1", "TSURU_PROCESSNAME=procx", "TSURU_APPNAME=coolappname"},
	}
	opts := docker.CreateContainerOptions{Name: "myContName", Config: &config}
	cont, err := dockerClient.CreateContainer(opts)
	c.Assert(err, check.IsNil)
	s.id = cont.ID
}
// TearDownTest shuts down the per-test fake Docker server.
func (s *S) TearDownTest(c *check.C) {
	s.dockerServer.Stop()
}
// TestLogForwarderStartCachedAppName sends a syslog line tagged with the
// test container's ID over UDP and asserts the forwarder rewrites the
// tag to "<container-id> <appname>[<process>]" before relaying it to the
// configured forward address.
func (s *S) TestLogForwarderStartCachedAppName(c *check.C) {
	// Local UDP listener acting as the forward destination.
	addr, err := net.ResolveUDPAddr("udp", "0.0.0.0:0")
	c.Assert(err, check.IsNil)
	udpConn, err := net.ListenUDP("udp", addr)
	c.Assert(err, check.IsNil)
	lf := LogForwarder{
		BindAddress:      "udp://0.0.0.0:59317",
		ForwardAddresses: []string{"udp://" + udpConn.LocalAddr().String()},
		DockerEndpoint:   s.dockerServer.URL(),
	}
	err = lf.Start()
	c.Assert(err, check.IsNil)
	defer lf.stop()
	conn, err := net.Dial("udp", "127.0.0.1:59317")
	c.Assert(err, check.IsNil)
	defer conn.Close()
	// Incoming line carries the raw "docker/<id>" tag.
	msg := []byte(fmt.Sprintf("<30>2015-06-05T16:13:47Z myhost docker/%s: mymsg\n", s.id))
	_, err = conn.Write(msg)
	c.Assert(err, check.IsNil)
	buffer := make([]byte, 1024)
	// Bound the read so a broken forwarder fails fast instead of hanging.
	udpConn.SetReadDeadline(time.Now().Add(2 * time.Second))
	n, err := udpConn.Read(buffer)
	c.Assert(err, check.IsNil)
	c.Assert(buffer[:n], check.DeepEquals, []byte(fmt.Sprintf("<30>2015-06-05T16:13:47Z %s coolappname[procx]: mymsg\n", s.id)))
}
// TestLogForwarderStartDockerAppName exercises the same rewrite path as
// TestLogForwarderStartCachedAppName.
// NOTE(review): this body is effectively identical to the cached-name
// test; presumably it is meant to cover the name resolution via the
// Docker endpoint rather than a warm cache — confirm against the
// LogForwarder implementation.
func (s *S) TestLogForwarderStartDockerAppName(c *check.C) {
	addr, err := net.ResolveUDPAddr("udp", "0.0.0.0:0")
	c.Assert(err, check.IsNil)
	udpConn, err := net.ListenUDP("udp", addr)
	c.Assert(err, check.IsNil)
	lf := LogForwarder{
		BindAddress:      "udp://0.0.0.0:59317",
		ForwardAddresses: []string{"udp://" + udpConn.LocalAddr().String()},
		DockerEndpoint:   s.dockerServer.URL(),
	}
	err = lf.Start()
	c.Assert(err, check.IsNil)
	defer lf.stop()
	conn, err := net.Dial("udp", "127.0.0.1:59317")
	c.Assert(err, check.IsNil)
	defer conn.Close()
	msg := []byte(fmt.Sprintf("<30>2015-06-05T16:13:47Z myhost docker/%s: mymsg\n", s.id))
	_, err = conn.Write(msg)
	c.Assert(err, check.IsNil)
	buffer := make([]byte, 1024)
	// Bounded read: fail within 2s rather than hanging the suite.
	udpConn.SetReadDeadline(time.Now().Add(2 * time.Second))
	n, err := udpConn.Read(buffer)
	c.Assert(err, check.IsNil)
	expected := []byte(fmt.Sprintf("<30>2015-06-05T16:13:47Z %s coolappname[procx]: mymsg\n", s.id))
	c.Assert(buffer[:n], check.DeepEquals, expected)
}
// TestLogForwarderWSForwarderHTTP runs the websocket-forwarding scenario
// against a plain HTTP test server.
func (s *S) TestLogForwarderWSForwarderHTTP(c *check.C) {
	testLogForwarderWSForwarder(s, c, httptest.NewServer)
}
// testLogForwarderWSForwarder drives the tsuru websocket forwarding path:
// it starts a websocket server via serverFunc, points the forwarder's
// TsuruEndpoint at it, sends two syslog lines over UDP, and then checks
// the server received the bearer token and both messages decoded as
// app.Applog JSON lines. serverFunc is parameterized so the same scenario
// can run against HTTP and (potentially) HTTPS servers.
func testLogForwarderWSForwarder(
	s *S, c *check.C,
	serverFunc func(handler http.Handler) *httptest.Server,
) {
	var body bytes.Buffer
	var serverMut sync.Mutex
	var req *http.Request
	srv := serverFunc(websocket.Handler(func(ws *websocket.Conn) {
		serverMut.Lock()
		defer serverMut.Unlock()
		req = ws.Request()
		// Accumulate everything the forwarder streams to us.
		io.Copy(&body, ws)
	}))
	defer srv.Close()
	lf := LogForwarder{
		BindAddress:    "udp://0.0.0.0:59317",
		TsuruEndpoint:  srv.URL,
		TsuruToken:     "mytoken",
		DockerEndpoint: s.dockerServer.URL(),
	}
	err := lf.Start()
	c.Assert(err, check.IsNil)
	conn, err := net.Dial("udp", "127.0.0.1:59317")
	c.Assert(err, check.IsNil)
	defer conn.Close()
	baseTime, err := time.Parse(time.RFC3339, "2015-06-05T16:13:47Z")
	c.Assert(err, check.IsNil)
	_, err = conn.Write([]byte(fmt.Sprintf("<30>2015-06-05T16:13:47Z myhost docker/%s: mymsg\n", s.id)))
	c.Assert(err, check.IsNil)
	_, err = conn.Write([]byte(fmt.Sprintf("<30>2015-06-05T16:13:47Z myhost docker/%s: mymsg2\n", s.id)))
	c.Assert(err, check.IsNil)
	// Give the asynchronous forwarder time to deliver before stopping.
	time.Sleep(2 * time.Second)
	lf.stop()
	serverMut.Lock()
	parts := strings.Split(body.String(), "\n")
	c.Assert(req, check.NotNil)
	c.Assert(req.Header.Get("Authorization"), check.Equals, "bearer mytoken")
	serverMut.Unlock()
	// Two JSON lines plus the empty element after the trailing newline.
	c.Assert(parts, check.HasLen, 3)
	c.Assert(parts[2], check.Equals, "")
	var logLine app.Applog
	err = json.Unmarshal([]byte(parts[0]), &logLine)
	c.Assert(err, check.IsNil)
	c.Assert(logLine, check.DeepEquals, app.Applog{
		Date:    baseTime,
		Message: "mymsg",
		Source:  "procx",
		AppName: "coolappname",
		Unit:    s.id,
	})
	err = json.Unmarshal([]byte(parts[1]), &logLine)
	c.Assert(err, check.IsNil)
	c.Assert(logLine, check.DeepEquals, app.Applog{
		Date:    baseTime,
		Message: "mymsg2",
		Source:  "procx",
		AppName: "coolappname",
		Unit:    s.id,
	})
}
// TestLogForwarderStartBindError checks that Start rejects an
// unsupported bind-address scheme with a descriptive error.
func (s *S) TestLogForwarderStartBindError(c *check.C) {
	lf := LogForwarder{
		BindAddress:    "xudp://0.0.0.0:59317",
		DockerEndpoint: s.dockerServer.URL(),
	}
	err := lf.Start()
	c.Assert(err, check.ErrorMatches, `invalid protocol "xudp", expected tcp or udp`)
}
// TestLogForwarderForwardConnError checks that Start surfaces dial
// failures for forward addresses: an unknown network scheme and an
// out-of-range TCP port.
func (s *S) TestLogForwarderForwardConnError(c *check.C) {
	lf := LogForwarder{
		BindAddress:      "udp://0.0.0.0:59317",
		ForwardAddresses: []string{"xudp://127.0.0.1:1234"},
	}
	err := lf.Start()
	c.Assert(err, check.ErrorMatches, `\[log forwarder\] unable to connect to "xudp://127.0.0.1:1234": dial xudp: unknown network xudp`)
	lf = LogForwarder{
		BindAddress:      "udp://0.0.0.0:59317",
		ForwardAddresses: []string{"tcp://localhost:99999"},
	}
	err = lf.Start()
	c.Assert(err, check.ErrorMatches, `\[log forwarder\] unable to connect to "tcp://localhost:99999": dial tcp: invalid port 99999`)
}
// BenchmarkMessagesBroadcast measures end-to-end throughput of the
// forwarder broadcasting each message to two UDP receivers plus a
// websocket sink, with four concurrent TCP senders producing c.N
// messages in total.
func (s *S) BenchmarkMessagesBroadcast(c *check.C) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	// startReceiver opens a throwaway UDP socket to act as a forward
	// destination; its contents are never read during the benchmark.
	startReceiver := func() net.Conn {
		addr, err := net.ResolveUDPAddr("udp", "0.0.0.0:0")
		c.Assert(err, check.IsNil)
		udpConn, err := net.ListenUDP("udp", addr)
		c.Assert(err, check.IsNil)
		return udpConn
	}
	forwardedConns := []net.Conn{startReceiver(), startReceiver()}
	// Websocket sink that discards whatever arrives.
	srv := httptest.NewServer(websocket.Handler(func(ws *websocket.Conn) {
		io.Copy(ioutil.Discard, ws)
	}))
	defer srv.Close()
	lf := LogForwarder{
		BindAddress: "tcp://0.0.0.0:59317",
		ForwardAddresses: []string{
			"udp://" + forwardedConns[0].LocalAddr().String(),
			"udp://" + forwardedConns[1].LocalAddr().String(),
		},
		TsuruEndpoint:  srv.URL,
		TsuruToken:     "mytoken",
		DockerEndpoint: s.dockerServer.URL(),
	}
	err := lf.Start()
	c.Assert(err, check.IsNil)
	// sender writes n copies of the same syslog line over one TCP conn.
	sender := func(n int) {
		conn, err := net.Dial("tcp", "127.0.0.1:59317")
		c.Assert(err, check.IsNil)
		defer conn.Close()
		msg := []byte(fmt.Sprintf("<30>2015-06-05T16:13:47Z myhost docker/%s: mymsg\n", s.id))
		for i := 0; i < n; i++ {
			_, err = conn.Write(msg)
			c.Assert(err, check.IsNil)
		}
	}
	c.ResetTimer()
	goroutines := 4
	iterations := c.N
	for i := 0; i < goroutines; i++ {
		n := iterations / goroutines
		if i == 0 {
			// First sender absorbs the remainder so exactly c.N
			// messages are sent in total.
			n += iterations % goroutines
		}
		go sender(n)
	}
	// Spin until the forwarder has counted every message, then stop.
	for {
		val := atomic.LoadInt64(&lf.messagesCounter)
		if val == int64(iterations) {
			break
		}
		time.Sleep(10 * time.Microsecond)
	}
	lf.stop()
}
log: Add TestLogForwarderWSForwarderHTTPS test
Which tests log forwarding with an HTTPS Tsuru endpoint. This fails because
the URL scheme is hard coded with `ws://` even when the port is `443` and
`connect()` uses a TLS dialer.
// Copyright 2015 bs authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package log
import (
"bytes"
"encoding/json"
"fmt"
"github.com/tsuru/tsuru/app"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"runtime"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/fsouza/go-dockerclient"
dTesting "github.com/fsouza/go-dockerclient/testing"
"golang.org/x/net/websocket"
"gopkg.in/check.v1"
)
// Register the suite with gocheck.
var _ = check.Suite(&S{})

// Test is the standard-library entry point that runs the gocheck suite.
func Test(t *testing.T) {
	check.TestingT(t)
}
// S holds per-test fixtures: a fake Docker server and the ID of the
// container created in SetUpTest.
type S struct {
	dockerServer *dTesting.DockerServer
	id           string // ID of the container created in SetUpTest
}
// SetUpTest starts a fake Docker server and creates one container whose
// environment carries the Tsuru app and process names the tests expect.
func (s *S) SetUpTest(c *check.C) {
	server, err := dTesting.NewServer("127.0.0.1:0", nil, nil)
	c.Assert(err, check.IsNil)
	s.dockerServer = server
	client, err := docker.NewClient(server.URL())
	c.Assert(err, check.IsNil)
	err = client.PullImage(docker.PullImageOptions{Repository: "myimg"}, docker.AuthConfiguration{})
	c.Assert(err, check.IsNil)
	container, err := client.CreateContainer(docker.CreateContainerOptions{
		Name: "myContName",
		Config: &docker.Config{
			Image: "myimg",
			Cmd:   []string{"mycmd"},
			Env:   []string{"ENV1=val1", "TSURU_PROCESSNAME=procx", "TSURU_APPNAME=coolappname"},
		},
	})
	c.Assert(err, check.IsNil)
	s.id = container.ID
}
// TearDownTest shuts down the fake Docker server started in SetUpTest.
func (s *S) TearDownTest(c *check.C) {
	s.dockerServer.Stop()
}
// checkLogForwarderStart starts a LogForwarder bound to a local UDP port,
// sends one syslog line referencing the test container, and asserts the
// line is forwarded with the container ID as host and the app/process
// names resolved from Docker ("coolappname[procx]").
func (s *S) checkLogForwarderStart(c *check.C) {
	addr, err := net.ResolveUDPAddr("udp", "0.0.0.0:0")
	c.Assert(err, check.IsNil)
	udpConn, err := net.ListenUDP("udp", addr)
	c.Assert(err, check.IsNil)
	lf := LogForwarder{
		BindAddress:      "udp://0.0.0.0:59317",
		ForwardAddresses: []string{"udp://" + udpConn.LocalAddr().String()},
		DockerEndpoint:   s.dockerServer.URL(),
	}
	err = lf.Start()
	c.Assert(err, check.IsNil)
	defer lf.stop()
	conn, err := net.Dial("udp", "127.0.0.1:59317")
	c.Assert(err, check.IsNil)
	defer conn.Close()
	msg := []byte(fmt.Sprintf("<30>2015-06-05T16:13:47Z myhost docker/%s: mymsg\n", s.id))
	_, err = conn.Write(msg)
	c.Assert(err, check.IsNil)
	buffer := make([]byte, 1024)
	udpConn.SetReadDeadline(time.Now().Add(2 * time.Second))
	n, err := udpConn.Read(buffer)
	c.Assert(err, check.IsNil)
	expected := []byte(fmt.Sprintf("<30>2015-06-05T16:13:47Z %s coolappname[procx]: mymsg\n", s.id))
	c.Assert(buffer[:n], check.DeepEquals, expected)
}

// TestLogForwarderStartCachedAppName covers the cached app-name lookup path.
// NOTE(review): the two tests below were byte-identical; the shared body now
// lives in checkLogForwarderStart. If the "cached" variant was meant to send
// a second message so the name lookup hits the cache, that still needs adding.
func (s *S) TestLogForwarderStartCachedAppName(c *check.C) {
	s.checkLogForwarderStart(c)
}

// TestLogForwarderStartDockerAppName covers resolving the app name from the
// Docker API.
func (s *S) TestLogForwarderStartDockerAppName(c *check.C) {
	s.checkLogForwarderStart(c)
}
// TestLogForwarderWSForwarderHTTP checks websocket forwarding against a
// plain-HTTP Tsuru endpoint.
func (s *S) TestLogForwarderWSForwarderHTTP(c *check.C) {
	testLogForwarderWSForwarder(s, c, httptest.NewServer)
}

// TestLogForwarderWSForwarderHTTPS checks websocket forwarding against an
// HTTPS Tsuru endpoint.
func (s *S) TestLogForwarderWSForwarderHTTPS(c *check.C) {
	testLogForwarderWSForwarder(s, c, httptest.NewTLSServer)
}
// testLogForwarderWSForwarder drives the websocket (Tsuru) forwarding path:
// it starts a forwarder pointed at a fake Tsuru endpoint built by serverFunc
// (plain HTTP or TLS), sends two syslog lines over UDP, and asserts both
// arrive as JSON-encoded app log entries with the bearer token set.
func testLogForwarderWSForwarder(
	s *S, c *check.C,
	serverFunc func(handler http.Handler) *httptest.Server,
) {
	var body bytes.Buffer
	var serverMut sync.Mutex
	var req *http.Request
	srv := serverFunc(websocket.Handler(func(ws *websocket.Conn) {
		// Runs on the server goroutine; the mutex guards req and body
		// against the assertions below.
		serverMut.Lock()
		defer serverMut.Unlock()
		req = ws.Request()
		io.Copy(&body, ws)
	}))
	defer srv.Close()
	lf := LogForwarder{
		BindAddress:    "udp://0.0.0.0:59317",
		TsuruEndpoint:  srv.URL,
		TsuruToken:     "mytoken",
		DockerEndpoint: s.dockerServer.URL(),
	}
	err := lf.Start()
	c.Assert(err, check.IsNil)
	conn, err := net.Dial("udp", "127.0.0.1:59317")
	c.Assert(err, check.IsNil)
	defer conn.Close()
	baseTime, err := time.Parse(time.RFC3339, "2015-06-05T16:13:47Z")
	c.Assert(err, check.IsNil)
	_, err = conn.Write([]byte(fmt.Sprintf("<30>2015-06-05T16:13:47Z myhost docker/%s: mymsg\n", s.id)))
	c.Assert(err, check.IsNil)
	_, err = conn.Write([]byte(fmt.Sprintf("<30>2015-06-05T16:13:47Z myhost docker/%s: mymsg2\n", s.id)))
	c.Assert(err, check.IsNil)
	// Give the forwarder time to flush both messages before stopping.
	time.Sleep(2 * time.Second)
	lf.stop()
	serverMut.Lock()
	parts := strings.Split(body.String(), "\n")
	c.Assert(req, check.NotNil)
	c.Assert(req.Header.Get("Authorization"), check.Equals, "bearer mytoken")
	serverMut.Unlock()
	// Two JSON lines plus the empty string after the trailing "\n".
	c.Assert(parts, check.HasLen, 3)
	c.Assert(parts[2], check.Equals, "")
	var logLine app.Applog
	err = json.Unmarshal([]byte(parts[0]), &logLine)
	c.Assert(err, check.IsNil)
	c.Assert(logLine, check.DeepEquals, app.Applog{
		Date:    baseTime,
		Message: "mymsg",
		Source:  "procx",
		AppName: "coolappname",
		Unit:    s.id,
	})
	err = json.Unmarshal([]byte(parts[1]), &logLine)
	c.Assert(err, check.IsNil)
	c.Assert(logLine, check.DeepEquals, app.Applog{
		Date:    baseTime,
		Message: "mymsg2",
		Source:  "procx",
		AppName: "coolappname",
		Unit:    s.id,
	})
}
// TestLogForwarderStartBindError ensures Start rejects bind addresses whose
// scheme is neither tcp nor udp.
func (s *S) TestLogForwarderStartBindError(c *check.C) {
	forwarder := LogForwarder{
		DockerEndpoint: s.dockerServer.URL(),
		BindAddress:    "xudp://0.0.0.0:59317",
	}
	startErr := forwarder.Start()
	c.Assert(startErr, check.ErrorMatches, `invalid protocol "xudp", expected tcp or udp`)
}
// TestLogForwarderForwardConnError ensures Start surfaces a connection
// error for each kind of bad forward address.
func (s *S) TestLogForwarderForwardConnError(c *check.C) {
	tests := []struct {
		addr        string
		expectedErr string
	}{
		{"xudp://127.0.0.1:1234", `\[log forwarder\] unable to connect to "xudp://127.0.0.1:1234": dial xudp: unknown network xudp`},
		{"tcp://localhost:99999", `\[log forwarder\] unable to connect to "tcp://localhost:99999": dial tcp: invalid port 99999`},
	}
	for _, test := range tests {
		lf := LogForwarder{
			BindAddress:      "udp://0.0.0.0:59317",
			ForwardAddresses: []string{test.addr},
		}
		c.Assert(lf.Start(), check.ErrorMatches, test.expectedErr)
	}
}
// BenchmarkMessagesBroadcast measures end-to-end throughput: c.N syslog
// lines are written over TCP by four concurrent senders and fanned out to
// two UDP receivers plus a websocket (Tsuru) sink.
func (s *S) BenchmarkMessagesBroadcast(c *check.C) {
	// Run with 4 procs; restore the previous GOMAXPROCS value on return.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	// startReceiver opens a UDP socket on an ephemeral port that simply
	// absorbs forwarded datagrams.
	startReceiver := func() net.Conn {
		addr, err := net.ResolveUDPAddr("udp", "0.0.0.0:0")
		c.Assert(err, check.IsNil)
		udpConn, err := net.ListenUDP("udp", addr)
		c.Assert(err, check.IsNil)
		return udpConn
	}
	forwardedConns := []net.Conn{startReceiver(), startReceiver()}
	// Websocket sink standing in for the Tsuru log endpoint; payload discarded.
	srv := httptest.NewServer(websocket.Handler(func(ws *websocket.Conn) {
		io.Copy(ioutil.Discard, ws)
	}))
	defer srv.Close()
	lf := LogForwarder{
		BindAddress: "tcp://0.0.0.0:59317",
		ForwardAddresses: []string{
			"udp://" + forwardedConns[0].LocalAddr().String(),
			"udp://" + forwardedConns[1].LocalAddr().String(),
		},
		TsuruEndpoint:  srv.URL,
		TsuruToken:     "mytoken",
		DockerEndpoint: s.dockerServer.URL(),
	}
	err := lf.Start()
	c.Assert(err, check.IsNil)
	// sender writes n copies of a fixed syslog line on one TCP connection.
	sender := func(n int) {
		conn, err := net.Dial("tcp", "127.0.0.1:59317")
		c.Assert(err, check.IsNil)
		defer conn.Close()
		msg := []byte(fmt.Sprintf("<30>2015-06-05T16:13:47Z myhost docker/%s: mymsg\n", s.id))
		for i := 0; i < n; i++ {
			_, err = conn.Write(msg)
			c.Assert(err, check.IsNil)
		}
	}
	c.ResetTimer()
	goroutines := 4
	iterations := c.N
	for i := 0; i < goroutines; i++ {
		n := iterations / goroutines
		if i == 0 {
			// First sender picks up the remainder so exactly c.N
			// messages are sent in total.
			n += iterations % goroutines
		}
		go sender(n)
	}
	// Poll the forwarder's message counter until every message has been
	// processed, then shut it down.
	for {
		val := atomic.LoadInt64(&lf.messagesCounter)
		if val == int64(iterations) {
			break
		}
		time.Sleep(10 * time.Microsecond)
	}
	lf.stop()
}
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logs
import (
"flag"
"fmt"
"strings"
"github.com/go-logr/logr"
"github.com/spf13/pflag"
"k8s.io/component-base/logs/sanitization"
"k8s.io/klog/v2"
)
const (
	// logFormatFlagName is the name of the flag selecting the log format.
	logFormatFlagName = "logging-format"
	// defaultLogFormat is the format used when the flag is not set.
	defaultLogFormat = "text"
)

// supportedLogsFlags lists the logs flags (k8s.io/klog +
// k8s.io/component-base/logs) honored by ALL logging formats.
var supportedLogsFlags = map[string]struct{}{
	"v": {},
	// TODO: support vmodule after 1.19 Alpha
}
// Options has klog format parameters.
type Options struct {
	// LogFormat selects the output format registered in logRegistry;
	// defaults to "text".
	LogFormat string
	// LogSanitization enables redaction of fields tagged as sensitive.
	LogSanitization bool
}
// NewOptions returns new Options preset to the default ("text") log format.
func NewOptions() *Options {
	return &Options{
		LogFormat: defaultLogFormat,
	}
}
// Validate returns configuration errors: an unknown log format, and any
// flag that was explicitly set but is not honored by non-default formats.
func (o *Options) Validate() []error {
	errs := []error{}
	if o.LogFormat != defaultLogFormat {
		// Non-default formats only honor supportedLogsFlags; reject any
		// other logging flag the user explicitly set.
		for _, name := range unsupportedLoggingFlags() {
			if flagIsSet(name) {
				errs = append(errs, fmt.Errorf("non-default logging format doesn't honor flag: %s", name))
			}
		}
	}
	if _, err := o.Get(); err != nil {
		errs = append(errs, fmt.Errorf("unsupported log format: %s", o.LogFormat))
	}
	return errs
}
// flagIsSet reports whether the named flag currently differs from its
// default, checking the stdlib flag set first and pflag second. It panics
// when the flag is registered in neither set (a programming error).
func flagIsSet(name string) bool {
	if f := flag.Lookup(name); f != nil {
		return f.Value.String() != f.DefValue
	}
	if pf := pflag.Lookup(name); pf != nil {
		return pf.Value.String() != pf.DefValue
	}
	panic("failed to lookup unsupported log flag")
}
// AddFlags registers the logging-format and sanitization flags on fs.
func (o *Options) AddFlags(fs *pflag.FlagSet) {
	unsupportedFlags := fmt.Sprintf("--%s", strings.Join(unsupportedLoggingFlags(), ", --"))
	formats := fmt.Sprintf(`"%s"`, strings.Join(logRegistry.List(), `", "`))
	fs.StringVar(&o.LogFormat, logFormatFlagName, defaultLogFormat, fmt.Sprintf("Sets the log format. Permitted formats: %s.\nNon-default formats don't honor these flags: %s.\nNon-default choices are currently alpha and subject to change without warning.", formats, unsupportedFlags))
	// No new log formats may be registered after the flag description
	// above has been generated, so freeze the registry here.
	logRegistry.Freeze()
	fs.BoolVar(&o.LogSanitization, "experimental-logging-sanitization", o.LogSanitization, `[Experimental] When enabled prevents logging of fields tagged as sensitive (passwords, keys, tokens).
Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production.`)
}
// Apply configures klog from the selected LogFormat and enables the
// sanitizing filter when requested.
func (o *Options) Apply() {
	// An unknown format yields a nil logger, which keeps klog's default.
	logger, _ := o.Get()
	klog.SetLogger(logger)
	if o.LogSanitization {
		klog.SetLogFilter(&sanitization.SanitizingFilter{})
	}
}
// Get returns the logger registered for o.LogFormat, or an error when the
// format is not in the registry.
func (o *Options) Get() (logr.Logger, error) {
	return logRegistry.Get(o.LogFormat)
}
// unsupportedLoggingFlags lists every logging flag (from k8s.io/klog and
// k8s.io/component-base/logs) that non-default log formats do not honor.
// klog registers its flags with underscores (e.g. log_dir); normalize them
// to dashes so the names match their pflag form both in the
// --logging-format help text and in Validate's flagIsSet lookups.
func unsupportedLoggingFlags() []string {
	allFlags := []string{}
	// k8s.io/klog flags
	fs := &flag.FlagSet{}
	klog.InitFlags(fs)
	fs.VisitAll(func(flag *flag.Flag) {
		if _, found := supportedLogsFlags[flag.Name]; !found {
			allFlags = append(allFlags, strings.Replace(flag.Name, "_", "-", -1))
		}
	})
	// k8s.io/component-base/logs flags (already dash-separated)
	pfs := &pflag.FlagSet{}
	AddFlags(pfs)
	pfs.VisitAll(func(flag *pflag.Flag) {
		if _, found := supportedLogsFlags[flag.Name]; !found {
			allFlags = append(allFlags, flag.Name)
		}
	})
	return allFlags
}
Fix flag names in the --logging-format description to use the unified (dash-separated) form
Signed-off-by: chymy <chang.min1@zte.com.cn>
Kubernetes-commit: 716865c7de607e6a95400ef8a4736082f91bdfee
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logs
import (
"flag"
"fmt"
"strings"
"github.com/go-logr/logr"
"github.com/spf13/pflag"
"k8s.io/component-base/logs/sanitization"
"k8s.io/klog/v2"
)
const (
	// logFormatFlagName is the name of the flag selecting the log format.
	logFormatFlagName = "logging-format"
	// defaultLogFormat is the format used when the flag is not set.
	defaultLogFormat = "text"
)

// supportedLogsFlags lists the logs flags (k8s.io/klog +
// k8s.io/component-base/logs) honored by ALL logging formats.
var supportedLogsFlags = map[string]struct{}{
	"v": {},
	// TODO: support vmodule after 1.19 Alpha
}
// Options has klog format parameters.
type Options struct {
	// LogFormat selects the output format registered in logRegistry;
	// defaults to "text".
	LogFormat string
	// LogSanitization enables redaction of fields tagged as sensitive.
	LogSanitization bool
}
// NewOptions returns new Options preset to the default ("text") log format.
func NewOptions() *Options {
	return &Options{
		LogFormat: defaultLogFormat,
	}
}
// Validate returns configuration errors: an unknown log format, and any
// flag that was explicitly set but is not honored by non-default formats.
func (o *Options) Validate() []error {
	errs := []error{}
	if o.LogFormat != defaultLogFormat {
		// Non-default formats only honor supportedLogsFlags; reject any
		// other logging flag the user explicitly set.
		for _, name := range unsupportedLoggingFlags() {
			if flagIsSet(name) {
				errs = append(errs, fmt.Errorf("non-default logging format doesn't honor flag: %s", name))
			}
		}
	}
	if _, err := o.Get(); err != nil {
		errs = append(errs, fmt.Errorf("unsupported log format: %s", o.LogFormat))
	}
	return errs
}
// flagIsSet reports whether the named flag currently differs from its
// default, checking the stdlib flag set first and pflag second. It panics
// when the flag is registered in neither set (a programming error).
func flagIsSet(name string) bool {
	if f := flag.Lookup(name); f != nil {
		return f.Value.String() != f.DefValue
	}
	if pf := pflag.Lookup(name); pf != nil {
		return pf.Value.String() != pf.DefValue
	}
	panic("failed to lookup unsupported log flag")
}
// AddFlags registers the logging-format and sanitization flags on fs.
func (o *Options) AddFlags(fs *pflag.FlagSet) {
	unsupportedFlags := fmt.Sprintf("--%s", strings.Join(unsupportedLoggingFlags(), ", --"))
	formats := fmt.Sprintf(`"%s"`, strings.Join(logRegistry.List(), `", "`))
	fs.StringVar(&o.LogFormat, logFormatFlagName, defaultLogFormat, fmt.Sprintf("Sets the log format. Permitted formats: %s.\nNon-default formats don't honor these flags: %s.\nNon-default choices are currently alpha and subject to change without warning.", formats, unsupportedFlags))
	// No new log formats may be registered after the flag description
	// above has been generated, so freeze the registry here.
	logRegistry.Freeze()
	fs.BoolVar(&o.LogSanitization, "experimental-logging-sanitization", o.LogSanitization, `[Experimental] When enabled prevents logging of fields tagged as sensitive (passwords, keys, tokens).
Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production.`)
}
// Apply configures klog from the selected LogFormat and enables the
// sanitizing filter when requested.
func (o *Options) Apply() {
	// An unknown format yields a nil logger, which keeps klog's default.
	logger, _ := o.Get()
	klog.SetLogger(logger)
	if o.LogSanitization {
		klog.SetLogFilter(&sanitization.SanitizingFilter{})
	}
}
// Get returns the logger registered for o.LogFormat, or an error when the
// format is not in the registry.
func (o *Options) Get() (logr.Logger, error) {
	return logRegistry.Get(o.LogFormat)
}
// unsupportedLoggingFlags lists every logging flag (from k8s.io/klog and
// k8s.io/component-base/logs) that non-default log formats do not honor.
// klog names are normalized from underscores to dashes to match pflag.
func unsupportedLoggingFlags() []string {
	allFlags := []string{}
	// k8s.io/klog flags
	klogFlags := &flag.FlagSet{}
	klog.InitFlags(klogFlags)
	klogFlags.VisitAll(func(f *flag.Flag) {
		if _, supported := supportedLogsFlags[f.Name]; !supported {
			allFlags = append(allFlags, strings.Replace(f.Name, "_", "-", -1))
		}
	})
	// k8s.io/component-base/logs flags (already dash-separated)
	logsFlags := &pflag.FlagSet{}
	AddFlags(logsFlags)
	logsFlags.VisitAll(func(f *pflag.Flag) {
		if _, supported := supportedLogsFlags[f.Name]; !supported {
			allFlags = append(allFlags, f.Name)
		}
	})
	return allFlags
}
|
package mackerel
import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"net/http/httputil"
	"net/url"
	"time"

	"github.com/mackerelio/golib/logging"
	mkr "github.com/mackerelio/mackerel-client-go"
)
// logger is the package-level logger used for request/response tracing.
var logger = logging.GetLogger("api")

// API is the main interface of Mackerel API.
type API struct {
	BaseURL        *url.URL    // root of the Mackerel API
	APIKey         string      // sent as X-Api-Key on every request
	Verbose        bool        // when true, dump requests/responses to the trace log
	UA             string      // optional User-Agent override (see getUA)
	DefaultHeaders http.Header // extra headers added to every request
	c              *mkr.Client // underlying mackerel-client-go client
}
// Error represents an API error.
type Error struct {
	StatusCode int    // HTTP status returned by the API
	Message    string // short description of the failure
}

// Error implements the error interface.
func (aperr *Error) Error() string {
	return fmt.Sprintf("API error. status: %d, msg: %s", aperr.StatusCode, aperr.Message)
}

// IsClientError reports whether the error is an HTTP 4xx.
func (aperr *Error) IsClientError() bool {
	return 400 <= aperr.StatusCode && aperr.StatusCode < 500
}
// IsClientError returns true if err is (or wraps) a Mackerel API error
// with an HTTP 4xx status code.
func IsClientError(err error) bool {
	// errors.As also matches wrapped errors, unlike a bare type assertion.
	var apiErr *mkr.APIError
	if !errors.As(err, &apiErr) {
		return false
	}
	return 400 <= apiErr.StatusCode && apiErr.StatusCode < 500
}
// IsServerError reports whether the error is an HTTP 5xx.
func (aperr *Error) IsServerError() bool {
	return 500 <= aperr.StatusCode && aperr.StatusCode < 600
}
// IsServerError returns true if err is (or wraps) a Mackerel API error
// with an HTTP 5xx status code.
func IsServerError(err error) bool {
	// errors.As also matches wrapped errors, unlike a bare type assertion.
	var apiErr *mkr.APIError
	if !errors.As(err, &apiErr) {
		return false
	}
	return 500 <= apiErr.StatusCode && apiErr.StatusCode < 600
}
// apiError builds an *Error with the given HTTP status code and message.
func apiError(code int, msg string) *Error {
	return &Error{
		StatusCode: code,
		Message:    msg,
	}
}

// InfoError represents an error of log level INFO.
type InfoError struct {
	Message string
}

// Error implements the error interface.
func (e *InfoError) Error() string {
	return e.Message
}

// infoError builds an *InfoError with the given message.
func infoError(msg string) *InfoError {
	return &InfoError{
		Message: msg,
	}
}
// NewAPI creates an API bound to rawurl, authenticating with apiKey.
// verbose enables request/response dumping to the trace log.
func NewAPI(rawurl string, apiKey string, verbose bool) (*API, error) {
	baseURL, err := url.Parse(rawurl)
	if err != nil {
		return nil, err
	}
	client, err := mkr.NewClientWithOptions(apiKey, rawurl, verbose)
	if err != nil {
		return nil, err
	}
	return &API{
		BaseURL: baseURL,
		APIKey:  apiKey,
		Verbose: verbose,
		c:       client,
	}, nil
}
// urlFor builds a URL under BaseURL with the given path and raw query.
func (api *API) urlFor(path string, query string) *url.URL {
	// Re-parse the base URL to obtain an independent copy we can mutate;
	// the error is ignored because BaseURL is already a valid URL.
	u, _ := url.Parse(api.BaseURL.String())
	u.Path = path
	u.RawQuery = query
	return u
}
// getUA returns the configured User-Agent, falling back to the default
// agent identifier when none is set.
func (api *API) getUA() string {
	ua := api.UA
	if ua == "" {
		ua = "mackerel-agent/0.0.0"
	}
	return ua
}
// apiRequestTimeout bounds each API request (a var so tests can shorten it).
var apiRequestTimeout = 30 * time.Second

// do sends req with default headers, API key, and User-Agent applied.
// When Verbose is set, the full request and response are dumped to the
// trace log. The caller owns resp.Body.
func (api *API) do(req *http.Request) (resp *http.Response, err error) {
	if api.DefaultHeaders != nil {
		for k, vs := range api.DefaultHeaders {
			for _, v := range vs {
				req.Header.Add(k, v)
			}
		}
	}
	req.Header.Add("X-Api-Key", api.APIKey)
	req.Header.Set("User-Agent", api.getUA())
	if api.Verbose {
		dump, err := httputil.DumpRequest(req, true)
		if err == nil {
			logger.Tracef("%s", dump)
		}
	}
	// A zero http.Client shares http.DefaultTransport, so connection
	// pooling still applies even though a client is built per call.
	client := &http.Client{} // same as http.DefaultClient
	client.Timeout = apiRequestTimeout
	resp, err = client.Do(req)
	if err != nil {
		return nil, err
	}
	if api.Verbose {
		dump, err := httputil.DumpResponse(resp, true)
		if err == nil {
			logger.Tracef("%s", dump)
		}
	}
	return resp, nil
}
func closeResp(resp *http.Response) {
if resp != nil {
resp.Body.Close()
}
}
// FindHost fetches the host with the given ID from the Mackerel API.
func (api *API) FindHost(id string) (*mkr.Host, error) {
	resp, err := api.get(fmt.Sprintf("/api/v0/hosts/%s", id), "")
	defer closeResp(resp)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != 200 {
		return nil, apiError(resp.StatusCode, "status code is not 200")
	}
	var data struct {
		Host *mkr.Host `json:"host"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&data); err != nil {
		return nil, err
	}
	return data.Host, nil
}
// FindHostByCustomIdentifier looks up the host registered with the given
// custom identifier, searching across every host status.
func (api *API) FindHostByCustomIdentifier(customIdentifier string) (*mkr.Host, error) {
	params := url.Values{}
	params.Set("customIdentifier", customIdentifier)
	for _, status := range []string{"working", "standby", "maintenance", "poweroff"} {
		params.Add("status", status)
	}
	resp, err := api.get("/api/v0/hosts", params.Encode())
	defer closeResp(resp)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != 200 {
		return nil, apiError(resp.StatusCode, "status code is not 200")
	}
	var data struct {
		Hosts []*mkr.Host `json:"hosts"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&data); err != nil {
		return nil, err
	}
	if len(data.Hosts) == 0 {
		return nil, infoError(fmt.Sprintf("no host was found for the custom identifier: %s", customIdentifier))
	}
	return data.Hosts[0], nil
}
// CreateHost registers the host with Mackerel and returns its new ID.
// Thin wrapper over the mackerel client.
func (api *API) CreateHost(hostParam *mkr.CreateHostParam) (string, error) {
	return api.c.CreateHost(hostParam)
}

// UpdateHost updates the host information on Mackerel.
func (api *API) UpdateHost(hostID string, hostParam *mkr.UpdateHostParam) error {
	_, err := api.c.UpdateHost(hostID, hostParam)
	return err
}

// UpdateHostStatus updates the status of the host.
func (api *API) UpdateHostStatus(hostID string, status string) error {
	return api.c.UpdateHostStatus(hostID, status)
}

// PostMetricValues posts host metric values via the mackerel client.
func (api *API) PostMetricValues(metricsValues [](*mkr.HostMetricValue)) error {
	return api.c.PostHostMetricValues(metricsValues)
}
// CreateGraphDefs registers graph definitions via the mackerel client,
// consistent with the other host/metric operations that delegate to api.c
// instead of hand-rolling the HTTP request.
func (api *API) CreateGraphDefs(payloads []*mkr.GraphDefsParam) error {
	return api.c.CreateGraphDefs(payloads)
}
// RetireHost retires the host by POSTing to the retire endpoint.
// NOTE(review): requestJSON JSON-encodes the payload, and encoding/json
// marshals []byte as a base64 string ("e30=") rather than the object {} —
// confirm the API accepts this body.
func (api *API) RetireHost(hostID string) error {
	resp, err := api.postJSON(fmt.Sprintf("/api/v0/hosts/%s/retire", hostID), []byte("{}"))
	defer closeResp(resp)
	if err != nil {
		return err
	}
	if resp.StatusCode != 200 {
		return apiError(resp.StatusCode, "api request failed")
	}
	return nil
}
// get issues a GET request to path with the given raw query string.
func (api *API) get(path string, query string) (*http.Response, error) {
	req, err := http.NewRequest("GET", api.urlFor(path, query).String(), nil)
	if err != nil {
		return nil, err
	}
	return api.do(req)
}
// requestJSON sends payload JSON-encoded with the given method, logging the
// request body and response status. A status >= 400 is returned as an
// *Error alongside the response; the caller still owns resp.Body.
func (api *API) requestJSON(method, path string, payload interface{}) (*http.Response, error) {
	var body bytes.Buffer
	err := json.NewEncoder(&body).Encode(payload)
	if err != nil {
		return nil, err
	}
	logger.Debugf("%s %s %s", method, path, body.String())
	req, err := http.NewRequest(method, api.urlFor(path, "").String(), &body)
	if err != nil {
		return nil, err
	}
	req.Header.Add("Content-Type", "application/json")
	resp, err := api.do(req)
	if err != nil {
		return resp, err
	}
	logger.Debugf("%s %s status=%q", method, path, resp.Status)
	if resp.StatusCode >= 400 {
		return resp, apiError(resp.StatusCode, "api request failed")
	}
	return resp, nil
}
// postJSON issues a POST with the payload JSON-encoded (see requestJSON).
func (api *API) postJSON(path string, payload interface{}) (*http.Response, error) {
	return api.requestJSON("POST", path, payload)
}
use mackerel.Client.CreateGraphDefs
package mackerel
import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"net/http/httputil"
	"net/url"
	"time"

	"github.com/mackerelio/golib/logging"
	mkr "github.com/mackerelio/mackerel-client-go"
)
// logger is the package-level logger used for request/response tracing.
var logger = logging.GetLogger("api")

// API is the main interface of Mackerel API.
type API struct {
	BaseURL        *url.URL    // root of the Mackerel API
	APIKey         string      // sent as X-Api-Key on every request
	Verbose        bool        // when true, dump requests/responses to the trace log
	UA             string      // optional User-Agent override (see getUA)
	DefaultHeaders http.Header // extra headers added to every request
	c              *mkr.Client // underlying mackerel-client-go client
}
// Error represents an API error.
type Error struct {
	StatusCode int    // HTTP status returned by the API
	Message    string // short description of the failure
}

// Error implements the error interface.
func (aperr *Error) Error() string {
	return fmt.Sprintf("API error. status: %d, msg: %s", aperr.StatusCode, aperr.Message)
}

// IsClientError reports whether the error is an HTTP 4xx.
func (aperr *Error) IsClientError() bool {
	return 400 <= aperr.StatusCode && aperr.StatusCode < 500
}
// IsClientError returns true if err is (or wraps) a Mackerel API error
// with an HTTP 4xx status code.
func IsClientError(err error) bool {
	// errors.As also matches wrapped errors, unlike a bare type assertion.
	var apiErr *mkr.APIError
	if !errors.As(err, &apiErr) {
		return false
	}
	return 400 <= apiErr.StatusCode && apiErr.StatusCode < 500
}
// IsServerError reports whether the error is an HTTP 5xx.
func (aperr *Error) IsServerError() bool {
	return 500 <= aperr.StatusCode && aperr.StatusCode < 600
}
// IsServerError returns true if err is (or wraps) a Mackerel API error
// with an HTTP 5xx status code.
func IsServerError(err error) bool {
	// errors.As also matches wrapped errors, unlike a bare type assertion.
	var apiErr *mkr.APIError
	if !errors.As(err, &apiErr) {
		return false
	}
	return 500 <= apiErr.StatusCode && apiErr.StatusCode < 600
}
// apiError builds an *Error with the given HTTP status code and message.
func apiError(code int, msg string) *Error {
	return &Error{
		StatusCode: code,
		Message:    msg,
	}
}

// InfoError represents an error of log level INFO.
type InfoError struct {
	Message string
}

// Error implements the error interface.
func (e *InfoError) Error() string {
	return e.Message
}

// infoError builds an *InfoError with the given message.
func infoError(msg string) *InfoError {
	return &InfoError{
		Message: msg,
	}
}
// NewAPI creates an API bound to rawurl, authenticating with apiKey.
// verbose enables request/response dumping to the trace log.
func NewAPI(rawurl string, apiKey string, verbose bool) (*API, error) {
	baseURL, err := url.Parse(rawurl)
	if err != nil {
		return nil, err
	}
	client, err := mkr.NewClientWithOptions(apiKey, rawurl, verbose)
	if err != nil {
		return nil, err
	}
	return &API{
		BaseURL: baseURL,
		APIKey:  apiKey,
		Verbose: verbose,
		c:       client,
	}, nil
}
// urlFor builds a URL under BaseURL with the given path and raw query.
func (api *API) urlFor(path string, query string) *url.URL {
	// Re-parse the base URL to obtain an independent copy we can mutate;
	// the error is ignored because BaseURL is already a valid URL.
	u, _ := url.Parse(api.BaseURL.String())
	u.Path = path
	u.RawQuery = query
	return u
}
// getUA returns the configured User-Agent, falling back to the default
// agent identifier when none is set.
func (api *API) getUA() string {
	ua := api.UA
	if ua == "" {
		ua = "mackerel-agent/0.0.0"
	}
	return ua
}
// apiRequestTimeout bounds each API request (a var so tests can shorten it).
var apiRequestTimeout = 30 * time.Second

// do sends req with default headers, API key, and User-Agent applied.
// When Verbose is set, the full request and response are dumped to the
// trace log. The caller owns resp.Body.
func (api *API) do(req *http.Request) (resp *http.Response, err error) {
	if api.DefaultHeaders != nil {
		for k, vs := range api.DefaultHeaders {
			for _, v := range vs {
				req.Header.Add(k, v)
			}
		}
	}
	req.Header.Add("X-Api-Key", api.APIKey)
	req.Header.Set("User-Agent", api.getUA())
	if api.Verbose {
		dump, err := httputil.DumpRequest(req, true)
		if err == nil {
			logger.Tracef("%s", dump)
		}
	}
	// A zero http.Client shares http.DefaultTransport, so connection
	// pooling still applies even though a client is built per call.
	client := &http.Client{} // same as http.DefaultClient
	client.Timeout = apiRequestTimeout
	resp, err = client.Do(req)
	if err != nil {
		return nil, err
	}
	if api.Verbose {
		dump, err := httputil.DumpResponse(resp, true)
		if err == nil {
			logger.Tracef("%s", dump)
		}
	}
	return resp, nil
}
func closeResp(resp *http.Response) {
if resp != nil {
resp.Body.Close()
}
}
// FindHost fetches the host with the given ID from the Mackerel API.
func (api *API) FindHost(id string) (*mkr.Host, error) {
	resp, err := api.get(fmt.Sprintf("/api/v0/hosts/%s", id), "")
	defer closeResp(resp)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != 200 {
		return nil, apiError(resp.StatusCode, "status code is not 200")
	}
	var data struct {
		Host *mkr.Host `json:"host"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&data); err != nil {
		return nil, err
	}
	return data.Host, nil
}
// FindHostByCustomIdentifier looks up the host registered with the given
// custom identifier, searching across every host status.
func (api *API) FindHostByCustomIdentifier(customIdentifier string) (*mkr.Host, error) {
	params := url.Values{}
	params.Set("customIdentifier", customIdentifier)
	for _, status := range []string{"working", "standby", "maintenance", "poweroff"} {
		params.Add("status", status)
	}
	resp, err := api.get("/api/v0/hosts", params.Encode())
	defer closeResp(resp)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != 200 {
		return nil, apiError(resp.StatusCode, "status code is not 200")
	}
	var data struct {
		Hosts []*mkr.Host `json:"hosts"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&data); err != nil {
		return nil, err
	}
	if len(data.Hosts) == 0 {
		return nil, infoError(fmt.Sprintf("no host was found for the custom identifier: %s", customIdentifier))
	}
	return data.Hosts[0], nil
}
// CreateHost registers the host with Mackerel and returns its new ID.
// Thin wrapper over the mackerel client.
func (api *API) CreateHost(hostParam *mkr.CreateHostParam) (string, error) {
	return api.c.CreateHost(hostParam)
}

// UpdateHost updates the host information on Mackerel.
func (api *API) UpdateHost(hostID string, hostParam *mkr.UpdateHostParam) error {
	_, err := api.c.UpdateHost(hostID, hostParam)
	return err
}

// UpdateHostStatus updates the status of the host.
func (api *API) UpdateHostStatus(hostID string, status string) error {
	return api.c.UpdateHostStatus(hostID, status)
}

// PostMetricValues posts host metric values via the mackerel client.
func (api *API) PostMetricValues(metricsValues [](*mkr.HostMetricValue)) error {
	return api.c.PostHostMetricValues(metricsValues)
}
// CreateGraphDefs registers graph definitions via the mackerel client.
func (api *API) CreateGraphDefs(payloads []*mkr.GraphDefsParam) error {
	return api.c.CreateGraphDefs(payloads)
}
// RetireHost retires the host by POSTing to the retire endpoint.
// NOTE(review): requestJSON JSON-encodes the payload, and encoding/json
// marshals []byte as a base64 string ("e30=") rather than the object {} —
// confirm the API accepts this body.
func (api *API) RetireHost(hostID string) error {
	resp, err := api.postJSON(fmt.Sprintf("/api/v0/hosts/%s/retire", hostID), []byte("{}"))
	defer closeResp(resp)
	if err != nil {
		return err
	}
	if resp.StatusCode != 200 {
		return apiError(resp.StatusCode, "api request failed")
	}
	return nil
}
// get issues a GET request to path with the given raw query string.
func (api *API) get(path string, query string) (*http.Response, error) {
	req, err := http.NewRequest("GET", api.urlFor(path, query).String(), nil)
	if err != nil {
		return nil, err
	}
	return api.do(req)
}
// requestJSON sends payload JSON-encoded with the given method, logging the
// request body and response status. A status >= 400 is returned as an
// *Error alongside the response; the caller still owns resp.Body.
func (api *API) requestJSON(method, path string, payload interface{}) (*http.Response, error) {
	var body bytes.Buffer
	err := json.NewEncoder(&body).Encode(payload)
	if err != nil {
		return nil, err
	}
	logger.Debugf("%s %s %s", method, path, body.String())
	req, err := http.NewRequest(method, api.urlFor(path, "").String(), &body)
	if err != nil {
		return nil, err
	}
	req.Header.Add("Content-Type", "application/json")
	resp, err := api.do(req)
	if err != nil {
		return resp, err
	}
	logger.Debugf("%s %s status=%q", method, path, resp.Status)
	if resp.StatusCode >= 400 {
		return resp, apiError(resp.StatusCode, "api request failed")
	}
	return resp, nil
}
// postJSON issues a POST with the payload JSON-encoded (see requestJSON).
func (api *API) postJSON(path string, payload interface{}) (*http.Response, error) {
	return api.requestJSON("POST", path, payload)
}
|
package macro
import (
"strconv"
"strings"
"github.com/kataras/iris/macro/interpreter/ast"
)
var (
// String type
// Allows anything (single path segment, as everything except the `Path`).
// Its functions can be used by the rest of the macros and param types whenever not available function by name is used.
// Because of its "master" boolean value to true (third parameter).
String = NewMacro("string", "", true, false, nil).
RegisterFunc("regexp", MustRegexp).
// checks if param value starts with the 'prefix' arg
RegisterFunc("prefix", func(prefix string) func(string) bool {
return func(paramValue string) bool {
return strings.HasPrefix(paramValue, prefix)
}
}).
// checks if param value ends with the 'suffix' arg
RegisterFunc("suffix", func(suffix string) func(string) bool {
return func(paramValue string) bool {
return strings.HasSuffix(paramValue, suffix)
}
}).
// checks if param value contains the 's' arg
RegisterFunc("contains", func(s string) func(string) bool {
return func(paramValue string) bool {
return strings.Contains(paramValue, s)
}
}).
// checks if param value's length is at least 'min'
RegisterFunc("min", func(min int) func(string) bool {
return func(paramValue string) bool {
return len(paramValue) >= min
}
}).
// checks if param value's length is not bigger than 'max'
RegisterFunc("max", func(max int) func(string) bool {
return func(paramValue string) bool {
return max >= len(paramValue)
}
})
simpleNumberEval = MustRegexp("^-?[0-9]+$")
// Int or number type
// both positive and negative numbers, actual value can be min-max int64 or min-max int32 depends on the arch.
// If x64: -9223372036854775808 to 9223372036854775807.
// If x32: -2147483648 to 2147483647 and etc..
Int = NewMacro("int", "number", false, false, func(paramValue string) (interface{}, bool) {
if !simpleNumberEval(paramValue) {
return nil, false
}
v, err := strconv.Atoi(paramValue)
if err != nil {
return nil, false
}
return v, true
}).
// checks if the param value's int representation is
// bigger or equal than 'min'
RegisterFunc("min", func(min int) func(int) bool {
return func(paramValue int) bool {
return paramValue >= min
}
}).
// checks if the param value's int representation is
// smaller or equal than 'max'.
RegisterFunc("max", func(max int) func(int) bool {
return func(paramValue int) bool {
return paramValue <= max
}
}).
// checks if the param value's int representation is
// between min and max, including 'min' and 'max'.
RegisterFunc("range", func(min, max int) func(int) bool {
return func(paramValue int) bool {
return !(paramValue < min || paramValue > max)
}
})
// Int8 type
// -128 to 127.
Int8 = NewMacro("int8", "", false, false, func(paramValue string) (interface{}, bool) {
if !simpleNumberEval(paramValue) {
return nil, false
}
v, err := strconv.ParseInt(paramValue, 10, 8)
if err != nil {
return nil, false
}
return int8(v), true
}).
RegisterFunc("min", func(min int8) func(int8) bool {
return func(paramValue int8) bool {
return paramValue >= min
}
}).
RegisterFunc("max", func(max int8) func(int8) bool {
return func(paramValue int8) bool {
return paramValue <= max
}
}).
RegisterFunc("range", func(min, max int8) func(int8) bool {
return func(paramValue int8) bool {
return !(paramValue < min || paramValue > max)
}
})
// Int16 type
// -32768 to 32767.
Int16 = NewMacro("int16", "", false, false, func(paramValue string) (interface{}, bool) {
if !simpleNumberEval(paramValue) {
return nil, false
}
v, err := strconv.ParseInt(paramValue, 10, 16)
if err != nil {
return nil, false
}
return int16(v), true
}).
RegisterFunc("min", func(min int16) func(int16) bool {
return func(paramValue int16) bool {
return paramValue >= min
}
}).
RegisterFunc("max", func(max int16) func(int16) bool {
return func(paramValue int16) bool {
return paramValue <= max
}
}).
RegisterFunc("range", func(min, max int16) func(int16) bool {
return func(paramValue int16) bool {
return !(paramValue < min || paramValue > max)
}
})
// Int32 type
// -2147483648 to 2147483647.
Int32 = NewMacro("int32", "", false, false, func(paramValue string) (interface{}, bool) {
if !simpleNumberEval(paramValue) {
return nil, false
}
v, err := strconv.ParseInt(paramValue, 10, 32)
if err != nil {
return nil, false
}
return int32(v), true
}).
RegisterFunc("min", func(min int32) func(int32) bool {
return func(paramValue int32) bool {
return paramValue >= min
}
}).
RegisterFunc("max", func(max int32) func(int32) bool {
return func(paramValue int32) bool {
return paramValue <= max
}
}).
RegisterFunc("range", func(min, max int32) func(int32) bool {
return func(paramValue int32) bool {
return !(paramValue < min || paramValue > max)
}
})
// Int64 as int64 type
// -9223372036854775808 to 9223372036854775807.
Int64 = NewMacro("int64", "long", false, false, func(paramValue string) (interface{}, bool) {
if !simpleNumberEval(paramValue) {
return nil, false
}
v, err := strconv.ParseInt(paramValue, 10, 64)
if err != nil { // if err == strconv.ErrRange...
return nil, false
}
return v, true
}).
// checks if the param value's int64 representation is
// bigger or equal than 'min'.
RegisterFunc("min", func(min int64) func(int64) bool {
return func(paramValue int64) bool {
return paramValue >= min
}
}).
// checks if the param value's int64 representation is
// smaller or equal than 'max'.
RegisterFunc("max", func(max int64) func(int64) bool {
return func(paramValue int64) bool {
return paramValue <= max
}
}).
// checks if the param value's int64 representation is
// between min and max, including 'min' and 'max'.
RegisterFunc("range", func(min, max int64) func(int64) bool {
return func(paramValue int64) bool {
return !(paramValue < min || paramValue > max)
}
})
// Uint as uint type
// actual value can be min-max uint64 or min-max uint32 depends on the arch.
// If x64: 0 to 18446744073709551615.
// If x32: 0 to 4294967295 and etc.
Uint = NewMacro("uint", "", false, false, func(paramValue string) (interface{}, bool) {
v, err := strconv.ParseUint(paramValue, 10, strconv.IntSize) // 32,64...
if err != nil {
return nil, false
}
return uint(v), true
}).
// checks if the param value's int representation is
// bigger or equal than 'min'
RegisterFunc("min", func(min uint) func(uint) bool {
return func(paramValue uint) bool {
return paramValue >= min
}
}).
// checks if the param value's int representation is
// smaller or equal than 'max'.
RegisterFunc("max", func(max uint) func(uint) bool {
return func(paramValue uint) bool {
return paramValue <= max
}
}).
// checks if the param value's int representation is
// between min and max, including 'min' and 'max'.
RegisterFunc("range", func(min, max uint) func(uint) bool {
return func(paramValue uint) bool {
return !(paramValue < min || paramValue > max)
}
})
uint8Eval = MustRegexp("^([0-9]|[1-8][0-9]|9[0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$")
// Uint8 as uint8 type
// 0 to 255.
Uint8 = NewMacro("uint8", "", false, false, func(paramValue string) (interface{}, bool) {
if !uint8Eval(paramValue) {
return nil, false
}
v, err := strconv.ParseUint(paramValue, 10, 8)
if err != nil {
return nil, false
}
return uint8(v), true
}).
// checks if the param value's uint8 representation is
// bigger or equal than 'min'.
RegisterFunc("min", func(min uint8) func(uint8) bool {
return func(paramValue uint8) bool {
return paramValue >= min
}
}).
// checks if the param value's uint8 representation is
// smaller or equal than 'max'.
RegisterFunc("max", func(max uint8) func(uint8) bool {
return func(paramValue uint8) bool {
return paramValue <= max
}
}).
// checks if the param value's uint8 representation is
// between min and max, including 'min' and 'max'.
RegisterFunc("range", func(min, max uint8) func(uint8) bool {
return func(paramValue uint8) bool {
return !(paramValue < min || paramValue > max)
}
})
// Uint16 as uint16 type
// 0 to 65535.
Uint16 = NewMacro("uint16", "", false, false, func(paramValue string) (interface{}, bool) {
v, err := strconv.ParseUint(paramValue, 10, 16)
if err != nil {
return nil, false
}
return uint16(v), true
}).
RegisterFunc("min", func(min uint16) func(uint16) bool {
return func(paramValue uint16) bool {
return paramValue >= min
}
}).
RegisterFunc("max", func(max uint16) func(uint16) bool {
return func(paramValue uint16) bool {
return paramValue <= max
}
}).
RegisterFunc("range", func(min, max uint16) func(uint16) bool {
return func(paramValue uint16) bool {
return !(paramValue < min || paramValue > max)
}
})
// Uint32 as uint32 type
// 0 to 4294967295.
Uint32 = NewMacro("uint32", "", false, false, func(paramValue string) (interface{}, bool) {
v, err := strconv.ParseUint(paramValue, 10, 32)
if err != nil {
return nil, false
}
return uint32(v), true
}).
RegisterFunc("min", func(min uint32) func(uint32) bool {
return func(paramValue uint32) bool {
return paramValue >= min
}
}).
RegisterFunc("max", func(max uint32) func(uint32) bool {
return func(paramValue uint32) bool {
return paramValue <= max
}
}).
RegisterFunc("range", func(min, max uint32) func(uint32) bool {
return func(paramValue uint32) bool {
return !(paramValue < min || paramValue > max)
}
})
// Uint64 as uint64 type
// 0 to 18446744073709551615.
Uint64 = NewMacro("uint64", "", false, false, func(paramValue string) (interface{}, bool) {
v, err := strconv.ParseUint(paramValue, 10, 64)
if err != nil {
return nil, false
}
return v, true
}).
// checks if the param value's uint64 representation is
// bigger or equal than 'min'.
RegisterFunc("min", func(min uint64) func(uint64) bool {
return func(paramValue uint64) bool {
return paramValue >= min
}
}).
// checks if the param value's uint64 representation is
// smaller or equal than 'max'.
RegisterFunc("max", func(max uint64) func(uint64) bool {
return func(paramValue uint64) bool {
return paramValue <= max
}
}).
// checks if the param value's uint64 representation is
// between min and max, including 'min' and 'max'.
RegisterFunc("range", func(min, max uint64) func(uint64) bool {
return func(paramValue uint64) bool {
return !(paramValue < min || paramValue > max)
}
})
// Bool or boolean as bool type
// a string which is "1" or "t" or "T" or "TRUE" or "true" or "True"
// or "0" or "f" or "F" or "FALSE" or "false" or "False".
Bool = NewMacro("bool", "boolean", false, false, func(paramValue string) (interface{}, bool) {
// a simple if statement is faster than regex ^(true|false|True|False|t|0|f|FALSE|TRUE)$
// in this case.
v, err := strconv.ParseBool(paramValue)
if err != nil {
return nil, false
}
return v, true
})
alphabeticalEval = MustRegexp("^[a-zA-Z ]+$")
// Alphabetical letter type
// letters only (upper or lowercase)
Alphabetical = NewMacro("alphabetical", "", false, false, func(paramValue string) (interface{}, bool) {
if !alphabeticalEval(paramValue) {
return nil, false
}
return paramValue, true
})
fileEval = MustRegexp("^[a-zA-Z0-9_.-]*$")
// File type
// letters (upper or lowercase)
// numbers (0-9)
// underscore (_)
// dash (-)
// point (.)
// no spaces! or other character
File = NewMacro("file", "", false, false, func(paramValue string) (interface{}, bool) {
if !fileEval(paramValue) {
return nil, false
}
return paramValue, true
})
// Path type
// anything, should be the last part
//
// It allows everything, we have String and Path as different
// types because I want to give the opportunity to the user
// to organise the macro functions based on wildcard or single dynamic named path parameter.
// Should be living in the latest path segment of a route path.
Path = NewMacro("path", "", false, true, nil)
// Defaults contains the defaults macro and parameters types for the router.
//
// Read https://github.com/kataras/iris/tree/master/_examples/routing/macros for more details.
Defaults = &Macros{
String,
Int,
Int8,
Int16,
Int32,
Int64,
Uint,
Uint8,
Uint16,
Uint32,
Uint64,
Bool,
Alphabetical,
Path,
}
)
// Macros is a slice of *Macro. It is responsible for registering and
// looking up macros by their indent (the parameter type's name, e.g. "int")
// or by their optional alias (e.g. "number").
type Macros []*Macro
// Register builds a Macro from the given parts and adds it to the list.
// The "indent" is the parameter type's name (i.e. "string"); it must be
// non-empty and unique. The "alias" is optional but must be unique as well.
// "isMaster" marks the default parameter type and "isTrailing" the wildcard
// one, respectively. The "evaluator" is converted to an Iris handler that runs
// before the main handler chain of any route using this parameter type.
//
// It returns the new macro on success, or nil when registration is rejected.
//
// Read https://github.com/kataras/iris/tree/master/_examples/routing/macros for more details.
func (ms *Macros) Register(indent, alias string, isMaster, isTrailing bool, evaluator ParamEvaluator) *Macro {
	m := NewMacro(indent, alias, isMaster, isTrailing, evaluator)
	if !ms.register(m) {
		// Rejected: empty indent, duplicate indent/alias, or a second master.
		return nil
	}
	return m
}
// register appends the given macro to the list when its ast characteristics
// (indent, alias, master flag) do not clash with an already registered macro.
// It reports whether the macro was accepted.
func (ms *Macros) register(macro *Macro) bool {
	if macro.Indent() == "" {
		return false
	}
	for _, existing := range *ms {
		// Reject duplicates of the same parameter type name.
		if existing.Indent() == macro.Indent() {
			return false
		}
		// Reject an alias colliding with another macro's alias or name.
		if alias := macro.Alias(); alias != "" && (alias == existing.Alias() || alias == existing.Indent()) {
			return false
		}
		// Only a single "master" (default) macro is allowed.
		if macro.Master() && existing.Master() {
			return false
		}
	}
	*ms = append(*ms, macro)
	return true
}
// Unregister removes the macro registered under "indent" from the list.
// It reports whether such a macro existed and was removed.
func (ms *Macros) Unregister(indent string) bool {
	macros := *ms
	for i := range macros {
		if macros[i].Indent() != indent {
			continue
		}
		// Shift the tail left, nil the now-duplicated last slot so the
		// removed *Macro can be garbage-collected, then shrink by one.
		copy(macros[i:], macros[i+1:])
		macros[len(macros)-1] = nil
		*ms = macros[:len(macros)-1]
		return true
	}
	return false
}
// Lookup returns the macro responsible for the given parameter type,
// matching first by the type's indent and then by its alias, if any.
// It can return nil when no macro matches.
func (ms *Macros) Lookup(pt ast.ParamType) *Macro {
	if found := ms.Get(pt.Indent()); found != nil {
		return found
	}
	alias, has := ast.HasAlias(pt)
	if !has {
		return nil
	}
	return ms.Get(alias)
}
// Get returns the macro whose indent or alias equals "indentOrAlias".
// It can return nil when the argument is empty or nothing matches.
func (ms *Macros) Get(indentOrAlias string) *Macro {
	if indentOrAlias == "" {
		return nil
	}
	for _, m := range *ms {
		if m.Indent() == indentOrAlias || m.Alias() == indentOrAlias {
			return m
		}
	}
	return nil
}
// GetMaster returns the default macro (the one registered as "master");
// with the default registrations that is the `String` macro, which is
// responsible for the "string" parameter type. It can return nil.
func (ms *Macros) GetMaster() *Macro {
	for _, candidate := range *ms {
		if candidate.Master() {
			return candidate
		}
	}
	return nil
}
// GetTrailings returns every macro that supports wildcard parameter types;
// with the default registrations that is the `Path` macro, which is
// responsible for the "path" parameter type.
func (ms *Macros) GetTrailings() []*Macro {
	var trailings []*Macro
	for _, m := range *ms {
		if m.Trailing() {
			trailings = append(trailings, m)
		}
	}
	return trailings
}
Fix https://github.com/kataras/iris/issues/1220, introduced by the last updates: the "file" macro was missing from the Defaults list.
Former-commit-id: 1932953145c9386053cae28805020e6b6bd956a6
package macro
import (
"strconv"
"strings"
"github.com/kataras/iris/macro/interpreter/ast"
)
var (
	// String type
	// Allows anything (single path segment, as everything except the `Path`).
	// Its functions can be used by the rest of the macros and param types whenever not available function by name is used.
	// Because of its "master" boolean value to true (third parameter).
	String = NewMacro("string", "", true, false, nil).
		// validates the param value against the given regular expression
		// (compiled via MustRegexp).
		RegisterFunc("regexp", MustRegexp).
		// checks if param value starts with the 'prefix' arg
		RegisterFunc("prefix", func(prefix string) func(string) bool {
			return func(paramValue string) bool {
				return strings.HasPrefix(paramValue, prefix)
			}
		}).
		// checks if param value ends with the 'suffix' arg
		RegisterFunc("suffix", func(suffix string) func(string) bool {
			return func(paramValue string) bool {
				return strings.HasSuffix(paramValue, suffix)
			}
		}).
		// checks if param value contains the 's' arg
		RegisterFunc("contains", func(s string) func(string) bool {
			return func(paramValue string) bool {
				return strings.Contains(paramValue, s)
			}
		}).
		// checks if param value's length is at least 'min'
		RegisterFunc("min", func(min int) func(string) bool {
			return func(paramValue string) bool {
				return len(paramValue) >= min
			}
		}).
		// checks if param value's length is not bigger than 'max'
		RegisterFunc("max", func(max int) func(string) bool {
			return func(paramValue string) bool {
				return max >= len(paramValue)
			}
		})

	// simpleNumberEval reports whether the raw param value is an
	// optionally negative-signed decimal integer; used as a cheap
	// pre-filter before the strconv parsing below.
	simpleNumberEval = MustRegexp("^-?[0-9]+$")
	// Int or number type
	// both positive and negative numbers, actual value can be min-max int64 or min-max int32 depends on the arch.
	// If x64: -9223372036854775808 to 9223372036854775807.
	// If x32: -2147483648 to 2147483647 and etc..
	Int = NewMacro("int", "number", false, false, func(paramValue string) (interface{}, bool) {
		if !simpleNumberEval(paramValue) {
			return nil, false
		}
		v, err := strconv.Atoi(paramValue)
		if err != nil {
			return nil, false
		}
		return v, true
	}).
		// checks if the param value's int representation is
		// bigger or equal than 'min'
		RegisterFunc("min", func(min int) func(int) bool {
			return func(paramValue int) bool {
				return paramValue >= min
			}
		}).
		// checks if the param value's int representation is
		// smaller or equal than 'max'.
		RegisterFunc("max", func(max int) func(int) bool {
			return func(paramValue int) bool {
				return paramValue <= max
			}
		}).
		// checks if the param value's int representation is
		// between min and max, including 'min' and 'max'.
		RegisterFunc("range", func(min, max int) func(int) bool {
			return func(paramValue int) bool {
				return !(paramValue < min || paramValue > max)
			}
		})
	// Int8 type
	// -128 to 127.
	Int8 = NewMacro("int8", "", false, false, func(paramValue string) (interface{}, bool) {
		if !simpleNumberEval(paramValue) {
			return nil, false
		}
		v, err := strconv.ParseInt(paramValue, 10, 8)
		if err != nil {
			return nil, false
		}
		return int8(v), true
	}).
		// checks if the param value's int8 representation is
		// bigger or equal than 'min'.
		RegisterFunc("min", func(min int8) func(int8) bool {
			return func(paramValue int8) bool {
				return paramValue >= min
			}
		}).
		// checks if the param value's int8 representation is
		// smaller or equal than 'max'.
		RegisterFunc("max", func(max int8) func(int8) bool {
			return func(paramValue int8) bool {
				return paramValue <= max
			}
		}).
		// checks if the param value's int8 representation is
		// between min and max, including 'min' and 'max'.
		RegisterFunc("range", func(min, max int8) func(int8) bool {
			return func(paramValue int8) bool {
				return !(paramValue < min || paramValue > max)
			}
		})
	// Int16 type
	// -32768 to 32767.
	Int16 = NewMacro("int16", "", false, false, func(paramValue string) (interface{}, bool) {
		if !simpleNumberEval(paramValue) {
			return nil, false
		}
		v, err := strconv.ParseInt(paramValue, 10, 16)
		if err != nil {
			return nil, false
		}
		return int16(v), true
	}).
		// checks if the param value's int16 representation is
		// bigger or equal than 'min'.
		RegisterFunc("min", func(min int16) func(int16) bool {
			return func(paramValue int16) bool {
				return paramValue >= min
			}
		}).
		// checks if the param value's int16 representation is
		// smaller or equal than 'max'.
		RegisterFunc("max", func(max int16) func(int16) bool {
			return func(paramValue int16) bool {
				return paramValue <= max
			}
		}).
		// checks if the param value's int16 representation is
		// between min and max, including 'min' and 'max'.
		RegisterFunc("range", func(min, max int16) func(int16) bool {
			return func(paramValue int16) bool {
				return !(paramValue < min || paramValue > max)
			}
		})
	// Int32 type
	// -2147483648 to 2147483647.
	Int32 = NewMacro("int32", "", false, false, func(paramValue string) (interface{}, bool) {
		if !simpleNumberEval(paramValue) {
			return nil, false
		}
		v, err := strconv.ParseInt(paramValue, 10, 32)
		if err != nil {
			return nil, false
		}
		return int32(v), true
	}).
		// checks if the param value's int32 representation is
		// bigger or equal than 'min'.
		RegisterFunc("min", func(min int32) func(int32) bool {
			return func(paramValue int32) bool {
				return paramValue >= min
			}
		}).
		// checks if the param value's int32 representation is
		// smaller or equal than 'max'.
		RegisterFunc("max", func(max int32) func(int32) bool {
			return func(paramValue int32) bool {
				return paramValue <= max
			}
		}).
		// checks if the param value's int32 representation is
		// between min and max, including 'min' and 'max'.
		RegisterFunc("range", func(min, max int32) func(int32) bool {
			return func(paramValue int32) bool {
				return !(paramValue < min || paramValue > max)
			}
		})
	// Int64 as int64 type
	// -9223372036854775808 to 9223372036854775807.
	Int64 = NewMacro("int64", "long", false, false, func(paramValue string) (interface{}, bool) {
		if !simpleNumberEval(paramValue) {
			return nil, false
		}
		v, err := strconv.ParseInt(paramValue, 10, 64)
		if err != nil { // if err == strconv.ErrRange...
			return nil, false
		}
		return v, true
	}).
		// checks if the param value's int64 representation is
		// bigger or equal than 'min'.
		RegisterFunc("min", func(min int64) func(int64) bool {
			return func(paramValue int64) bool {
				return paramValue >= min
			}
		}).
		// checks if the param value's int64 representation is
		// smaller or equal than 'max'.
		RegisterFunc("max", func(max int64) func(int64) bool {
			return func(paramValue int64) bool {
				return paramValue <= max
			}
		}).
		// checks if the param value's int64 representation is
		// between min and max, including 'min' and 'max'.
		RegisterFunc("range", func(min, max int64) func(int64) bool {
			return func(paramValue int64) bool {
				return !(paramValue < min || paramValue > max)
			}
		})
	// Uint as uint type
	// actual value can be min-max uint64 or min-max uint32 depends on the arch.
	// If x64: 0 to 18446744073709551615.
	// If x32: 0 to 4294967295 and etc.
	Uint = NewMacro("uint", "", false, false, func(paramValue string) (interface{}, bool) {
		v, err := strconv.ParseUint(paramValue, 10, strconv.IntSize) // 32,64...
		if err != nil {
			return nil, false
		}
		return uint(v), true
	}).
		// checks if the param value's uint representation is
		// bigger or equal than 'min'
		RegisterFunc("min", func(min uint) func(uint) bool {
			return func(paramValue uint) bool {
				return paramValue >= min
			}
		}).
		// checks if the param value's uint representation is
		// smaller or equal than 'max'.
		RegisterFunc("max", func(max uint) func(uint) bool {
			return func(paramValue uint) bool {
				return paramValue <= max
			}
		}).
		// checks if the param value's uint representation is
		// between min and max, including 'min' and 'max'.
		RegisterFunc("range", func(min, max uint) func(uint) bool {
			return func(paramValue uint) bool {
				return !(paramValue < min || paramValue > max)
			}
		})

	// uint8Eval reports whether the raw param value is a decimal number
	// within 0-255 (no sign), matched before the strconv parsing below.
	uint8Eval = MustRegexp("^([0-9]|[1-8][0-9]|9[0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$")
	// Uint8 as uint8 type
	// 0 to 255.
	Uint8 = NewMacro("uint8", "", false, false, func(paramValue string) (interface{}, bool) {
		if !uint8Eval(paramValue) {
			return nil, false
		}
		v, err := strconv.ParseUint(paramValue, 10, 8)
		if err != nil {
			return nil, false
		}
		return uint8(v), true
	}).
		// checks if the param value's uint8 representation is
		// bigger or equal than 'min'.
		RegisterFunc("min", func(min uint8) func(uint8) bool {
			return func(paramValue uint8) bool {
				return paramValue >= min
			}
		}).
		// checks if the param value's uint8 representation is
		// smaller or equal than 'max'.
		RegisterFunc("max", func(max uint8) func(uint8) bool {
			return func(paramValue uint8) bool {
				return paramValue <= max
			}
		}).
		// checks if the param value's uint8 representation is
		// between min and max, including 'min' and 'max'.
		RegisterFunc("range", func(min, max uint8) func(uint8) bool {
			return func(paramValue uint8) bool {
				return !(paramValue < min || paramValue > max)
			}
		})
	// Uint16 as uint16 type
	// 0 to 65535.
	Uint16 = NewMacro("uint16", "", false, false, func(paramValue string) (interface{}, bool) {
		v, err := strconv.ParseUint(paramValue, 10, 16)
		if err != nil {
			return nil, false
		}
		return uint16(v), true
	}).
		// checks if the param value's uint16 representation is
		// bigger or equal than 'min'.
		RegisterFunc("min", func(min uint16) func(uint16) bool {
			return func(paramValue uint16) bool {
				return paramValue >= min
			}
		}).
		// checks if the param value's uint16 representation is
		// smaller or equal than 'max'.
		RegisterFunc("max", func(max uint16) func(uint16) bool {
			return func(paramValue uint16) bool {
				return paramValue <= max
			}
		}).
		// checks if the param value's uint16 representation is
		// between min and max, including 'min' and 'max'.
		RegisterFunc("range", func(min, max uint16) func(uint16) bool {
			return func(paramValue uint16) bool {
				return !(paramValue < min || paramValue > max)
			}
		})
	// Uint32 as uint32 type
	// 0 to 4294967295.
	Uint32 = NewMacro("uint32", "", false, false, func(paramValue string) (interface{}, bool) {
		v, err := strconv.ParseUint(paramValue, 10, 32)
		if err != nil {
			return nil, false
		}
		return uint32(v), true
	}).
		// checks if the param value's uint32 representation is
		// bigger or equal than 'min'.
		RegisterFunc("min", func(min uint32) func(uint32) bool {
			return func(paramValue uint32) bool {
				return paramValue >= min
			}
		}).
		// checks if the param value's uint32 representation is
		// smaller or equal than 'max'.
		RegisterFunc("max", func(max uint32) func(uint32) bool {
			return func(paramValue uint32) bool {
				return paramValue <= max
			}
		}).
		// checks if the param value's uint32 representation is
		// between min and max, including 'min' and 'max'.
		RegisterFunc("range", func(min, max uint32) func(uint32) bool {
			return func(paramValue uint32) bool {
				return !(paramValue < min || paramValue > max)
			}
		})
	// Uint64 as uint64 type
	// 0 to 18446744073709551615.
	Uint64 = NewMacro("uint64", "", false, false, func(paramValue string) (interface{}, bool) {
		v, err := strconv.ParseUint(paramValue, 10, 64)
		if err != nil {
			return nil, false
		}
		return v, true
	}).
		// checks if the param value's uint64 representation is
		// bigger or equal than 'min'.
		RegisterFunc("min", func(min uint64) func(uint64) bool {
			return func(paramValue uint64) bool {
				return paramValue >= min
			}
		}).
		// checks if the param value's uint64 representation is
		// smaller or equal than 'max'.
		RegisterFunc("max", func(max uint64) func(uint64) bool {
			return func(paramValue uint64) bool {
				return paramValue <= max
			}
		}).
		// checks if the param value's uint64 representation is
		// between min and max, including 'min' and 'max'.
		RegisterFunc("range", func(min, max uint64) func(uint64) bool {
			return func(paramValue uint64) bool {
				return !(paramValue < min || paramValue > max)
			}
		})
	// Bool or boolean as bool type
	// a string which is "1" or "t" or "T" or "TRUE" or "true" or "True"
	// or "0" or "f" or "F" or "FALSE" or "false" or "False".
	Bool = NewMacro("bool", "boolean", false, false, func(paramValue string) (interface{}, bool) {
		// a simple if statement is faster than regex ^(true|false|True|False|t|0|f|FALSE|TRUE)$
		// in this case.
		v, err := strconv.ParseBool(paramValue)
		if err != nil {
			return nil, false
		}
		return v, true
	})

	// alphabeticalEval matches one or more ASCII letters.
	// NOTE(review): the character class also accepts spaces, which the
	// "letters only" comment below does not mention — confirm intended.
	alphabeticalEval = MustRegexp("^[a-zA-Z ]+$")
	// Alphabetical letter type
	// letters only (upper or lowercase)
	Alphabetical = NewMacro("alphabetical", "", false, false, func(paramValue string) (interface{}, bool) {
		if !alphabeticalEval(paramValue) {
			return nil, false
		}
		return paramValue, true
	})

	// fileEval matches letters, digits, underscore, dash and dot.
	// NOTE(review): the '*' quantifier also accepts an empty value —
	// confirm whether that is intended.
	fileEval = MustRegexp("^[a-zA-Z0-9_.-]*$")
	// File type
	// letters (upper or lowercase)
	// numbers (0-9)
	// underscore (_)
	// dash (-)
	// point (.)
	// no spaces! or other character
	File = NewMacro("file", "", false, false, func(paramValue string) (interface{}, bool) {
		if !fileEval(paramValue) {
			return nil, false
		}
		return paramValue, true
	})
	// Path type
	// anything, should be the last part
	//
	// It allows everything, we have String and Path as different
	// types because I want to give the opportunity to the user
	// to organise the macro functions based on wildcard or single dynamic named path parameter.
	// Should be living in the latest path segment of a route path.
	Path = NewMacro("path", "", false, true, nil)
	// Defaults contains the defaults macro and parameters types for the router.
	//
	// Read https://github.com/kataras/iris/tree/master/_examples/routing/macros for more details.
	Defaults = &Macros{
		String,
		Int,
		Int8,
		Int16,
		Int32,
		Int64,
		Uint,
		Uint8,
		Uint16,
		Uint32,
		Uint64,
		Bool,
		Alphabetical,
		File,
		Path,
	}
)
// Macros is a slice of *Macro. It is responsible for registering and
// looking up macros by their indent (the parameter type's name, e.g. "int")
// or by their optional alias (e.g. "number").
type Macros []*Macro
// Register builds a Macro from the given parts and adds it to the list.
// The "indent" is the parameter type's name (i.e. "string"); it must be
// non-empty and unique. The "alias" is optional but must be unique as well.
// "isMaster" marks the default parameter type and "isTrailing" the wildcard
// one, respectively. The "evaluator" is converted to an Iris handler that runs
// before the main handler chain of any route using this parameter type.
//
// It returns the new macro on success, or nil when registration is rejected.
//
// Read https://github.com/kataras/iris/tree/master/_examples/routing/macros for more details.
func (ms *Macros) Register(indent, alias string, isMaster, isTrailing bool, evaluator ParamEvaluator) *Macro {
	m := NewMacro(indent, alias, isMaster, isTrailing, evaluator)
	if !ms.register(m) {
		// Rejected: empty indent, duplicate indent/alias, or a second master.
		return nil
	}
	return m
}
// register appends the given macro to the list when its ast characteristics
// (indent, alias, master flag) do not clash with an already registered macro.
// It reports whether the macro was accepted.
func (ms *Macros) register(macro *Macro) bool {
	if macro.Indent() == "" {
		return false
	}
	for _, existing := range *ms {
		// Reject duplicates of the same parameter type name.
		if existing.Indent() == macro.Indent() {
			return false
		}
		// Reject an alias colliding with another macro's alias or name.
		if alias := macro.Alias(); alias != "" && (alias == existing.Alias() || alias == existing.Indent()) {
			return false
		}
		// Only a single "master" (default) macro is allowed.
		if macro.Master() && existing.Master() {
			return false
		}
	}
	*ms = append(*ms, macro)
	return true
}
// Unregister removes the macro registered under "indent" from the list.
// It reports whether such a macro existed and was removed.
func (ms *Macros) Unregister(indent string) bool {
	macros := *ms
	for i := range macros {
		if macros[i].Indent() != indent {
			continue
		}
		// Shift the tail left, nil the now-duplicated last slot so the
		// removed *Macro can be garbage-collected, then shrink by one.
		copy(macros[i:], macros[i+1:])
		macros[len(macros)-1] = nil
		*ms = macros[:len(macros)-1]
		return true
	}
	return false
}
// Lookup returns the macro responsible for the given parameter type,
// matching first by the type's indent and then by its alias, if any.
// It can return nil when no macro matches.
func (ms *Macros) Lookup(pt ast.ParamType) *Macro {
	if found := ms.Get(pt.Indent()); found != nil {
		return found
	}
	alias, has := ast.HasAlias(pt)
	if !has {
		return nil
	}
	return ms.Get(alias)
}
// Get returns the macro whose indent or alias equals "indentOrAlias".
// It can return nil when the argument is empty or nothing matches.
func (ms *Macros) Get(indentOrAlias string) *Macro {
	if indentOrAlias == "" {
		return nil
	}
	for _, m := range *ms {
		if m.Indent() == indentOrAlias || m.Alias() == indentOrAlias {
			return m
		}
	}
	return nil
}
// GetMaster returns the default macro (the one registered as "master");
// with the default registrations that is the `String` macro, which is
// responsible for the "string" parameter type. It can return nil.
func (ms *Macros) GetMaster() *Macro {
	for _, candidate := range *ms {
		if candidate.Master() {
			return candidate
		}
	}
	return nil
}
// GetTrailings returns every macro that supports wildcard parameter types;
// with the default registrations that is the `Path` macro, which is
// responsible for the "path" parameter type.
func (ms *Macros) GetTrailings() []*Macro {
	var trailings []*Macro
	for _, m := range *ms {
		if m.Trailing() {
			trailings = append(trailings, m)
		}
	}
	return trailings
}
|
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"encoding/xml"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
)
// JUnitTestSuites is a collection of JUnit test suites.
// It is the root element (<testsuites>) of the generated XML report.
type JUnitTestSuites struct {
	XMLName xml.Name `xml:"testsuites"`
	TestSuites []JUnitTestSuite `xml:"testsuite"`
}
// JUnitTestSuite is a single JUnit test suite which may contain many
// testcases.
type JUnitTestSuite struct {
	XMLName xml.Name `xml:"testsuite"`
	Tests int `xml:"tests,attr"`    // total number of test cases in the suite
	Failures int `xml:"failures,attr"` // number of failed test cases
	Time string `xml:"time,attr"`     // suite duration in seconds, pre-formatted
	TestCases []JUnitTestCase `xml:"testcase"`
}
// JUnitTestCase is a single test case with its result.
// A nil Failure marks the case as passing.
type JUnitTestCase struct {
	XMLName xml.Name `xml:"testcase"`
	Message string `xml:"message,attr"`
	Time string `xml:"time,attr"` // case duration in seconds, pre-formatted
	Failure *JUnitFailure `xml:"failure,omitempty"`
}
// JUnitFailure contains data related to a failed test.
type JUnitFailure struct {
	XMLName xml.Name `xml:"failure"`
	Message string `xml:"message,attr"`
	Type string `xml:"type,attr"`
	Contents string `xml:",chardata"`
}
// WriteResults prints the outcome of "result" to stdout and appends it to
// the JUnit XML report located at "output". It reports whether the report
// was written and every test succeeded.
func (t *Tester) WriteResults(result Test, output string) bool {
	// O_CREATE guarantees the report file exists before it is read back.
	report, err := os.OpenFile(output, os.O_RDONLY|os.O_CREATE, 0644)
	if err != nil {
		fmt.Printf("Error opening XML: %s\n", err)
		return false
	}
	defer report.Close()
	if werr := t.writeJUnitReportXML(result, report, output); werr != nil {
		fmt.Printf("The tests were unsuccessful: %s\n", werr)
		return false
	}
	fmt.Printf("-> JUnit XML report written: %s\n", output)
	return true
}
// writeJUnitReportXML appends the given test results as one JUnit test suite
// to the XML report file at "output" and prints a summary to stdout.
//
// "rw" is kept for signature compatibility; the report is actually re-read
// and rewritten through the "output" path directly.
// It returns a non-nil error when the results are empty, the report cannot
// be (re)written, or at least one test case failed.
func (t *Tester) writeJUnitReportXML(result Test, rw io.ReadWriter, output string) error {
	if result.expected == nil && result.result == nil {
		return errors.New("test results could not be deserialized")
	}
	// Load previously written suites so this run is appended to them.
	suites := JUnitTestSuites{}
	buf, err := ioutil.ReadFile(output)
	if err != nil {
		// Previously this error was silently ignored and a nil buffer decoded.
		return err
	}
	if derr := xml.NewDecoder(bytes.NewReader(buf)).Decode(&suites); derr != nil {
		// An empty/invalid existing report is not fatal: start a fresh one.
		fmt.Printf("\nUnable to deserialize %s file: %s\n", output, derr)
	}
	elapsed := fmt.Sprintf("%.6f", result.time.Seconds())
	ts := JUnitTestSuite{
		Tests:     len(result.result) + len(result.expected),
		Failures:  len(result.expected),
		Time:      elapsed,
		TestCases: []JUnitTestCase{},
	}
	// Streams that behaved as expected become passing test cases.
	for _, r := range result.result {
		ts.TestCases = append(ts.TestCases, JUnitTestCase{
			Message: "The stream " + r.Address + " had the expected result",
			Time:    elapsed,
		})
	}
	// Entries still in "expected" are failures; attach the error when known.
	for _, e := range result.expected {
		testCase := JUnitTestCase{Time: elapsed}
		if e.err != nil {
			testCase.Failure = &JUnitFailure{
				Message: e.err.Error(),
				Type:    "",
			}
		}
		ts.TestCases = append(ts.TestCases, testCase)
	}
	successCount := 0
	failureCount := 0
	for _, tc := range ts.TestCases {
		if tc.Failure != nil {
			failureCount++
		} else {
			successCount++
		}
	}
	fmt.Println("--- Test summary ---")
	if successCount > 0 {
		total := successCount + failureCount
		fmt.Printf("Results: %d/%d (%d%%)\n", successCount, total, successCount*100/total)
		fmt.Printf("Time: %.6fs\n", result.time.Seconds())
	} else {
		fmt.Printf("No test in success\n")
	}
	suites.TestSuites = append(suites.TestSuites, ts)
	// "out" instead of "bytes": the original name shadowed the bytes package.
	out, err := xml.MarshalIndent(suites, "", "\t")
	if err != nil {
		return err
	}
	w, err := os.OpenFile(output, os.O_WRONLY|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	defer w.Close() // previously leaked
	if _, err := w.Write(out); err != nil {
		// Previously the write error was silently dropped.
		return err
	}
	if failureCount > 0 {
		return errors.New("some cameras were not successfully accessed")
	}
	return nil
}
Fix Go style issues (#65)
In writeResult.go, two error strings began with an uppercase letter and ended with a period; they are now lowercase with no trailing punctuation.
See https://github.com/golang/go/wiki/Errors for more information
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"encoding/xml"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
)
// JUnitTestSuites is a collection of JUnit test suites.
// It is the root element (<testsuites>) of the generated XML report.
type JUnitTestSuites struct {
	XMLName xml.Name `xml:"testsuites"`
	TestSuites []JUnitTestSuite `xml:"testsuite"`
}
// JUnitTestSuite is a single JUnit test suite which may contain many
// testcases.
type JUnitTestSuite struct {
	XMLName xml.Name `xml:"testsuite"`
	Tests int `xml:"tests,attr"`    // total number of test cases in the suite
	Failures int `xml:"failures,attr"` // number of failed test cases
	Time string `xml:"time,attr"`     // suite duration in seconds, pre-formatted
	TestCases []JUnitTestCase `xml:"testcase"`
}
// JUnitTestCase is a single test case with its result.
// A nil Failure marks the case as passing.
type JUnitTestCase struct {
	XMLName xml.Name `xml:"testcase"`
	Message string `xml:"message,attr"`
	Time string `xml:"time,attr"` // case duration in seconds, pre-formatted
	Failure *JUnitFailure `xml:"failure,omitempty"`
}
// JUnitFailure contains data related to a failed test.
type JUnitFailure struct {
	XMLName xml.Name `xml:"failure"`
	Message string `xml:"message,attr"`
	Type string `xml:"type,attr"`
	Contents string `xml:",chardata"`
}
// WriteResults prints the outcome of "result" to stdout and appends it to
// the JUnit XML report located at "output". It reports whether the report
// was written and every test succeeded.
func (t *Tester) WriteResults(result Test, output string) bool {
	// O_CREATE guarantees the report file exists before it is read back.
	report, err := os.OpenFile(output, os.O_RDONLY|os.O_CREATE, 0644)
	if err != nil {
		fmt.Printf("Error opening XML: %s\n", err)
		return false
	}
	defer report.Close()
	if werr := t.writeJUnitReportXML(result, report, output); werr != nil {
		fmt.Printf("The tests were unsuccessful: %s\n", werr)
		return false
	}
	fmt.Printf("-> JUnit XML report written: %s\n", output)
	return true
}
// writeJUnitReportXML appends the given test results as one JUnit test suite
// to the XML report file at "output" and prints a summary to stdout.
//
// "rw" is kept for signature compatibility; the report is actually re-read
// and rewritten through the "output" path directly.
// It returns a non-nil error when the results are empty, the report cannot
// be (re)written, or at least one test case failed.
func (t *Tester) writeJUnitReportXML(result Test, rw io.ReadWriter, output string) error {
	if result.expected == nil && result.result == nil {
		return errors.New("test results could not be deserialized")
	}
	// Load previously written suites so this run is appended to them.
	suites := JUnitTestSuites{}
	buf, err := ioutil.ReadFile(output)
	if err != nil {
		// Previously this error was silently ignored and a nil buffer decoded.
		return err
	}
	if derr := xml.NewDecoder(bytes.NewReader(buf)).Decode(&suites); derr != nil {
		// An empty/invalid existing report is not fatal: start a fresh one.
		fmt.Printf("\nUnable to deserialize %s file: %s\n", output, derr)
	}
	elapsed := fmt.Sprintf("%.6f", result.time.Seconds())
	ts := JUnitTestSuite{
		Tests:     len(result.result) + len(result.expected),
		Failures:  len(result.expected),
		Time:      elapsed,
		TestCases: []JUnitTestCase{},
	}
	// Streams that behaved as expected become passing test cases.
	for _, r := range result.result {
		ts.TestCases = append(ts.TestCases, JUnitTestCase{
			Message: "The stream " + r.Address + " had the expected result",
			Time:    elapsed,
		})
	}
	// Entries still in "expected" are failures; attach the error when known.
	for _, e := range result.expected {
		testCase := JUnitTestCase{Time: elapsed}
		if e.err != nil {
			testCase.Failure = &JUnitFailure{
				Message: e.err.Error(),
				Type:    "",
			}
		}
		ts.TestCases = append(ts.TestCases, testCase)
	}
	successCount := 0
	failureCount := 0
	for _, tc := range ts.TestCases {
		if tc.Failure != nil {
			failureCount++
		} else {
			successCount++
		}
	}
	fmt.Println("--- Test summary ---")
	if successCount > 0 {
		total := successCount + failureCount
		fmt.Printf("Results: %d/%d (%d%%)\n", successCount, total, successCount*100/total)
		fmt.Printf("Time: %.6fs\n", result.time.Seconds())
	} else {
		fmt.Printf("No test in success\n")
	}
	suites.TestSuites = append(suites.TestSuites, ts)
	// "out" instead of "bytes": the original name shadowed the bytes package.
	out, err := xml.MarshalIndent(suites, "", "\t")
	if err != nil {
		return err
	}
	w, err := os.OpenFile(output, os.O_WRONLY|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	defer w.Close() // previously leaked
	if _, err := w.Write(out); err != nil {
		// Previously the write error was silently dropped.
		return err
	}
	if failureCount > 0 {
		return errors.New("some cameras were not successfully accessed")
	}
	return nil
}
|
package mahjong
// Kaze is a wind, identified by its rune (東/南/西/北).
type Kaze rune

const (
    // NOTE(review): only 東風 is declared with type Kaze; the following three
    // are untyped rune constants — confirm whether they should all be Kaze.
    東風 Kaze = '東'
    南風      = '南'
    西風      = '西'
    北風      = '北'
)

// Order is a player's seating order, expressed as a wind.
type Order Kaze

// A Command specifies an action a player can take.
type Command int

const (
    Tsumo Command = iota
    TsumoHoura
    RonHoura
    Chi
    Pong
    AnngKan
    MingKan
    Tahai
    TahaiReach
)

// An Action specifies who does what.
type Action struct {
    Player
    Command
}

// A Game specifies public and private information about the current game (Hanchang),
// such as players, pais in the pile, discarded piles (Ho) information.
// They are changed when a player does an action.
type Game struct {
    state State       // Public information about the current game.
    pile  map[Pai]int // Pais in the pile.
}
// Init initializes the game state.
// TODO: not implemented yet; currently always succeeds.
func (g *Game) Init() error {
    // Fix: the function declared an error result but had no return
    // statement (compile error: "missing return").
    return nil
}
// Randomly pick-up a pai from the pile.
// TODO: not implemented yet; returns the zero Pai.
func (g *Game) pick() Pai {
    // Fix: the function declared a Pai result but had no return statement
    // (compile error: "missing return").
    return Pai{}
}
// Commands returns the available commands for the given player.
// The rule logic is not implemented yet, so the list is always empty
// (but never nil).
func (g Game) Commands(p Player) []Command {
    cmds := make([]Command, 0)
    return cmds
}
// Play the specified action on the game. If the action cannot be executed, an error returns.
// TODO: rule handling is not implemented yet; currently always succeeds.
func (g *Game) Play(a Action) error {
    return nil
}
// Status reports the public state of the game.
// Currently a placeholder: it returns the zero State.
func (g Game) Status() State {
    var s State
    return s
}
// A State specifies public information of the game,
// such as the number of remaining pais in the pile, who discarded which pais (Ho).
type State struct {
    Junnme  int          // Turn number, presumably ("junme") — TODO confirm.
    NumPais int          // The number of remaining tsumoable pais.
    Honnba  int          // How many times the renchan repeats.
    Kyotaku int          // Deposit score.
    Players []PlayerInfo // Public information about players.
    Dora    []Pai        // Revealed dora, presumably the indicators — TODO confirm.
}

// A Player specifies private information of a player.
type Player struct {
    PlayerInfo
    Tehai []Pai // The player's hand.
}

// A PlayerInfo specifies public information of a player.
type PlayerInfo struct {
    Id    int
    Name  string
    Kaze  // Seat wind.
    Score int
    Order // Seating order.
    Ho    []Sutehai // Discarded pais.
    Furo  []Mentsu  // Melds called from other players' discards.
}
Fix the compile errors: the `Init` and `pick` methods declare result types but are missing return statements.
package mahjong
// Kaze is a wind, identified by its rune (東/南/西/北).
type Kaze rune

const (
    // NOTE(review): only 東風 is declared with type Kaze; the following three
    // are untyped rune constants — confirm whether they should all be Kaze.
    東風 Kaze = '東'
    南風      = '南'
    西風      = '西'
    北風      = '北'
)

// Order is a player's seating order, expressed as a wind.
type Order Kaze

// A Command specifies an action a player can take.
type Command int

const (
    Tsumo Command = iota
    TsumoHoura
    RonHoura
    Chi
    Pong
    AnngKan
    MingKan
    Tahai
    TahaiReach
)

// An Action specifies who does what.
type Action struct {
    Player
    Command
}

// A Game specifies public and private information about the current game (Hanchang),
// such as players, pais in the pile, discarded piles (Ho) information.
// They are changed when a player does an action.
type Game struct {
    state State       // Public information about the current game.
    pile  map[Pai]int // Pais in the pile.
}
// Init initializes the game state.
// TODO: not implemented yet; currently always succeeds.
func (g *Game) Init() error {
    return nil
}
// pick randomly picks up a pai from the pile.
// TODO: not implemented yet; returns the zero Pai.
func (g *Game) pick() Pai {
    var p Pai
    return p
}
// Commands returns the available commands for the given player.
// The rule logic is not implemented yet, so the list is always empty
// (but never nil).
func (g Game) Commands(p Player) []Command {
    cmds := make([]Command, 0)
    return cmds
}
// Play the specified action on the game. If the action cannot be executed, an error returns.
// TODO: rule handling is not implemented yet; currently always succeeds.
func (g *Game) Play(a Action) error {
    return nil
}
// Status reports the public state of the game.
// Currently a placeholder: it returns the zero State.
func (g Game) Status() State {
    var s State
    return s
}
// A State specifies public information of the game,
// such as the number of remaining pais in the pile, who discarded which pais (Ho).
type State struct {
    Junnme  int          // Turn number, presumably ("junme") — TODO confirm.
    NumPais int          // The number of remaining tsumoable pais.
    Honnba  int          // How many times the renchan repeats.
    Kyotaku int          // Deposit score.
    Players []PlayerInfo // Public information about players.
    Dora    []Pai        // Revealed dora, presumably the indicators — TODO confirm.
}

// A Player specifies private information of a player.
type Player struct {
    PlayerInfo
    Tehai []Pai // The player's hand.
}

// A PlayerInfo specifies public information of a player.
type PlayerInfo struct {
    Id    int
    Name  string
    Kaze  // Seat wind.
    Score int
    Order // Seating order.
    Ho    []Sutehai // Discarded pais.
    Furo  []Mentsu  // Melds called from other players' discards.
}
|
package main
import (
"bufio"
"encoding/hex"
"encoding/json"
"math"
"net"
"strconv"
"strings"
"sync"
"time"
)
//-0b2b48fe3aef1f88621a0856110a31c01105c4e6c4e6c40a9a820300000000000000;rs=7;
/*
HDR:
MDB Type: 1
Address: 2B48FE (TIS-B track file address)
SV:
NIC: 6
Latitude: +41.4380
Longitude: -84.1056
Altitude: 2300 ft (barometric)
N/S velocity: -65 kt
E/W velocity: -98 kt
Track: 236
Speed: 117 kt
Vertical rate: 0 ft/min (from barometric altitude)
UTC coupling: no
TIS-B site ID: 1
MS:
Emitter category: No information
Callsign: unavailable
Emergency status: No emergency
UAT version: 2
SIL: 2
Transmit MSO: 38
NACp: 8
NACv: 1
NICbaro: 0
Capabilities:
Active modes:
Target track type: true heading
AUXSV:
Sec. altitude: unavailable
*/
const (
    // Identifiers for the receiver that last updated a traffic entry
    // (stored in TrafficInfo.Last_source).
    TRAFFIC_SOURCE_1090ES = 1
    TRAFFIC_SOURCE_UAT    = 2
)

// TrafficInfo is the merged state of one observed target, keyed by its 24-bit
// ICAO address. Fields are filled in incrementally as position, velocity and
// identification messages arrive from the UAT and 1090ES decoders.
type TrafficInfo struct {
    Icao_addr        uint32    // 24-bit ICAO address.
    OnGround         bool      // Set for ground vehicles (UAT air/ground state 2).
    addr_type        uint8     // Address qualifier from the UAT header (frame[0] low 3 bits).
    emitter_category uint8     // UAT emitter category code.
    Lat              float32   // Latitude, degrees.
    Lng              float32   // Longitude, degrees.
    Position_valid   bool      // True once a usable lat/lng has been decoded.
    Alt              int32     // Altitude, feet.
    Track            uint16    // Track, degrees.
    Speed            uint16    // Ground speed — presumably knots, TODO confirm.
    Speed_valid      bool      // True once a usable speed has been decoded.
    Vvel             int16     // Vertical velocity, ft/min.
    Tail             string    // Callsign / tail number, if known.
    Last_seen        time.Time // Time of the last message from this address.
    Last_source      uint8     // TRAFFIC_SOURCE_* of the last update.
}

var traffic map[uint32]TrafficInfo // Active targets by ICAO address. Guarded by trafficMutex.
var trafficMutex *sync.Mutex       // Protects traffic and seenTraffic.
var seenTraffic map[uint32]bool    // Historical list of all ICAO addresses seen.
// cleanupOldEntries drops traffic entries that have received no update for
// more than 60 seconds. The caller must hold trafficMutex.
func cleanupOldEntries() {
    //FIXME: 60 seconds with no update on this address - stop displaying.
    const maxAgeSeconds = 60.0
    for addr, info := range traffic {
        if time.Since(info.Last_seen).Seconds() > maxAgeSeconds {
            delete(traffic, addr)
        }
    }
}
// sendTrafficUpdates prunes stale entries and then emits a GDL90 traffic
// report for every target whose position is known. It holds trafficMutex for
// the duration, so it is safe to call concurrently with the decoders.
func sendTrafficUpdates() {
    trafficMutex.Lock()
    defer trafficMutex.Unlock()
    cleanupOldEntries()
    for _, ti := range traffic {
        if ti.Position_valid {
            makeTrafficReport(ti)
        }
    }
}
// registerTrafficUpdate sends a JSON-serialized TrafficInfo update to the
// attached client via trafficUpdate. Entries without a valid position are
// skipped.
func registerTrafficUpdate(ti TrafficInfo) {
    if !ti.Position_valid { // Don't send unless a valid position exists.
        return
    }
    // NOTE(review): the json.Marshal error is ignored; marshaling this plain
    // struct should not fail, but confirm if fields change.
    tiJSON, _ := json.Marshal(&ti)
    trafficUpdate.Send(tiJSON)
}
// makeTrafficReport encodes ti as a GDL90 "Traffic Report" (message ID 0x14)
// and hands the framed message to sendGDL90. Field packing follows the GDL90
// Data Interface Specification, p.16.
func makeTrafficReport(ti TrafficInfo) {
    msg := make([]byte, 28)
    msg[0] = 0x14                // Message type "Traffic Report".
    msg[1] = 0x10 | ti.addr_type // Alert status (high nibble), address type (low nibble).
    // ICAO address, 24 bits big-endian.
    msg[2] = byte((ti.Icao_addr & 0x00FF0000) >> 16)
    msg[3] = byte((ti.Icao_addr & 0x0000FF00) >> 8)
    msg[4] = byte((ti.Icao_addr & 0x000000FF))
    // Latitude and longitude, each packed into 3 bytes by makeLatLng.
    lat := float32(ti.Lat)
    tmp := makeLatLng(lat)
    msg[5] = tmp[0] // Latitude.
    msg[6] = tmp[1] // Latitude.
    msg[7] = tmp[2] // Latitude.
    lng := float32(ti.Lng)
    tmp = makeLatLng(lng)
    msg[8] = tmp[0]  // Longitude.
    msg[9] = tmp[1]  // Longitude.
    msg[10] = tmp[2] // Longitude.
    // Altitude: 25 ft resolution with a 1,000 ft offset.
    // GDL 90 Data Interface Specification examples:
    // where 1,000 foot offset and 25 foot resolution (1,000 / 25 = 40)
    //  -1,000 feet               0x000
    //  0 feet                    0x028
    //  +1000 feet                0x050
    //  +101,350 feet             0xFFE
    //  Invalid or unavailable    0xFFF
    //
    // Algo example at: https://play.golang.org/p/VXCckSdsvT
    //
    var alt int16
    if ti.Alt < -1000 || ti.Alt > 101350 {
        alt = 0x0FFF
    } else {
        // output guaranteed to be between 0x0000 and 0x0FFE
        alt = int16((ti.Alt / 25) + 40)
    }
    msg[11] = byte((alt & 0xFF0) >> 4) // Altitude, high 8 bits.
    // Fix: the previous first assignment to msg[12] was immediately
    // overwritten (dead store); keep only the combined form.
    // Altitude low nibble | misc bits (0x3 = true heading).
    msg[12] = byte(((alt & 0x00F) << 4) | 0x3)
    if !ti.OnGround {
        msg[12] = msg[12] | 0x08 // Airborne.
    }
    msg[13] = 0x11 //FIXME.
    // Horizontal velocity (speed), 12 bits spanning msg[14..15].
    msg[14] = byte((ti.Speed & 0x0FF0) >> 4)
    msg[15] = byte((ti.Speed & 0x000F) << 4)
    // Vertical velocity, 64 fpm resolution, 12 bits spanning msg[15..16].
    vvel := ti.Vvel / 64
    msg[15] = msg[15] | byte((vvel&0x0F00)>>8)
    msg[16] = byte(vvel & 0x00FF)
    // Track, quantized to ~1.4 degree steps.
    trk := uint8(float32(ti.Track) / TRACK_RESOLUTION)
    msg[17] = byte(trk)
    msg[18] = ti.emitter_category
    // msg[19] to msg[26] are the "call sign" (tail). Only space, 0-9 and A-Z
    // are legal (see p.24, FAA ref); 'e'/'u' are let through because they are
    // used as debug source markers elsewhere in this file.
    for i := 0; i < len(ti.Tail) && i < 8; i++ {
        c := byte(ti.Tail[i])
        if c != 20 && !((c >= 48) && (c <= 57)) && !((c >= 65) && (c <= 90)) && c != 'e' && c != 'u' {
            c = byte(20)
        }
        msg[19+i] = c
    }
    //TODO: text identifier (tail).
    sendGDL90(prepareMessage(msg), false)
}
// parseDownlinkReport decodes a hex-encoded UAT downlink report (with a
// one-character prefix) and merges the result into the shared traffic map.
// Field offsets follow the UAT MDB layout: HDR in frame[0..3], the SV state
// vector in frame[4..16] and, for message types 1 and 3, the MS section
// (frame[17..22]) carrying the emitter category and callsign.
func parseDownlinkReport(s string) {
    var ti TrafficInfo
    s = s[1:] // Strip the one-character prefix.
    frame := make([]byte, len(s)/2)
    // NOTE(review): the hex.Decode error is ignored; a malformed or truncated
    // report could leave zero bytes here or panic on the indexing below —
    // confirm the upstream radio always delivers well-formed reports.
    hex.Decode(frame, []byte(s))
    // Header.
    msg_type := (uint8(frame[0]) >> 3) & 0x1f
    // Extract emitter category (MS section; message types 1 and 3 only).
    if msg_type == 1 || msg_type == 3 {
        v := (uint16(frame[17]) << 8) | (uint16(frame[18]))
        ti.emitter_category = uint8((v / 1600) % 40)
    }
    icao_addr := (uint32(frame[1]) << 16) | (uint32(frame[2]) << 8) | uint32(frame[3])
    trafficMutex.Lock()
    defer trafficMutex.Unlock()
    if curTi, ok := traffic[icao_addr]; ok { // Retrieve the current entry, as it may contain some useful information like "tail" from 1090ES.
        ti = curTi
    }
    ti.Icao_addr = icao_addr
    ti.addr_type = uint8(frame[0]) & 0x07
    // OK.
    // fmt.Printf("%d, %d, %06X\n", msg_type, ti.addr_type, ti.Icao_addr)
    nic := uint8(frame[11]) & 15 //TODO: Meaning?
    // Raw angular weighted binary latitude (23 bits) / longitude (24 bits).
    raw_lat := (uint32(frame[4]) << 15) | (uint32(frame[5]) << 7) | (uint32(frame[6]) >> 1)
    raw_lon := ((uint32(frame[6]) & 0x01) << 23) | (uint32(frame[7]) << 15) | (uint32(frame[8]) << 7) | (uint32(frame[9]) >> 1)
    lat := float32(0.0)
    lng := float32(0.0)
    position_valid := false
    if nic != 0 || raw_lat != 0 || raw_lon != 0 {
        position_valid = true
        // Scale to degrees; values past 90/180 wrap into the negative range.
        lat = float32(raw_lat) * 360.0 / 16777216.0
        if lat > 90 {
            lat = lat - 180
        }
        lng = float32(raw_lon) * 360.0 / 16777216.0
        if lng > 180 {
            lng = lng - 360
        }
    }
    ti.Lat = lat
    ti.Lng = lng
    ti.Position_valid = position_valid
    // Altitude: 25 ft resolution offset by -1,000 ft; raw 0 means unavailable.
    raw_alt := (int32(frame[10]) << 4) | ((int32(frame[11]) & 0xf0) >> 4)
    // alt_geo := false // Barometric if not geometric.
    alt := int32(0)
    if raw_alt != 0 {
        // alt_geo = (uint8(frame[9]) & 1) != 0
        alt = ((raw_alt - 1) * 25) - 1000
    }
    ti.Alt = alt
    //OK.
    // fmt.Printf("%d, %t, %f, %f, %t, %d\n", nic, position_valid, lat, lng, alt_geo, alt)
    airground_state := (uint8(frame[12]) >> 6) & 0x03
    //OK.
    // fmt.Printf("%d\n", airground_state)
    ns_vel := int16(0)
    ew_vel := int16(0)
    track := uint16(0)
    speed_valid := false
    speed := uint16(0)
    vvel := int16(0)
    // vvel_geo := false
    if airground_state == 0 || airground_state == 1 { // Subsonic. Supersonic.
        // N/S velocity: 11-bit sign/magnitude; 1 kt units (x4 when supersonic).
        ns_vel_valid := false
        ew_vel_valid := false
        raw_ns := ((int16(frame[12]) & 0x1f) << 6) | ((int16(frame[13]) & 0xfc) >> 2)
        if (raw_ns & 0x3ff) != 0 {
            ns_vel_valid = true
            ns_vel = ((raw_ns & 0x3ff) - 1)
            if (raw_ns & 0x400) != 0 {
                ns_vel = 0 - ns_vel
            }
            if airground_state == 1 { // Supersonic.
                ns_vel = ns_vel * 4
            }
        }
        // E/W velocity, same encoding.
        raw_ew := ((int16(frame[13]) & 0x03) << 9) | (int16(frame[14]) << 1) | ((int16(frame[15] & 0x80)) >> 7)
        if (raw_ew & 0x3ff) != 0 {
            ew_vel_valid = true
            ew_vel = (raw_ew & 0x3ff) - 1
            if (raw_ew & 0x400) != 0 {
                ew_vel = 0 - ew_vel
            }
            if airground_state == 1 { // Supersonic.
                ew_vel = ew_vel * 4
            }
        }
        if ns_vel_valid && ew_vel_valid {
            if ns_vel != 0 && ew_vel != 0 {
                //TODO: Track type
                // Derive track (compass degrees) from the velocity vector.
                track = uint16((360 + 90 - (int16(math.Atan2(float64(ns_vel), float64(ew_vel)) * 180 / math.Pi))) % 360)
            }
            speed_valid = true
            speed = uint16(math.Sqrt(float64((ns_vel * ns_vel) + (ew_vel * ew_vel))))
        }
        // Vertical velocity: 9-bit sign/magnitude, 64 fpm units.
        raw_vvel := ((int16(frame[15]) & 0x7f) << 4) | ((int16(frame[16]) & 0xf0) >> 4)
        if (raw_vvel & 0x1ff) != 0 {
            // vvel_geo = (raw_vvel & 0x400) == 0
            vvel = ((raw_vvel & 0x1ff) - 1) * 64
            if (raw_vvel & 0x200) != 0 {
                vvel = 0 - vvel
            }
        }
    } else if airground_state == 2 { // Ground vehicle.
        ti.OnGround = true
        raw_gs := ((uint16(frame[12]) & 0x1f) << 6) | ((uint16(frame[13]) & 0xfc) >> 2)
        if raw_gs != 0 {
            speed_valid = true
            speed = ((raw_gs & 0x3ff) - 1)
        }
        raw_track := ((uint16(frame[13]) & 0x03) << 9) | (uint16(frame[14]) << 1) | ((uint16(frame[15]) & 0x80) >> 7)
        //tt := ((raw_track & 0x0600) >> 9)
        //FIXME: tt == 1 TT_TRACK. tt == 2 TT_MAG_HEADING. tt == 3 TT_TRUE_HEADING.
        track = uint16((raw_track & 0x1ff) * 360 / 512)
        // Dimensions of vehicle - skip.
    }
    ti.Track = track
    ti.Speed = speed
    ti.Vvel = vvel
    ti.Speed_valid = speed_valid
    //OK.
    // fmt.Printf("ns_vel %d, ew_vel %d, track %d, speed_valid %t, speed %d, vvel_geo %t, vvel %d\n", ns_vel, ew_vel, track, speed_valid, speed, vvel_geo, vvel)
    /*
        utc_coupled := false
        tisb_site_id := uint8(0)
        if (uint8(frame[0]) & 7) == 2 || (uint8(frame[0]) & 7) == 3 { //TODO: Meaning?
            tisb_site_id = uint8(frame[16]) & 0x0f
        } else {
            utc_coupled = (uint8(frame[16]) & 0x08) != 0
        }
    */
    //OK.
    // fmt.Printf("tisb_site_id %d, utc_coupled %t\n", tisb_site_id, utc_coupled)
    ti.Last_source = TRAFFIC_SOURCE_UAT
    ti.Last_seen = time.Now()
    // Parse tail number, if available. Three 16-bit words each pack up to
    // three characters in base 40.
    if msg_type == 1 || msg_type == 3 { // Need "MS" portion of message.
        // NOTE(review): "…QRTSU…" looks like a transposition of "…QRSTU…" —
        // confirm against the UAT base-40 character table.
        base40_alphabet := string("0123456789ABCDEFGHIJKLMNOPQRTSUVWXYZ ..")
        tail := ""
        v := (uint16(frame[17]) << 8) | uint16(frame[18])
        tail += string(base40_alphabet[(v/40)%40])
        tail += string(base40_alphabet[v%40])
        v = (uint16(frame[19]) << 8) | uint16(frame[20])
        tail += string(base40_alphabet[(v/1600)%40])
        tail += string(base40_alphabet[(v/40)%40])
        tail += string(base40_alphabet[v%40])
        v = (uint16(frame[21]) << 8) | uint16(frame[22])
        tail += string(base40_alphabet[(v/1600)%40])
        tail += string(base40_alphabet[(v/40)%40])
        tail += string(base40_alphabet[v%40])
        tail = strings.Trim(tail, " ")
        ti.Tail = tail
    }
    if globalSettings.DEBUG {
        // This is a hack to show the source of the traffic in ForeFlight.
        if len(ti.Tail) == 0 || (len(ti.Tail) != 0 && len(ti.Tail) < 8 && ti.Tail[0] != 'U') {
            ti.Tail = "u" + ti.Tail
        }
    }
    traffic[ti.Icao_addr] = ti
    registerTrafficUpdate(ti)
    seenTraffic[ti.Icao_addr] = true // Mark as seen.
}
// esListen connects to a local dump1090 instance (SBS-1 BaseStation CSV on
// TCP port 30003) and continuously folds decoded 1090ES messages into the
// shared traffic map. It reconnects after any read error and idles while ES
// support is disabled. Intended to run as a goroutine (see initTraffic).
func esListen() {
    for {
        if !globalSettings.ES_Enabled {
            time.Sleep(1 * time.Second) // Don't do much unless ES is actually enabled.
            continue
        }
        dump1090Addr := "127.0.0.1:30003"
        inConn, err := net.Dial("tcp", dump1090Addr)
        if err != nil { // Local connection failed.
            time.Sleep(1 * time.Second)
            continue
        }
        rdr := bufio.NewReader(inConn)
        for globalSettings.ES_Enabled {
            buf, err := rdr.ReadString('\n')
            if err != nil { // Must have disconnected?
                break
            }
            buf = strings.Trim(buf, "\r\n")
            //log.Printf("%s\n", buf)
            replayLog(buf, MSGCLASS_ES) // Log the raw message.
            // SBS-1 is CSV: field 1 is the message subtype, field 4 the ICAO
            // address (hex).
            x := strings.Split(buf, ",")
            if len(x) < 22 {
                continue
            }
            icao := x[4]
            icaoDecf, err := strconv.ParseInt(icao, 16, 32)
            if err != nil {
                continue
            }
            // Log the message after we've determined that it at least meets some requirements on the fields.
            var thisMsg msg
            thisMsg.MessageClass = MSGCLASS_ES
            thisMsg.TimeReceived = time.Now()
            thisMsg.Data = []byte(buf)
            MsgLog = append(MsgLog, thisMsg)
            // Begin to parse the message.
            icaoDec := uint32(icaoDecf)
            trafficMutex.Lock()
            // Retrieve previous information on this ICAO code.
            var ti TrafficInfo
            if val, ok := traffic[icaoDec]; ok {
                ti = val
            }
            ti.Icao_addr = icaoDec
            //FIXME: Some stale information will be renewed.
            valid_change := true
            if x[1] == "3" { // ES airborne position message. DF17 BDS 0,5.
                //MSG,3,111,11111,AC2BB7,111111,2015/07/28,03:59:12.363,2015/07/28,03:59:12.353,,5550,,,42.35847,-83.42212,,,,,,0
                //MSG,3,111,11111,A5D007,111111, , , , ,,35000,,,42.47454,-82.57433,,,0,0,0,0
                alt := x[11]
                lat := x[14]
                lng := x[15]
                if len(alt) == 0 || len(lat) == 0 || len(lng) == 0 { //FIXME.
                    valid_change = false
                }
                altFloat, err := strconv.ParseFloat(alt, 32)
                if err != nil {
                    // log.Printf("err parsing alt (%s): %s\n", alt, err.Error())
                    valid_change = false
                }
                latFloat, err := strconv.ParseFloat(lat, 32)
                if err != nil {
                    // log.Printf("err parsing lat (%s): %s\n", lat, err.Error())
                    valid_change = false
                }
                lngFloat, err := strconv.ParseFloat(lng, 32)
                if err != nil {
                    // log.Printf("err parsing lng (%s): %s\n", lng, err.Error())
                    valid_change = false
                }
                //log.Printf("icao=%s, icaoDec=%d, alt=%s, lat=%s, lng=%s\n", icao, icaoDec, alt, lat, lng)
                if valid_change {
                    ti.Alt = int32(altFloat)
                    ti.Lat = float32(latFloat)
                    ti.Lng = float32(lngFloat)
                    ti.Position_valid = true
                }
            }
            if x[1] == "4" { // ES airborne velocity message. DF17 BDS 0,9.
                // MSG,4,111,11111,A3B557,111111,2015/07/28,06:13:36.417,2015/07/28,06:13:36.398,,,414,278,,,-64,,,,,0
                speed := x[12]
                track := x[13]
                vvel := x[16]
                if len(speed) == 0 || len(track) == 0 || len(vvel) == 0 {
                    valid_change = false
                }
                speedFloat, err := strconv.ParseFloat(speed, 32)
                if err != nil {
                    // log.Printf("err parsing speed (%s): %s\n", speed, err.Error())
                    valid_change = false
                }
                trackFloat, err := strconv.ParseFloat(track, 32)
                if err != nil {
                    // log.Printf("err parsing track (%s): %s\n", track, err.Error())
                    valid_change = false
                }
                vvelFloat, err := strconv.ParseFloat(vvel, 32)
                if err != nil {
                    // log.Printf("err parsing vvel (%s): %s\n", vvel, err.Error())
                    valid_change = false
                }
                //log.Printf("icao=%s, icaoDec=%d, vel=%s, hdg=%s, vr=%s\n", icao, icaoDec, vel, hdg, vr)
                if valid_change {
                    ti.Speed = uint16(speedFloat)
                    ti.Track = uint16(trackFloat)
                    ti.Vvel = int16(vvelFloat)
                    ti.Speed_valid = true
                }
            }
            if x[1] == "1" { // ES identification and category. DF17 BDS 0,8.
                // MSG,1,,,%02X%02X%02X,,,,,,%s,,,,,,,,0,0,0,0
                tail := x[10]
                if len(tail) == 0 {
                    valid_change = false
                }
                if valid_change {
                    ti.Tail = tail
                }
            }
            // Update "last seen" (any type of message, as long as the ICAO addr can be parsed).
            ti.Last_source = TRAFFIC_SOURCE_1090ES
            ti.Last_seen = time.Now()
            ti.addr_type = 0    //FIXME: ADS-B with ICAO address. Not recognized by ForeFlight.
            ti.emitter_category = 0x01 //FIXME. "Light"
            // This is a hack to show the source of the traffic in ForeFlight.
            ti.Tail = strings.Trim(ti.Tail, " ")
            if globalSettings.DEBUG {
                if len(ti.Tail) == 0 || (len(ti.Tail) != 0 && len(ti.Tail) < 8 && ti.Tail[0] != 'E') {
                    ti.Tail = "e" + ti.Tail
                }
            }
            traffic[icaoDec] = ti // Update information on this ICAO code.
            registerTrafficUpdate(ti)
            seenTraffic[icaoDec] = true // Mark as seen.
            trafficMutex.Unlock()
        }
        // Fix: close the connection before looping back to re-dial; the
        // previous version leaked one file descriptor per disconnect.
        inConn.Close()
    }
}
func initTraffic() {
traffic = make(map[uint32]TrafficInfo)
seenTraffic = make(map[uint32]bool)
trafficMutex = &sync.Mutex{}
go esListen()
}
Add SBS1 message type 5 "Surveillance alt message".
package main
import (
"bufio"
"encoding/hex"
"encoding/json"
"math"
"net"
"strconv"
"strings"
"sync"
"time"
)
//-0b2b48fe3aef1f88621a0856110a31c01105c4e6c4e6c40a9a820300000000000000;rs=7;
/*
HDR:
MDB Type: 1
Address: 2B48FE (TIS-B track file address)
SV:
NIC: 6
Latitude: +41.4380
Longitude: -84.1056
Altitude: 2300 ft (barometric)
N/S velocity: -65 kt
E/W velocity: -98 kt
Track: 236
Speed: 117 kt
Vertical rate: 0 ft/min (from barometric altitude)
UTC coupling: no
TIS-B site ID: 1
MS:
Emitter category: No information
Callsign: unavailable
Emergency status: No emergency
UAT version: 2
SIL: 2
Transmit MSO: 38
NACp: 8
NACv: 1
NICbaro: 0
Capabilities:
Active modes:
Target track type: true heading
AUXSV:
Sec. altitude: unavailable
*/
const (
    // Identifiers for the receiver that last updated a traffic entry
    // (stored in TrafficInfo.Last_source).
    TRAFFIC_SOURCE_1090ES = 1
    TRAFFIC_SOURCE_UAT    = 2
)

// TrafficInfo is the merged state of one observed target, keyed by its 24-bit
// ICAO address. Fields are filled in incrementally as position, velocity and
// identification messages arrive from the UAT and 1090ES decoders.
type TrafficInfo struct {
    Icao_addr        uint32    // 24-bit ICAO address.
    OnGround         bool      // Set for ground vehicles (UAT air/ground state 2).
    addr_type        uint8     // Address qualifier from the UAT header (frame[0] low 3 bits).
    emitter_category uint8     // UAT emitter category code.
    Lat              float32   // Latitude, degrees.
    Lng              float32   // Longitude, degrees.
    Position_valid   bool      // True once a usable lat/lng has been decoded.
    Alt              int32     // Altitude, feet.
    Track            uint16    // Track, degrees.
    Speed            uint16    // Ground speed — presumably knots, TODO confirm.
    Speed_valid      bool      // True once a usable speed has been decoded.
    Vvel             int16     // Vertical velocity, ft/min.
    Tail             string    // Callsign / tail number, if known.
    Last_seen        time.Time // Time of the last message from this address.
    Last_source      uint8     // TRAFFIC_SOURCE_* of the last update.
}

var traffic map[uint32]TrafficInfo // Active targets by ICAO address. Guarded by trafficMutex.
var trafficMutex *sync.Mutex       // Protects traffic and seenTraffic.
var seenTraffic map[uint32]bool    // Historical list of all ICAO addresses seen.
// cleanupOldEntries drops traffic entries that have received no update for
// more than 60 seconds. The caller must hold trafficMutex.
func cleanupOldEntries() {
    //FIXME: 60 seconds with no update on this address - stop displaying.
    const maxAgeSeconds = 60.0
    for addr, info := range traffic {
        if time.Since(info.Last_seen).Seconds() > maxAgeSeconds {
            delete(traffic, addr)
        }
    }
}
// sendTrafficUpdates prunes stale entries and then emits a GDL90 traffic
// report for every target whose position is known. It holds trafficMutex for
// the duration, so it is safe to call concurrently with the decoders.
func sendTrafficUpdates() {
    trafficMutex.Lock()
    defer trafficMutex.Unlock()
    cleanupOldEntries()
    for _, ti := range traffic {
        if ti.Position_valid {
            makeTrafficReport(ti)
        }
    }
}
// registerTrafficUpdate sends a JSON-serialized TrafficInfo update to the
// attached client via trafficUpdate. Entries without a valid position are
// skipped.
func registerTrafficUpdate(ti TrafficInfo) {
    if !ti.Position_valid { // Don't send unless a valid position exists.
        return
    }
    // NOTE(review): the json.Marshal error is ignored; marshaling this plain
    // struct should not fail, but confirm if fields change.
    tiJSON, _ := json.Marshal(&ti)
    trafficUpdate.Send(tiJSON)
}
// makeTrafficReport encodes ti as a GDL90 "Traffic Report" (message ID 0x14)
// and hands the framed message to sendGDL90. Field packing follows the GDL90
// Data Interface Specification, p.16.
func makeTrafficReport(ti TrafficInfo) {
    msg := make([]byte, 28)
    msg[0] = 0x14                // Message type "Traffic Report".
    msg[1] = 0x10 | ti.addr_type // Alert status (high nibble), address type (low nibble).
    // ICAO address, 24 bits big-endian.
    msg[2] = byte((ti.Icao_addr & 0x00FF0000) >> 16)
    msg[3] = byte((ti.Icao_addr & 0x0000FF00) >> 8)
    msg[4] = byte((ti.Icao_addr & 0x000000FF))
    // Latitude and longitude, each packed into 3 bytes by makeLatLng.
    lat := float32(ti.Lat)
    tmp := makeLatLng(lat)
    msg[5] = tmp[0] // Latitude.
    msg[6] = tmp[1] // Latitude.
    msg[7] = tmp[2] // Latitude.
    lng := float32(ti.Lng)
    tmp = makeLatLng(lng)
    msg[8] = tmp[0]  // Longitude.
    msg[9] = tmp[1]  // Longitude.
    msg[10] = tmp[2] // Longitude.
    // Altitude: 25 ft resolution with a 1,000 ft offset.
    // GDL 90 Data Interface Specification examples:
    // where 1,000 foot offset and 25 foot resolution (1,000 / 25 = 40)
    //  -1,000 feet               0x000
    //  0 feet                    0x028
    //  +1000 feet                0x050
    //  +101,350 feet             0xFFE
    //  Invalid or unavailable    0xFFF
    //
    // Algo example at: https://play.golang.org/p/VXCckSdsvT
    //
    var alt int16
    if ti.Alt < -1000 || ti.Alt > 101350 {
        alt = 0x0FFF
    } else {
        // output guaranteed to be between 0x0000 and 0x0FFE
        alt = int16((ti.Alt / 25) + 40)
    }
    msg[11] = byte((alt & 0xFF0) >> 4) // Altitude, high 8 bits.
    // Fix: the previous first assignment to msg[12] was immediately
    // overwritten (dead store); keep only the combined form.
    // Altitude low nibble | misc bits (0x3 = true heading).
    msg[12] = byte(((alt & 0x00F) << 4) | 0x3)
    if !ti.OnGround {
        msg[12] = msg[12] | 0x08 // Airborne.
    }
    msg[13] = 0x11 //FIXME.
    // Horizontal velocity (speed), 12 bits spanning msg[14..15].
    msg[14] = byte((ti.Speed & 0x0FF0) >> 4)
    msg[15] = byte((ti.Speed & 0x000F) << 4)
    // Vertical velocity, 64 fpm resolution, 12 bits spanning msg[15..16].
    vvel := ti.Vvel / 64
    msg[15] = msg[15] | byte((vvel&0x0F00)>>8)
    msg[16] = byte(vvel & 0x00FF)
    // Track, quantized to ~1.4 degree steps.
    trk := uint8(float32(ti.Track) / TRACK_RESOLUTION)
    msg[17] = byte(trk)
    msg[18] = ti.emitter_category
    // msg[19] to msg[26] are the "call sign" (tail). Only space, 0-9 and A-Z
    // are legal (see p.24, FAA ref); 'e'/'u' are let through because they are
    // used as debug source markers elsewhere in this file.
    for i := 0; i < len(ti.Tail) && i < 8; i++ {
        c := byte(ti.Tail[i])
        if c != 20 && !((c >= 48) && (c <= 57)) && !((c >= 65) && (c <= 90)) && c != 'e' && c != 'u' {
            c = byte(20)
        }
        msg[19+i] = c
    }
    //TODO: text identifier (tail).
    sendGDL90(prepareMessage(msg), false)
}
// parseDownlinkReport decodes a hex-encoded UAT downlink report (with a
// one-character prefix) and merges the result into the shared traffic map.
// Field offsets follow the UAT MDB layout: HDR in frame[0..3], the SV state
// vector in frame[4..16] and, for message types 1 and 3, the MS section
// (frame[17..22]) carrying the emitter category and callsign.
func parseDownlinkReport(s string) {
    var ti TrafficInfo
    if len(s) < 2 {
        return // Nothing to decode after stripping the prefix.
    }
    s = s[1:] // Strip the one-character prefix.
    frame := make([]byte, len(s)/2)
    // Fix: the hex.Decode error was ignored; a malformed or truncated report
    // could previously index past the decoded bytes and panic. Require at
    // least the HDR + SV portion (frame[0..16]).
    if _, err := hex.Decode(frame, []byte(s)); err != nil || len(frame) < 17 {
        return
    }
    // Header.
    msg_type := (uint8(frame[0]) >> 3) & 0x1f
    // Extract emitter category (MS section; message types 1 and 3 only).
    if (msg_type == 1 || msg_type == 3) && len(frame) >= 19 {
        v := (uint16(frame[17]) << 8) | (uint16(frame[18]))
        ti.emitter_category = uint8((v / 1600) % 40)
    }
    icao_addr := (uint32(frame[1]) << 16) | (uint32(frame[2]) << 8) | uint32(frame[3])
    trafficMutex.Lock()
    defer trafficMutex.Unlock()
    if curTi, ok := traffic[icao_addr]; ok { // Retrieve the current entry, as it may contain some useful information like "tail" from 1090ES.
        ti = curTi
    }
    ti.Icao_addr = icao_addr
    ti.addr_type = uint8(frame[0]) & 0x07
    nic := uint8(frame[11]) & 15 //TODO: Meaning?
    // Raw angular weighted binary latitude (23 bits) / longitude (24 bits).
    raw_lat := (uint32(frame[4]) << 15) | (uint32(frame[5]) << 7) | (uint32(frame[6]) >> 1)
    raw_lon := ((uint32(frame[6]) & 0x01) << 23) | (uint32(frame[7]) << 15) | (uint32(frame[8]) << 7) | (uint32(frame[9]) >> 1)
    lat := float32(0.0)
    lng := float32(0.0)
    position_valid := false
    if nic != 0 || raw_lat != 0 || raw_lon != 0 {
        position_valid = true
        // Scale to degrees; values past 90/180 wrap into the negative range.
        lat = float32(raw_lat) * 360.0 / 16777216.0
        if lat > 90 {
            lat = lat - 180
        }
        lng = float32(raw_lon) * 360.0 / 16777216.0
        if lng > 180 {
            lng = lng - 360
        }
    }
    ti.Lat = lat
    ti.Lng = lng
    ti.Position_valid = position_valid
    // Altitude: 25 ft resolution offset by -1,000 ft; raw 0 means unavailable.
    raw_alt := (int32(frame[10]) << 4) | ((int32(frame[11]) & 0xf0) >> 4)
    // alt_geo := false // Barometric if not geometric.
    alt := int32(0)
    if raw_alt != 0 {
        // alt_geo = (uint8(frame[9]) & 1) != 0
        alt = ((raw_alt - 1) * 25) - 1000
    }
    ti.Alt = alt
    airground_state := (uint8(frame[12]) >> 6) & 0x03
    ns_vel := int16(0)
    ew_vel := int16(0)
    track := uint16(0)
    speed_valid := false
    speed := uint16(0)
    vvel := int16(0)
    // vvel_geo := false
    if airground_state == 0 || airground_state == 1 { // Subsonic. Supersonic.
        // N/S velocity: 11-bit sign/magnitude; 1 kt units (x4 when supersonic).
        ns_vel_valid := false
        ew_vel_valid := false
        raw_ns := ((int16(frame[12]) & 0x1f) << 6) | ((int16(frame[13]) & 0xfc) >> 2)
        if (raw_ns & 0x3ff) != 0 {
            ns_vel_valid = true
            ns_vel = ((raw_ns & 0x3ff) - 1)
            if (raw_ns & 0x400) != 0 {
                ns_vel = 0 - ns_vel
            }
            if airground_state == 1 { // Supersonic.
                ns_vel = ns_vel * 4
            }
        }
        // E/W velocity, same encoding.
        raw_ew := ((int16(frame[13]) & 0x03) << 9) | (int16(frame[14]) << 1) | ((int16(frame[15] & 0x80)) >> 7)
        if (raw_ew & 0x3ff) != 0 {
            ew_vel_valid = true
            ew_vel = (raw_ew & 0x3ff) - 1
            if (raw_ew & 0x400) != 0 {
                ew_vel = 0 - ew_vel
            }
            if airground_state == 1 { // Supersonic.
                ew_vel = ew_vel * 4
            }
        }
        if ns_vel_valid && ew_vel_valid {
            if ns_vel != 0 && ew_vel != 0 {
                //TODO: Track type
                // Derive track (compass degrees) from the velocity vector.
                track = uint16((360 + 90 - (int16(math.Atan2(float64(ns_vel), float64(ew_vel)) * 180 / math.Pi))) % 360)
            }
            speed_valid = true
            speed = uint16(math.Sqrt(float64((ns_vel * ns_vel) + (ew_vel * ew_vel))))
        }
        // Vertical velocity: 9-bit sign/magnitude, 64 fpm units.
        raw_vvel := ((int16(frame[15]) & 0x7f) << 4) | ((int16(frame[16]) & 0xf0) >> 4)
        if (raw_vvel & 0x1ff) != 0 {
            // vvel_geo = (raw_vvel & 0x400) == 0
            vvel = ((raw_vvel & 0x1ff) - 1) * 64
            if (raw_vvel & 0x200) != 0 {
                vvel = 0 - vvel
            }
        }
    } else if airground_state == 2 { // Ground vehicle.
        ti.OnGround = true
        raw_gs := ((uint16(frame[12]) & 0x1f) << 6) | ((uint16(frame[13]) & 0xfc) >> 2)
        if raw_gs != 0 {
            speed_valid = true
            speed = ((raw_gs & 0x3ff) - 1)
        }
        raw_track := ((uint16(frame[13]) & 0x03) << 9) | (uint16(frame[14]) << 1) | ((uint16(frame[15]) & 0x80) >> 7)
        //tt := ((raw_track & 0x0600) >> 9)
        //FIXME: tt == 1 TT_TRACK. tt == 2 TT_MAG_HEADING. tt == 3 TT_TRUE_HEADING.
        track = uint16((raw_track & 0x1ff) * 360 / 512)
        // Dimensions of vehicle - skip.
    }
    ti.Track = track
    ti.Speed = speed
    ti.Vvel = vvel
    ti.Speed_valid = speed_valid
    /*
        utc_coupled := false
        tisb_site_id := uint8(0)
        if (uint8(frame[0]) & 7) == 2 || (uint8(frame[0]) & 7) == 3 { //TODO: Meaning?
            tisb_site_id = uint8(frame[16]) & 0x0f
        } else {
            utc_coupled = (uint8(frame[16]) & 0x08) != 0
        }
    */
    ti.Last_source = TRAFFIC_SOURCE_UAT
    ti.Last_seen = time.Now()
    // Parse tail number, if available. Three 16-bit words each pack up to
    // three characters in base 40. Guard against short frames (fix).
    if (msg_type == 1 || msg_type == 3) && len(frame) >= 23 { // Need "MS" portion of message.
        // NOTE(review): "…QRTSU…" looks like a transposition of "…QRSTU…" —
        // confirm against the UAT base-40 character table.
        base40_alphabet := string("0123456789ABCDEFGHIJKLMNOPQRTSUVWXYZ ..")
        tail := ""
        v := (uint16(frame[17]) << 8) | uint16(frame[18])
        tail += string(base40_alphabet[(v/40)%40])
        tail += string(base40_alphabet[v%40])
        v = (uint16(frame[19]) << 8) | uint16(frame[20])
        tail += string(base40_alphabet[(v/1600)%40])
        tail += string(base40_alphabet[(v/40)%40])
        tail += string(base40_alphabet[v%40])
        v = (uint16(frame[21]) << 8) | uint16(frame[22])
        tail += string(base40_alphabet[(v/1600)%40])
        tail += string(base40_alphabet[(v/40)%40])
        tail += string(base40_alphabet[v%40])
        tail = strings.Trim(tail, " ")
        ti.Tail = tail
    }
    if globalSettings.DEBUG {
        // This is a hack to show the source of the traffic in ForeFlight.
        if len(ti.Tail) == 0 || (len(ti.Tail) != 0 && len(ti.Tail) < 8 && ti.Tail[0] != 'U') {
            ti.Tail = "u" + ti.Tail
        }
    }
    traffic[ti.Icao_addr] = ti
    registerTrafficUpdate(ti)
    seenTraffic[ti.Icao_addr] = true // Mark as seen.
}
// esListen connects to a local dump1090 instance (SBS-1 BaseStation CSV on
// TCP port 30003) and continuously folds decoded 1090ES messages into the
// shared traffic map. It reconnects after any read error and idles while ES
// support is disabled. Intended to run as a goroutine (see initTraffic).
func esListen() {
    for {
        if !globalSettings.ES_Enabled {
            time.Sleep(1 * time.Second) // Don't do much unless ES is actually enabled.
            continue
        }
        dump1090Addr := "127.0.0.1:30003"
        inConn, err := net.Dial("tcp", dump1090Addr)
        if err != nil { // Local connection failed.
            time.Sleep(1 * time.Second)
            continue
        }
        rdr := bufio.NewReader(inConn)
        for globalSettings.ES_Enabled {
            buf, err := rdr.ReadString('\n')
            if err != nil { // Must have disconnected?
                break
            }
            buf = strings.Trim(buf, "\r\n")
            //log.Printf("%s\n", buf)
            replayLog(buf, MSGCLASS_ES) // Log the raw message.
            // SBS-1 is CSV: field 1 is the message subtype, field 4 the ICAO
            // address (hex).
            x := strings.Split(buf, ",")
            if len(x) < 22 {
                continue
            }
            icao := x[4]
            icaoDecf, err := strconv.ParseInt(icao, 16, 32)
            if err != nil {
                continue
            }
            // Log the message after we've determined that it at least meets some requirements on the fields.
            var thisMsg msg
            thisMsg.MessageClass = MSGCLASS_ES
            thisMsg.TimeReceived = time.Now()
            thisMsg.Data = []byte(buf)
            MsgLog = append(MsgLog, thisMsg)
            // Begin to parse the message.
            icaoDec := uint32(icaoDecf)
            trafficMutex.Lock()
            // Retrieve previous information on this ICAO code.
            var ti TrafficInfo
            if val, ok := traffic[icaoDec]; ok {
                ti = val
            }
            ti.Icao_addr = icaoDec
            //FIXME: Some stale information will be renewed.
            valid_change := true
            if x[1] == "3" { // ES airborne position message. DF17 BDS 0,5.
                //MSG,3,111,11111,AC2BB7,111111,2015/07/28,03:59:12.363,2015/07/28,03:59:12.353,,5550,,,42.35847,-83.42212,,,,,,0
                //MSG,3,111,11111,A5D007,111111, , , , ,,35000,,,42.47454,-82.57433,,,0,0,0,0
                alt := x[11]
                lat := x[14]
                lng := x[15]
                if len(alt) == 0 || len(lat) == 0 || len(lng) == 0 { //FIXME.
                    valid_change = false
                }
                altFloat, err := strconv.ParseFloat(alt, 32)
                if err != nil {
                    // log.Printf("err parsing alt (%s): %s\n", alt, err.Error())
                    valid_change = false
                }
                latFloat, err := strconv.ParseFloat(lat, 32)
                if err != nil {
                    // log.Printf("err parsing lat (%s): %s\n", lat, err.Error())
                    valid_change = false
                }
                lngFloat, err := strconv.ParseFloat(lng, 32)
                if err != nil {
                    // log.Printf("err parsing lng (%s): %s\n", lng, err.Error())
                    valid_change = false
                }
                //log.Printf("icao=%s, icaoDec=%d, alt=%s, lat=%s, lng=%s\n", icao, icaoDec, alt, lat, lng)
                if valid_change {
                    ti.Alt = int32(altFloat)
                    ti.Lat = float32(latFloat)
                    ti.Lng = float32(lngFloat)
                    ti.Position_valid = true
                }
            }
            if x[1] == "4" { // ES airborne velocity message. DF17 BDS 0,9.
                // MSG,4,111,11111,A3B557,111111,2015/07/28,06:13:36.417,2015/07/28,06:13:36.398,,,414,278,,,-64,,,,,0
                speed := x[12]
                track := x[13]
                vvel := x[16]
                if len(speed) == 0 || len(track) == 0 || len(vvel) == 0 {
                    valid_change = false
                }
                speedFloat, err := strconv.ParseFloat(speed, 32)
                if err != nil {
                    // log.Printf("err parsing speed (%s): %s\n", speed, err.Error())
                    valid_change = false
                }
                trackFloat, err := strconv.ParseFloat(track, 32)
                if err != nil {
                    // log.Printf("err parsing track (%s): %s\n", track, err.Error())
                    valid_change = false
                }
                vvelFloat, err := strconv.ParseFloat(vvel, 32)
                if err != nil {
                    // log.Printf("err parsing vvel (%s): %s\n", vvel, err.Error())
                    valid_change = false
                }
                //log.Printf("icao=%s, icaoDec=%d, vel=%s, hdg=%s, vr=%s\n", icao, icaoDec, vel, hdg, vr)
                if valid_change {
                    ti.Speed = uint16(speedFloat)
                    ti.Track = uint16(trackFloat)
                    ti.Vvel = int16(vvelFloat)
                    ti.Speed_valid = true
                }
            }
            if x[1] == "1" { // ES identification and category. DF17 BDS 0,8.
                // MSG,1,,,%02X%02X%02X,,,,,,%s,,,,,,,,0,0,0,0
                tail := x[10]
                if len(tail) == 0 {
                    valid_change = false
                }
                if valid_change {
                    ti.Tail = tail
                }
            }
            if x[1] == "5" { // Surveillance alt message. DF4, DF20.
                // MSG,5,,,%02X%02X%02X,,,,,,,%d,,,,,,,%d,%d,%d,%d
                // MSG,5,111,11111,AB5F1B,111111,2016/01/03,04:43:52.028,2016/01/03,04:43:52.006,,13050,,,,,,,0,,0,0
                // Only the altitude field is usable; take it when present and
                // parseable, leaving the previous altitude otherwise.
                alt := x[11]
                altFloat, err := strconv.ParseFloat(alt, 32)
                if len(alt) != 0 && err == nil {
                    ti.Alt = int32(altFloat)
                }
            }
            // Update "last seen" (any type of message, as long as the ICAO addr can be parsed).
            ti.Last_source = TRAFFIC_SOURCE_1090ES
            ti.Last_seen = time.Now()
            ti.addr_type = 0    //FIXME: ADS-B with ICAO address. Not recognized by ForeFlight.
            ti.emitter_category = 0x01 //FIXME. "Light"
            // This is a hack to show the source of the traffic in ForeFlight.
            ti.Tail = strings.Trim(ti.Tail, " ")
            if globalSettings.DEBUG {
                if len(ti.Tail) == 0 || (len(ti.Tail) != 0 && len(ti.Tail) < 8 && ti.Tail[0] != 'E') {
                    ti.Tail = "e" + ti.Tail
                }
            }
            traffic[icaoDec] = ti // Update information on this ICAO code.
            registerTrafficUpdate(ti)
            seenTraffic[icaoDec] = true // Mark as seen.
            trafficMutex.Unlock()
        }
        // Fix: close the connection before looping back to re-dial; the
        // previous version leaked one file descriptor per disconnect.
        inConn.Close()
    }
}
func initTraffic() {
traffic = make(map[uint32]TrafficInfo)
seenTraffic = make(map[uint32]bool)
trafficMutex = &sync.Mutex{}
go esListen()
}
|
package main
//#include <Windows.h>
import "C"
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"syscall"
"unsafe"
"github.com/AllenDang/w32"
"github.com/gonutz/blob"
"github.com/gonutz/d3d9"
"github.com/gonutz/mixer"
"github.com/gonutz/payload"
)
// init pins the main goroutine to its OS thread, which the Win32
// message loop and Direct3D calls in main require.
func init() {
	runtime.LockOSThread()
}

const (
	// version is displayed in the window title.
	version = "1"
)

var (
	// readFile loads a resource by id; main switches it to
	// readFileFromBlob when a resource blob is found in the executable.
	readFile func(id string) ([]byte, error) = readFileFromDisk
	// rscBlob holds the resources appended to the executable, if any.
	rscBlob *blob.Blob
	// logFile is created lazily by logToFile.
	logFile io.WriteCloser
	// muted is set when audio initialization fails.
	muted bool
	// previousPlacement remembers the window placement before entering
	// fullscreen so toggleFullscreen can restore it.
	previousPlacement C.WINDOWPLACEMENT
)
// main boots the game: it loads the resource blob appended to the
// executable (if present), opens the Win32 window, initializes the
// sound mixer and Direct3D 9, and runs the message/render loop until
// WM_QUIT arrives.
func main() {
	// close the log file at the end of the program
	defer func() {
		if logFile != nil {
			logFile.Close()
		}
	}()
	// load the resource blob from the executable
	rscBlobData, err := payload.Read()
	if err == nil {
		rscBlob, err = blob.Read(bytes.NewReader(rscBlobData))
		if err == nil {
			readFile = readFileFromBlob
			logf("blob in exe contains %v item(s)\n", rscBlob.ItemCount())
		} else {
			logln("unable to decode blob: ", err)
		}
	} else {
		// no payload appended: resources are read from ./rsc instead
		logln("unable to read payload:", err)
	}
	// create the window and initialize DirectX
	w32Window, err := openWindow(
		"LD36WindowClass",
		handleMessage,
		0, 0, 640, 480,
	)
	if err != nil {
		fatal("unable to open window: ", err)
	}
	cWindow := C.HWND(unsafe.Pointer(w32Window))
	w32.SetWindowText(w32Window, "LD36 - v"+version)
	fullscreen := true
	//fullscreen = false // NOTE toggle comment on this line for debugging
	if fullscreen {
		toggleFullscreen(cWindow)
	}
	// the client rect determines the rendered viewport size
	client := w32.GetClientRect(w32Window)
	windowW := uint(client.Right - client.Left)
	windowH := uint(client.Bottom - client.Top)
	err = mixer.Init()
	if err != nil {
		// sound is optional: keep running, just muted
		logln("unable to initialize the DirectSound8 mixer: ", err)
		muted = true
	} else {
		defer mixer.Close()
	}
	// initialize Direct3D9
	if err := d3d9.Init(); err != nil {
		fatal("unable to initialize Direct3D9: ", err)
	}
	defer d3d9.Close()
	d3d, err := d3d9.Create(d3d9.SDK_VERSION)
	if err != nil {
		fatal("unable to create Direct3D9 object: ", err)
	}
	defer d3d.Release()
	// size the back buffer to the largest attached display
	// (presumably so fullscreen toggles need no device reset — confirm)
	var maxScreenW, maxScreenH uint
	for i := uint(0); i < d3d.GetAdapterCount(); i++ {
		mode, err := d3d.GetAdapterDisplayMode(i)
		if err == nil {
			if mode.Width > maxScreenW {
				maxScreenW = mode.Width
			}
			if mode.Height > maxScreenH {
				maxScreenH = mode.Height
			}
		}
	}
	if maxScreenW == 0 || maxScreenH == 0 {
		maxScreenW, maxScreenH = windowW, windowH
	}
	// NOTE(review): hardware vertex processing is requested
	// unconditionally; device creation may fail on cards without
	// hardware T&L — consider checking caps and falling back to
	// software vertex processing.
	device, _, err := d3d.CreateDevice(
		d3d9.ADAPTER_DEFAULT,
		d3d9.DEVTYPE_HAL,
		unsafe.Pointer(cWindow),
		d3d9.CREATE_HARDWARE_VERTEXPROCESSING,
		d3d9.PRESENT_PARAMETERS{
			BackBufferWidth:  maxScreenW,
			BackBufferHeight: maxScreenH,
			BackBufferFormat: d3d9.FMT_A8R8G8B8,
			BackBufferCount:  1,
			Windowed:         true,
			SwapEffect:       d3d9.SWAPEFFECT_DISCARD,
			HDeviceWindow:    unsafe.Pointer(cWindow),
		},
	)
	if err != nil {
		fatal("unable to create Direct3D09 device: ", err)
	}
	defer device.Release()
	// enable alpha blending
	device.SetRenderState(d3d9.RS_CULLMODE, uint32(d3d9.CULL_CW))
	device.SetRenderState(d3d9.RS_SRCBLEND, d3d9.BLEND_SRCALPHA)
	device.SetRenderState(d3d9.RS_DESTBLEND, d3d9.BLEND_INVSRCALPHA)
	device.SetRenderState(d3d9.RS_ALPHABLENDENABLE, 1)
	// classic Win32 game loop: drain pending messages, render when idle
	var msg C.MSG
	C.PeekMessage(&msg, nil, 0, 0, C.PM_NOREMOVE)
	for msg.message != C.WM_QUIT {
		if C.PeekMessage(&msg, nil, 0, 0, C.PM_REMOVE) != 0 {
			C.TranslateMessage(&msg)
			C.DispatchMessage(&msg)
		} else {
			device.SetViewport(
				d3d9.VIEWPORT{0, 0, uint32(windowW), uint32(windowH), 0, 1},
			)
			device.Clear(nil, d3d9.CLEAR_TARGET, d3d9.ColorRGB(0, 95, 83), 1, 0)
			// TODO render game
			// TODO check device lost error
			device.Present(
				&d3d9.RECT{0, 0, int32(windowW), int32(windowH)},
				nil,
				nil,
				nil,
			)
		}
	}
}
// handleMessage is the window procedure: F11 toggles fullscreen,
// destroying the window posts WM_QUIT, and everything else is handed
// to the default window procedure.
func handleMessage(window w32.HWND, message uint32, w, l uintptr) uintptr {
	if message == w32.WM_KEYDOWN {
		if w == w32.VK_F11 {
			toggleFullscreen((C.HWND)(unsafe.Pointer(window)))
		}
		return 1
	}
	if message == w32.WM_DESTROY {
		w32.PostQuitMessage(0)
		return 1
	}
	return w32.DefWindowProc(window, message, w, l)
}
// messageCallback is the Go signature of a Win32 window procedure.
type messageCallback func(window w32.HWND, msg uint32, w, l uintptr) uintptr

// openWindow registers a window class with the given name and message
// callback and creates a visible window at the given position and
// size, returning the Win32 window handle.
func openWindow(
	className string,
	callback messageCallback,
	x, y, width, height int,
) (
	w32.HWND, error,
) {
	windowProc := syscall.NewCallback(callback)
	class := w32.WNDCLASSEX{
		Size:      C.sizeof_WNDCLASSEX,
		WndProc:   windowProc,
		Cursor:    w32.LoadCursor(0, (*uint16)(unsafe.Pointer(uintptr(w32.IDC_ARROW)))),
		ClassName: syscall.StringToUTF16Ptr(className),
	}
	atom := w32.RegisterClassEx(&class)
	if atom == 0 {
		return 0, errors.New("RegisterClassEx failed")
	}
	// WS_OVERLAPPED|WS_CAPTION|WS_SYSMENU: captioned window without
	// resize borders; WS_VISIBLE shows it immediately
	window := w32.CreateWindowEx(
		0,
		syscall.StringToUTF16Ptr(className),
		nil,
		w32.WS_OVERLAPPED|w32.WS_CAPTION|w32.WS_SYSMENU|w32.WS_VISIBLE,
		x, y, width, height,
		0, 0, 0, nil,
	)
	if window == 0 {
		return 0, errors.New("CreateWindowEx failed")
	}
	return window, nil
}
// toggleFullscreen switches the window between borderless fullscreen
// and windowed mode using the classic Win32 pattern: remember the
// window placement, strip the frame styles and resize to the monitor
// bounds; on the way back, restore styles and the saved placement.
func toggleFullscreen(window C.HWND) {
	style := C.GetWindowLong(window, C.GWL_STYLE)
	if style&C.WS_OVERLAPPEDWINDOW != 0 {
		// go into full-screen
		monitorInfo := C.MONITORINFO{cbSize: C.sizeof_MONITORINFO}
		previousPlacement.length = C.sizeof_WINDOWPLACEMENT
		monitor := C.MonitorFromWindow(window, C.MONITOR_DEFAULTTOPRIMARY)
		if C.GetWindowPlacement(window, &previousPlacement) != 0 &&
			C.GetMonitorInfo(monitor, &monitorInfo) != 0 {
			C.SetWindowLong(
				window,
				C.GWL_STYLE,
				style & ^C.WS_OVERLAPPED & ^w32.WS_CAPTION & ^w32.WS_SYSMENU,
			)
			C.SetWindowPos(window, C.HWND(unsafe.Pointer(uintptr(0))),
				C.int(monitorInfo.rcMonitor.left),
				C.int(monitorInfo.rcMonitor.top),
				C.int(monitorInfo.rcMonitor.right-monitorInfo.rcMonitor.left),
				C.int(monitorInfo.rcMonitor.bottom-monitorInfo.rcMonitor.top),
				C.SWP_NOOWNERZORDER|C.SWP_FRAMECHANGED,
			)
		}
		// hide the cursor while fullscreen
		C.ShowCursor(0)
	} else {
		// go into windowed mode
		C.SetWindowLong(
			window,
			C.GWL_STYLE,
			style|w32.WS_OVERLAPPED|w32.WS_CAPTION|w32.WS_SYSMENU,
		)
		C.SetWindowPlacement(window, &previousPlacement)
		C.SetWindowPos(window, nil, 0, 0, 0, 0,
			C.SWP_NOMOVE|C.SWP_NOSIZE|C.SWP_NOZORDER|
				C.SWP_NOOWNERZORDER|C.SWP_FRAMECHANGED,
		)
		C.ShowCursor(1)
	}
}
// readFileFromDisk loads the resource with the given id from the
// local ./rsc directory. The id is concatenated verbatim, so it is
// expected to begin with a path separator (e.g. "/music.wav") —
// TODO confirm against callers.
func readFileFromDisk(id string) ([]byte, error) {
	return ioutil.ReadFile("./rsc" + id)
}
// readFileFromBlob looks the resource id up in the blob that was
// appended to the executable. It returns an error when the blob has
// no entry for the id.
func readFileFromBlob(id string) ([]byte, error) {
	data, exists := rscBlob.GetByID(id)
	if exists {
		return data, nil
	}
	return data, errors.New("resource '" + id + "' does not exist in blob")
}
// log writes the fmt.Sprint-formatted arguments to the log.
func log(a ...interface{}) { logToFile(fmt.Sprint(a...)) }

// logf writes the fmt.Sprintf-formatted message to the log.
func logf(format string, a ...interface{}) { logToFile(fmt.Sprintf(format, a...)) }

// logln writes the fmt.Sprintln-formatted arguments (newline added) to the log.
func logln(a ...interface{}) { logToFile(fmt.Sprintln(a...)) }
// logToFile prints msg to stdout and appends it to a log file at
// %APPDATA%\ld36_log.txt, creating the file lazily on first use.
// Logging is best-effort: if the file cannot be created, output still
// goes to stdout and creation is retried on the next call.
func logToFile(msg string) {
	if logFile == nil {
		// creation errors are deliberately ignored (best-effort)
		logFile, _ = os.Create(filepath.Join(os.Getenv("APPDATA"), "ld36_log.txt"))
	}
	fmt.Print(msg)
	if logFile != nil {
		logFile.Write([]byte(msg))
	}
}
// fatal shows an error dialog with the fmt.Sprint-formatted message
// and aborts the program via fail.
func fatal(a ...interface{}) {
	fail(fmt.Sprint(a...))
}

// fatalf shows an error dialog with the fmt.Sprintf-formatted message
// and aborts the program via fail.
func fatalf(format string, a ...interface{}) {
	fail(fmt.Sprintf(format, a...))
}
// fail reports a fatal error: it shows a topmost Win32 message box,
// logs the message, and panics to unwind the program.
func fail(msg string) {
	// MB_TOPMOST is not exported by the w32 package, so declare it here.
	const MB_TOPMOST = 0x00040000
	w32.MessageBox(0, msg, "Error", w32.MB_OK|w32.MB_ICONERROR|MB_TOPMOST)
	log("fatal error: ", msg)
	panic(msg)
}
Add support for old graphics cards
package main
//#include <Windows.h>
import "C"
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"syscall"
"unsafe"
"github.com/AllenDang/w32"
"github.com/gonutz/blob"
"github.com/gonutz/d3d9"
"github.com/gonutz/mixer"
"github.com/gonutz/payload"
)
// init locks the main goroutine to its OS thread; the Win32 message
// loop and Direct3D calls in main must all run on one thread.
func init() {
	runtime.LockOSThread()
}

const (
	// version appears in the window title.
	version = "1"
)

var (
	// readFile resolves a resource id to its bytes; main swaps in
	// readFileFromBlob when the executable carries a resource blob.
	readFile func(id string) ([]byte, error) = readFileFromDisk
	// rscBlob is the resource blob appended to the executable, if any.
	rscBlob *blob.Blob
	// logFile is opened lazily by logToFile.
	logFile io.WriteCloser
	// muted records that audio initialization failed.
	muted bool
	// previousPlacement saves the windowed placement across a
	// fullscreen toggle.
	previousPlacement C.WINDOWPLACEMENT
)
// main boots the game: it loads the resource blob appended to the
// executable (if present), opens the Win32 window, initializes the
// sound mixer and Direct3D 9 (choosing hardware or software vertex
// processing based on the device caps), and runs the message/render
// loop until WM_QUIT arrives.
func main() {
	// close the log file at the end of the program
	defer func() {
		if logFile != nil {
			logFile.Close()
		}
	}()
	// load the resource blob from the executable
	rscBlobData, err := payload.Read()
	if err == nil {
		rscBlob, err = blob.Read(bytes.NewReader(rscBlobData))
		if err == nil {
			readFile = readFileFromBlob
			logf("blob in exe contains %v item(s)\n", rscBlob.ItemCount())
		} else {
			logln("unable to decode blob: ", err)
		}
	} else {
		// no payload appended: resources are read from ./rsc instead
		logln("unable to read payload:", err)
	}
	// create the window and initialize DirectX
	w32Window, err := openWindow(
		"LD36WindowClass",
		handleMessage,
		0, 0, 640, 480,
	)
	if err != nil {
		fatal("unable to open window: ", err)
	}
	cWindow := C.HWND(unsafe.Pointer(w32Window))
	w32.SetWindowText(w32Window, "LD36 - v"+version)
	fullscreen := true
	// BUG FIX: the debugging override below had been committed in its
	// active form, forcing every build to start windowed; its own NOTE
	// says it is meant to be enabled only by un-commenting while
	// debugging, so it is commented out again here.
	//fullscreen = false // NOTE toggle comment on this line for debugging
	if fullscreen {
		toggleFullscreen(cWindow)
	}
	// the client rect determines the rendered viewport size
	client := w32.GetClientRect(w32Window)
	windowW := uint(client.Right - client.Left)
	windowH := uint(client.Bottom - client.Top)
	err = mixer.Init()
	if err != nil {
		// sound is optional: keep running, just muted
		logln("unable to initialize the DirectSound8 mixer: ", err)
		muted = true
	} else {
		defer mixer.Close()
	}
	// initialize Direct3D9
	if err := d3d9.Init(); err != nil {
		fatal("unable to initialize Direct3D9: ", err)
	}
	defer d3d9.Close()
	d3d, err := d3d9.Create(d3d9.SDK_VERSION)
	if err != nil {
		fatal("unable to create Direct3D9 object: ", err)
	}
	defer d3d.Release()
	// size the back buffer to the largest attached display
	// (presumably so fullscreen toggles need no device reset — confirm)
	var maxScreenW, maxScreenH uint
	for i := uint(0); i < d3d.GetAdapterCount(); i++ {
		mode, err := d3d.GetAdapterDisplayMode(i)
		if err == nil {
			if mode.Width > maxScreenW {
				maxScreenW = mode.Width
			}
			if mode.Height > maxScreenH {
				maxScreenH = mode.Height
			}
		}
	}
	if maxScreenW == 0 || maxScreenH == 0 {
		maxScreenW, maxScreenH = windowW, windowH
	}
	// old graphics cards without hardware T&L cannot create a device
	// with hardware vertex processing; default to software and upgrade
	// only when the caps report support
	var createFlags uint32 = d3d9.CREATE_SOFTWARE_VERTEXPROCESSING
	caps, err := d3d.GetDeviceCaps(d3d9.ADAPTER_DEFAULT, d3d9.DEVTYPE_HAL)
	if err == nil &&
		caps.DevCaps&d3d9.DEVCAPS_HWTRANSFORMANDLIGHT != 0 {
		createFlags = d3d9.CREATE_HARDWARE_VERTEXPROCESSING
		logln("graphics card supports hardware vertex processing")
	}
	device, _, err := d3d.CreateDevice(
		d3d9.ADAPTER_DEFAULT,
		d3d9.DEVTYPE_HAL,
		unsafe.Pointer(cWindow),
		createFlags,
		d3d9.PRESENT_PARAMETERS{
			BackBufferWidth:  maxScreenW,
			BackBufferHeight: maxScreenH,
			BackBufferFormat: d3d9.FMT_A8R8G8B8,
			BackBufferCount:  1,
			Windowed:         true,
			SwapEffect:       d3d9.SWAPEFFECT_DISCARD,
			HDeviceWindow:    unsafe.Pointer(cWindow),
		},
	)
	if err != nil {
		fatal("unable to create Direct3D09 device: ", err)
	}
	defer device.Release()
	// enable alpha blending
	device.SetRenderState(d3d9.RS_CULLMODE, uint32(d3d9.CULL_CW))
	device.SetRenderState(d3d9.RS_SRCBLEND, d3d9.BLEND_SRCALPHA)
	device.SetRenderState(d3d9.RS_DESTBLEND, d3d9.BLEND_INVSRCALPHA)
	device.SetRenderState(d3d9.RS_ALPHABLENDENABLE, 1)
	// classic Win32 game loop: drain pending messages, render when idle
	var msg C.MSG
	C.PeekMessage(&msg, nil, 0, 0, C.PM_NOREMOVE)
	for msg.message != C.WM_QUIT {
		if C.PeekMessage(&msg, nil, 0, 0, C.PM_REMOVE) != 0 {
			C.TranslateMessage(&msg)
			C.DispatchMessage(&msg)
		} else {
			device.SetViewport(
				d3d9.VIEWPORT{0, 0, uint32(windowW), uint32(windowH), 0, 1},
			)
			device.Clear(nil, d3d9.CLEAR_TARGET, d3d9.ColorRGB(0, 95, 83), 1, 0)
			// TODO render game
			// TODO check device lost error
			device.Present(
				&d3d9.RECT{0, 0, int32(windowW), int32(windowH)},
				nil,
				nil,
				nil,
			)
		}
	}
}
// handleMessage is the window procedure. F11 toggles between
// fullscreen and windowed mode, WM_DESTROY requests program exit, and
// all other messages go to the default window procedure.
func handleMessage(window w32.HWND, message uint32, w, l uintptr) uintptr {
	switch message {
	case w32.WM_KEYDOWN:
		if w == w32.VK_F11 {
			toggleFullscreen((C.HWND)(unsafe.Pointer(window)))
		}
		return 1
	case w32.WM_DESTROY:
		w32.PostQuitMessage(0)
		return 1
	}
	return w32.DefWindowProc(window, message, w, l)
}
// messageCallback is the Go-side signature of a Win32 window procedure.
type messageCallback func(window w32.HWND, msg uint32, w, l uintptr) uintptr

// openWindow registers a window class named className whose messages
// are dispatched to callback, then creates and shows a window with the
// given geometry, returning its handle.
func openWindow(
	className string,
	callback messageCallback,
	x, y, width, height int,
) (
	w32.HWND, error,
) {
	windowProc := syscall.NewCallback(callback)
	class := w32.WNDCLASSEX{
		Size:      C.sizeof_WNDCLASSEX,
		WndProc:   windowProc,
		Cursor:    w32.LoadCursor(0, (*uint16)(unsafe.Pointer(uintptr(w32.IDC_ARROW)))),
		ClassName: syscall.StringToUTF16Ptr(className),
	}
	atom := w32.RegisterClassEx(&class)
	if atom == 0 {
		return 0, errors.New("RegisterClassEx failed")
	}
	// caption + system menu, no resize borders; visible immediately
	window := w32.CreateWindowEx(
		0,
		syscall.StringToUTF16Ptr(className),
		nil,
		w32.WS_OVERLAPPED|w32.WS_CAPTION|w32.WS_SYSMENU|w32.WS_VISIBLE,
		x, y, width, height,
		0, 0, 0, nil,
	)
	if window == 0 {
		return 0, errors.New("CreateWindowEx failed")
	}
	return window, nil
}
// toggleFullscreen flips the window between borderless fullscreen and
// windowed mode. Entering fullscreen saves the current placement in
// previousPlacement, removes the caption/menu styles, and stretches
// the window over its monitor; leaving restores both.
func toggleFullscreen(window C.HWND) {
	style := C.GetWindowLong(window, C.GWL_STYLE)
	if style&C.WS_OVERLAPPEDWINDOW != 0 {
		// go into full-screen
		monitorInfo := C.MONITORINFO{cbSize: C.sizeof_MONITORINFO}
		previousPlacement.length = C.sizeof_WINDOWPLACEMENT
		monitor := C.MonitorFromWindow(window, C.MONITOR_DEFAULTTOPRIMARY)
		if C.GetWindowPlacement(window, &previousPlacement) != 0 &&
			C.GetMonitorInfo(monitor, &monitorInfo) != 0 {
			C.SetWindowLong(
				window,
				C.GWL_STYLE,
				style & ^C.WS_OVERLAPPED & ^w32.WS_CAPTION & ^w32.WS_SYSMENU,
			)
			C.SetWindowPos(window, C.HWND(unsafe.Pointer(uintptr(0))),
				C.int(monitorInfo.rcMonitor.left),
				C.int(monitorInfo.rcMonitor.top),
				C.int(monitorInfo.rcMonitor.right-monitorInfo.rcMonitor.left),
				C.int(monitorInfo.rcMonitor.bottom-monitorInfo.rcMonitor.top),
				C.SWP_NOOWNERZORDER|C.SWP_FRAMECHANGED,
			)
		}
		// cursor hidden while fullscreen
		C.ShowCursor(0)
	} else {
		// go into windowed mode
		C.SetWindowLong(
			window,
			C.GWL_STYLE,
			style|w32.WS_OVERLAPPED|w32.WS_CAPTION|w32.WS_SYSMENU,
		)
		C.SetWindowPlacement(window, &previousPlacement)
		C.SetWindowPos(window, nil, 0, 0, 0, 0,
			C.SWP_NOMOVE|C.SWP_NOSIZE|C.SWP_NOZORDER|
				C.SWP_NOOWNERZORDER|C.SWP_FRAMECHANGED,
		)
		C.ShowCursor(1)
	}
}
// readFileFromDisk resolves a resource id against the local ./rsc
// folder and returns the file's contents. The id is appended as-is,
// so it should start with a separator (e.g. "/title.png") — TODO
// confirm against callers.
func readFileFromDisk(id string) ([]byte, error) {
	return ioutil.ReadFile("./rsc" + id)
}
// readFileFromBlob returns the bytes of the resource with the given
// id from the blob appended to the executable, or an error when no
// such entry exists.
func readFileFromBlob(id string) (data []byte, err error) {
	data, ok := rscBlob.GetByID(id)
	if !ok {
		err = errors.New("resource '" + id + "' does not exist in blob")
	}
	return data, err
}
// log records the fmt.Sprint-formatted arguments.
func log(a ...interface{}) { logToFile(fmt.Sprint(a...)) }

// logf records the fmt.Sprintf-formatted message.
func logf(format string, a ...interface{}) { logToFile(fmt.Sprintf(format, a...)) }

// logln records the fmt.Sprintln-formatted arguments (with newline).
func logln(a ...interface{}) { logToFile(fmt.Sprintln(a...)) }
// logToFile prints msg to stdout and mirrors it into the log file
// under %APPDATA% (created on first use). File creation is
// best-effort: on failure, stdout output still happens and creation
// is retried on the next call.
func logToFile(msg string) {
	if logFile == nil {
		name := filepath.Join(os.Getenv("APPDATA"), "ld36_log.txt")
		logFile, _ = os.Create(name) // nil on failure; tolerated
	}
	fmt.Print(msg)
	if logFile != nil {
		logFile.Write([]byte(msg))
	}
}
// fatal aborts the program with the fmt.Sprint-formatted message,
// delegating dialog/log/panic behavior to fail.
func fatal(a ...interface{}) {
	fail(fmt.Sprint(a...))
}

// fatalf aborts the program with the fmt.Sprintf-formatted message,
// delegating dialog/log/panic behavior to fail.
func fatalf(format string, a ...interface{}) {
	fail(fmt.Sprintf(format, a...))
}
// fail is the terminal error path: it shows a topmost Win32 error
// dialog, writes the message to the log, and panics.
func fail(msg string) {
	// MB_TOPMOST is missing from the w32 constants, hence the local const.
	const MB_TOPMOST = 0x00040000
	w32.MessageBox(0, msg, "Error", w32.MB_OK|w32.MB_ICONERROR|MB_TOPMOST)
	log("fatal error: ", msg)
	panic(msg)
}
|
package mains
import (
"context"
"errors"
"fmt"
"io"
"os"
"runtime"
"github.com/mattn/go-isatty"
"github.com/zetamatta/nyagos/frame"
"github.com/zetamatta/nyagos/functions"
"github.com/zetamatta/nyagos/history"
"github.com/zetamatta/nyagos/lua"
"github.com/zetamatta/nyagos/shell"
)
// noLuaEngineErr is returned by script-engine methods when no Lua
// interpreter is available (L == 0).
var noLuaEngineErr = errors.New("no lua engine")

// prompt_hook is the Lua value invoked to print the prompt; it
// defaults to the built-in Go implementation and can be replaced
// from Lua scripts.
var prompt_hook lua.Object = lua.TGoFunction(lua2cmd(functions.Prompt))

// printPrompt calls the prompt hook with the PROMPT environment
// variable and returns the integer the hook reports (presumably the
// printed prompt's width — confirm against callers). When the hook is
// not a function it does nothing and returns 0.
func printPrompt(L lua.Lua) (int, error) {
	L.Push(prompt_hook)
	if !L.IsFunction(-1) {
		L.Pop(1)
		return 0, nil
	}
	L.PushString(os.Getenv("PROMPT"))
	if err := L.Call(1, 1); err != nil {
		return 0, err
	}
	// the hook must leave an integer on the Lua stack
	length, lengthErr := L.ToInteger(-1)
	L.Pop(1)
	if lengthErr == nil {
		return length, nil
	} else {
		return 0, fmt.Errorf("nyagos.prompt: return-value(length) is invalid: %s", lengthErr.Error())
	}
}
// luaFilter is the Lua value used to rewrite each input line before
// execution; TNil disables filtering.
var luaFilter lua.Object = lua.TNil{}

// doLuaFilter passes line through the Lua filter hook and returns the
// rewritten line. On any problem (hook not a function, call error,
// non-string result) the original line is returned unchanged.
func doLuaFilter(L lua.Lua, line string) string {
	// restore the Lua stack depth on every exit path
	stackPos := L.GetTop()
	defer L.SetTop(stackPos)
	L.Push(luaFilter)
	if !L.IsFunction(-1) {
		return line
	}
	L.PushString(line)
	err := L.Call(1, 1)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return line
	}
	if !L.IsString(-1) {
		return line
	}
	line2, err2 := L.ToString(-1)
	if err2 != nil {
		fmt.Fprintln(os.Stderr, err2)
		return line
	}
	return line2
}
// luaWrapper adapts lua.Lua to the shell.CloneCloser interface so the
// shell can duplicate and dispose of interpreter state.
type luaWrapper struct {
	lua.Lua
}

// Clone creates a fresh Lua interpreter, copies this one's state into
// it, and returns the wrapped copy.
func (this *luaWrapper) Clone() (shell.CloneCloser, error) {
	L := this.Lua
	newL, err := NewLua()
	if err != nil {
		return nil, err
	}
	err = L.CloneTo(newL)
	if err != nil {
		return nil, err
	}
	return &luaWrapper{newL}, nil
}

// Close releases the wrapped Lua interpreter.
func (this *luaWrapper) Close() error {
	return this.Lua.Close()
}
// MainStream wraps a shell.Stream, attaching the Lua engine to the
// context and piping every line read through the Lua filter hook.
type MainStream struct {
	shell.Stream
	L lua.Lua
}

// ReadLine reads the next line from the wrapped stream. When a Lua
// engine is present (L != 0) it is stored in the context and the line
// is passed through doLuaFilter before being returned.
func (this *MainStream) ReadLine(ctx context.Context) (context.Context, string, error) {
	if this.L != 0 {
		ctx = context.WithValue(ctx, lua.PackageId, this.L)
	}
	ctx, line, err := this.Stream.ReadLine(ctx)
	if err != nil {
		return ctx, "", err
	}
	if this.L == 0 {
		return ctx, line, nil
	}
	return ctx, doLuaFilter(this.L, line), nil
}
// ScriptEngineForOptionImpl adapts a Lua interpreter and shell to the
// script-engine interface consumed by frame.OptionParse. A zero L
// means no Lua engine is available.
type ScriptEngineForOptionImpl struct {
	L  lua.Lua
	Sh *shell.Shell
}

// SetArg exposes the command-line arguments to Lua when an engine is
// available; otherwise it does nothing.
func (this *ScriptEngineForOptionImpl) SetArg(args []string) {
	if this.L != 0 {
		setLuaArg(this.L, args)
	}
}

// RunFile executes the named Lua script file and returns its output,
// or noLuaEngineErr when no Lua engine is available.
func (this *ScriptEngineForOptionImpl) RunFile(fname string) ([]byte, error) {
	if this.L == 0 {
		return nil, noLuaEngineErr
	}
	return runLua(this.Sh, this.L, fname)
}

// RunString compiles and runs the given Lua source. It returns
// noLuaEngineErr when no engine is available, the load error when the
// source does not compile, or the execution error from Call.
func (this *ScriptEngineForOptionImpl) RunString(code string) error {
	if this.L == 0 {
		return noLuaEngineErr
	}
	if err := this.L.LoadString(code); err != nil {
		return err
	}
	// BUG FIX: the error from Call was silently discarded; propagate
	// it so runtime failures in -e scripts are reported like load errors.
	return this.L.Call(0, 0)
}
// optionParseLua parses the command-line options using a Lua-backed
// script engine; it returns a deferred script action (nil when none).
func optionParseLua(sh *shell.Shell, L lua.Lua) (func() error, error) {
	e := &ScriptEngineForOptionImpl{Sh: sh, L: L}
	return frame.OptionParse(sh, e)
}
// Main is the nyagos entry point: it builds the Lua engine and the
// shell, parses command-line options, runs rc scripts, and then
// either executes the requested script or enters the interactive
// read-eval loop.
func Main() error {
	// for issue #155 & #158
	lua.NG_UPVALUE_NAME["prompter"] = struct{}{}
	// Lua extension
	L, err := NewLua()
	if err != nil {
		// Lua is optional: continue with L == 0 (no engine)
		fmt.Fprintln(os.Stderr, err.Error())
	} else {
		defer L.Close()
	}
	sh := shell.New()
	if L != 0 {
		sh.SetTag(&luaWrapper{L})
	}
	defer sh.Close()
	ctx := context.Background()
	// langEngine runs a Lua rc file (no-op without a Lua engine)
	langEngine := func(fname string) ([]byte, error) {
		if L != 0 {
			return runLua(sh, L, fname)
		} else {
			return nil, nil
		}
	}
	// shellEngine runs a shell-script rc file through the shell loop
	shellEngine := func(fname string) error {
		fd, err := os.Open(fname)
		if err != nil {
			return err
		}
		stream1 := frame.NewCmdStreamFile(fd)
		_, err = sh.Loop(ctx, stream1)
		fd.Close()
		// EOF just means the script ran to its end
		if err == io.EOF {
			return nil
		} else {
			return err
		}
	}
	script, err := optionParseLua(sh, L)
	if err != nil {
		return err
	}
	// suppress the banner for piped stdin or one-shot script runs
	if !isatty.IsTerminal(os.Stdin.Fd()) || script != nil {
		frame.SilentMode = true
	}
	if !frame.OptionNorc {
		if !frame.SilentMode {
			fmt.Printf("Nihongo Yet Another GOing Shell %s-%s by %s",
				frame.VersionOrStamp(),
				runtime.GOARCH,
				runtime.Version())
			if L != 0 {
				fmt.Print(" & Lua 5.3")
			}
			fmt.Println("\n(c) 2014-2018 NYAOS.ORG <http://www.nyaos.org>")
		}
		if err := frame.LoadScripts(shellEngine, langEngine); err != nil {
			fmt.Fprintln(os.Stderr, err.Error())
		}
	}
	// a script requested on the command line replaces the interactive loop
	if script != nil {
		if err := script(); err != nil {
			if err != io.EOF {
				return err
			} else {
				return nil
			}
		}
	}
	var stream1 shell.Stream
	if isatty.IsTerminal(os.Stdin.Fd()) {
		// interactive console with history and a Lua-aware prompt
		constream := frame.NewCmdStreamConsole(func() (int, error) {
			if L != 0 {
				return printPrompt(L)
			} else {
				functions.Prompt(
					[]interface{}{frame.Format2Prompt(os.Getenv("PROMPT"))})
				return 0, nil
			}
		})
		stream1 = constream
		frame.DefaultHistory = constream.History
		ctx = context.WithValue(ctx, history.PackageId, constream.History)
	} else {
		// stdin is a pipe/file: read it like a script
		stream1 = frame.NewCmdStreamFile(os.Stdin)
	}
	sh.ForEver(ctx, &MainStream{stream1, L})
	return nil
}
Replace mains.MainStream to mains.LuaFilterStream which does not change context.
package mains
import (
"context"
"errors"
"fmt"
"io"
"os"
"runtime"
"github.com/mattn/go-isatty"
"github.com/zetamatta/nyagos/frame"
"github.com/zetamatta/nyagos/functions"
"github.com/zetamatta/nyagos/history"
"github.com/zetamatta/nyagos/lua"
"github.com/zetamatta/nyagos/shell"
)
// noLuaEngineErr signals that a script-engine method was used while
// no Lua interpreter exists (L == 0).
var noLuaEngineErr = errors.New("no lua engine")

// prompt_hook holds the Lua value called to draw the prompt;
// initially the built-in Go prompt, replaceable from Lua.
var prompt_hook lua.Object = lua.TGoFunction(lua2cmd(functions.Prompt))

// printPrompt invokes the prompt hook with $PROMPT and returns the
// integer the hook produces (presumably the printed width — confirm).
// A non-function hook prints nothing and yields 0.
func printPrompt(L lua.Lua) (int, error) {
	L.Push(prompt_hook)
	if !L.IsFunction(-1) {
		L.Pop(1)
		return 0, nil
	}
	L.PushString(os.Getenv("PROMPT"))
	if err := L.Call(1, 1); err != nil {
		return 0, err
	}
	// the hook's single return value must be an integer
	length, lengthErr := L.ToInteger(-1)
	L.Pop(1)
	if lengthErr == nil {
		return length, nil
	} else {
		return 0, fmt.Errorf("nyagos.prompt: return-value(length) is invalid: %s", lengthErr.Error())
	}
}
// luaFilter is the Lua hook used by LuaFilterStream to rewrite input
// lines; TNil means filtering is disabled.
var luaFilter lua.Object = lua.TNil{}

// luaWrapper makes lua.Lua satisfy shell.CloneCloser so the shell can
// clone and close interpreter state.
type luaWrapper struct {
	lua.Lua
}

// Clone builds a new Lua interpreter and copies this one's state into
// it, returning the wrapped result.
func (this *luaWrapper) Clone() (shell.CloneCloser, error) {
	L := this.Lua
	newL, err := NewLua()
	if err != nil {
		return nil, err
	}
	err = L.CloneTo(newL)
	if err != nil {
		return nil, err
	}
	return &luaWrapper{newL}, nil
}

// Close shuts down the wrapped Lua interpreter.
func (this *luaWrapper) Close() error {
	return this.Lua.Close()
}
// LuaFilterStream wraps a shell.Stream and pipes each line read from
// it through the Lua filter hook (luaFilter). Unlike the former
// MainStream it does not modify the context.
type LuaFilterStream struct {
	shell.Stream
	L lua.Lua
}

// ReadLine reads the next line from the wrapped stream and, when the
// Lua filter hook is a function, returns the hook's rewritten result.
// On any filter problem (call error, non-string result) the original
// line is returned unchanged; the context passes through untouched.
func (this *LuaFilterStream) ReadLine(ctx context.Context) (context.Context, string, error) {
	ctx, line, err := this.Stream.ReadLine(ctx)
	if err != nil {
		return ctx, "", err
	}
	L := this.L
	// restore the Lua stack depth on every exit path
	stackPos := L.GetTop()
	defer L.SetTop(stackPos)
	L.Push(luaFilter)
	if !L.IsFunction(-1) {
		return ctx, line, nil
	}
	L.PushString(line)
	err = L.Call(1, 1)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return ctx, line, nil
	}
	if !L.IsString(-1) {
		return ctx, line, nil
	}
	newLine, err := L.ToString(-1)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return ctx, line, nil
	}
	return ctx, newLine, nil
}
// ScriptEngineForOptionImpl bridges the Lua interpreter and shell to
// the option parser's script-engine interface. L == 0 means there is
// no Lua engine.
type ScriptEngineForOptionImpl struct {
	L  lua.Lua
	Sh *shell.Shell
}

// SetArg publishes the command-line arguments to Lua when an engine
// exists; otherwise it is a no-op.
func (this *ScriptEngineForOptionImpl) SetArg(args []string) {
	if this.L != 0 {
		setLuaArg(this.L, args)
	}
}

// RunFile executes the named Lua script and returns its output, or
// noLuaEngineErr when no Lua engine is available.
func (this *ScriptEngineForOptionImpl) RunFile(fname string) ([]byte, error) {
	if this.L == 0 {
		return nil, noLuaEngineErr
	}
	return runLua(this.Sh, this.L, fname)
}

// RunString compiles and executes the given Lua source, reporting
// noLuaEngineErr without an engine, load errors from LoadString, and
// runtime errors from Call.
func (this *ScriptEngineForOptionImpl) RunString(code string) error {
	if this.L == 0 {
		return noLuaEngineErr
	}
	if err := this.L.LoadString(code); err != nil {
		return err
	}
	// BUG FIX: Call's error was silently dropped; return it so
	// runtime failures surface to the caller.
	return this.L.Call(0, 0)
}
// optionParseLua runs frame.OptionParse with a Lua-backed script
// engine and returns the deferred script action, if any.
func optionParseLua(sh *shell.Shell, L lua.Lua) (func() error, error) {
	e := &ScriptEngineForOptionImpl{Sh: sh, L: L}
	return frame.OptionParse(sh, e)
}
// Main is the nyagos entry point: it creates the Lua engine and the
// shell, parses options, runs rc scripts, then runs either the
// requested script or the interactive loop. When Lua is available the
// interactive input stream is wrapped in LuaFilterStream.
func Main() error {
	// for issue #155 & #158
	lua.NG_UPVALUE_NAME["prompter"] = struct{}{}
	// Lua extension
	L, err := NewLua()
	if err != nil {
		// Lua is optional: continue with L == 0 (no engine)
		fmt.Fprintln(os.Stderr, err.Error())
	} else {
		defer L.Close()
	}
	sh := shell.New()
	if L != 0 {
		sh.SetTag(&luaWrapper{L})
	}
	defer sh.Close()
	ctx := context.Background()
	// langEngine runs a Lua rc file (no-op without a Lua engine)
	langEngine := func(fname string) ([]byte, error) {
		if L != 0 {
			return runLua(sh, L, fname)
		} else {
			return nil, nil
		}
	}
	// shellEngine feeds a shell-script rc file through the shell loop
	shellEngine := func(fname string) error {
		fd, err := os.Open(fname)
		if err != nil {
			return err
		}
		stream1 := frame.NewCmdStreamFile(fd)
		_, err = sh.Loop(ctx, stream1)
		fd.Close()
		// EOF just means the script ran to its end
		if err == io.EOF {
			return nil
		} else {
			return err
		}
	}
	script, err := optionParseLua(sh, L)
	if err != nil {
		return err
	}
	// no banner for piped stdin or one-shot script runs
	if !isatty.IsTerminal(os.Stdin.Fd()) || script != nil {
		frame.SilentMode = true
	}
	if !frame.OptionNorc {
		if !frame.SilentMode {
			fmt.Printf("Nihongo Yet Another GOing Shell %s-%s by %s",
				frame.VersionOrStamp(),
				runtime.GOARCH,
				runtime.Version())
			if L != 0 {
				fmt.Print(" & Lua 5.3")
			}
			fmt.Println("\n(c) 2014-2018 NYAOS.ORG <http://www.nyaos.org>")
		}
		if err := frame.LoadScripts(shellEngine, langEngine); err != nil {
			fmt.Fprintln(os.Stderr, err.Error())
		}
	}
	// a script given on the command line replaces the interactive loop
	if script != nil {
		if err := script(); err != nil {
			if err != io.EOF {
				return err
			} else {
				return nil
			}
		}
	}
	var stream1 shell.Stream
	if isatty.IsTerminal(os.Stdin.Fd()) {
		// interactive console with history and a Lua-aware prompt
		constream := frame.NewCmdStreamConsole(func() (int, error) {
			if L != 0 {
				return printPrompt(L)
			} else {
				functions.Prompt(
					[]interface{}{frame.Format2Prompt(os.Getenv("PROMPT"))})
				return 0, nil
			}
		})
		stream1 = constream
		frame.DefaultHistory = constream.History
		ctx = context.WithValue(ctx, history.PackageId, constream.History)
	} else {
		// stdin is a pipe/file: treat it as a script
		stream1 = frame.NewCmdStreamFile(os.Stdin)
	}
	if L != 0 {
		// attach the Lua engine to the context once and filter each
		// input line through the Lua hook
		ctx = context.WithValue(ctx, lua.PackageId, L)
		sh.ForEver(ctx, &LuaFilterStream{stream1, L})
	} else {
		sh.ForEver(ctx, stream1)
	}
	return nil
}
|
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSIDriver) DeepCopyInto(out *CSIDriver) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriver.
func (in *CSIDriver) DeepCopy() *CSIDriver {
if in == nil {
return nil
}
out := new(CSIDriver)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CSIDriver) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSIDriverInfo) DeepCopyInto(out *CSIDriverInfo) {
*out = *in
if in.TopologyKeys != nil {
in, out := &in.TopologyKeys, &out.TopologyKeys
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverInfo.
func (in *CSIDriverInfo) DeepCopy() *CSIDriverInfo {
if in == nil {
return nil
}
out := new(CSIDriverInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSIDriverList) DeepCopyInto(out *CSIDriverList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CSIDriver, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverList.
func (in *CSIDriverList) DeepCopy() *CSIDriverList {
if in == nil {
return nil
}
out := new(CSIDriverList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CSIDriverList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) {
*out = *in
if in.AttachRequired != nil {
in, out := &in.AttachRequired, &out.AttachRequired
*out = new(bool)
**out = **in
}
if in.PodInfoRequiredOnMount != nil {
in, out := &in.PodInfoRequiredOnMount, &out.PodInfoRequiredOnMount
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverSpec.
func (in *CSIDriverSpec) DeepCopy() *CSIDriverSpec {
if in == nil {
return nil
}
out := new(CSIDriverSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSINodeInfo) DeepCopyInto(out *CSINodeInfo) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.CSIDrivers != nil {
in, out := &in.CSIDrivers, &out.CSIDrivers
*out = make([]CSIDriverInfo, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeInfo.
func (in *CSINodeInfo) DeepCopy() *CSINodeInfo {
if in == nil {
return nil
}
out := new(CSINodeInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CSINodeInfo) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSINodeInfoList) DeepCopyInto(out *CSINodeInfoList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CSINodeInfo, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeInfoList.
func (in *CSINodeInfoList) DeepCopy() *CSINodeInfoList {
if in == nil {
return nil
}
out := new(CSINodeInfoList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CSINodeInfoList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
Generated code
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSIDriver) DeepCopyInto(out *CSIDriver) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriver.
func (in *CSIDriver) DeepCopy() *CSIDriver {
if in == nil {
return nil
}
out := new(CSIDriver)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CSIDriver) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSIDriverInfo) DeepCopyInto(out *CSIDriverInfo) {
*out = *in
if in.TopologyKeys != nil {
in, out := &in.TopologyKeys, &out.TopologyKeys
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverInfo.
func (in *CSIDriverInfo) DeepCopy() *CSIDriverInfo {
if in == nil {
return nil
}
out := new(CSIDriverInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSIDriverList) DeepCopyInto(out *CSIDriverList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CSIDriver, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverList.
func (in *CSIDriverList) DeepCopy() *CSIDriverList {
if in == nil {
return nil
}
out := new(CSIDriverList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CSIDriverList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) {
	*out = *in
	// Pointer fields are re-allocated so the copy does not share storage
	// with the original.
	if in.AttachRequired != nil {
		in, out := &in.AttachRequired, &out.AttachRequired
		*out = new(bool)
		**out = **in
	}
	if in.PodInfoOnMountVersion != nil {
		in, out := &in.PodInfoOnMountVersion, &out.PodInfoOnMountVersion
		*out = new(string)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverSpec.
func (in *CSIDriverSpec) DeepCopy() *CSIDriverSpec {
	// A nil receiver deep-copies to nil, so callers need not pre-check.
	if in == nil {
		return nil
	}
	out := new(CSIDriverSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSINodeInfo) DeepCopyInto(out *CSINodeInfo) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	// Each CSIDriverInfo owns a slice, so elements are deep-copied.
	if in.CSIDrivers != nil {
		in, out := &in.CSIDrivers, &out.CSIDrivers
		*out = make([]CSIDriverInfo, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeInfo.
func (in *CSINodeInfo) DeepCopy() *CSINodeInfo {
	// A nil receiver deep-copies to nil, so callers need not pre-check.
	if in == nil {
		return nil
	}
	out := new(CSINodeInfo)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CSINodeInfo) DeepCopyObject() runtime.Object {
	// Returning the concrete copy through the interface satisfies runtime.Object.
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSINodeInfoList) DeepCopyInto(out *CSINodeInfoList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	out.ListMeta = in.ListMeta
	// Items contain pointer-bearing fields, so each element is deep-copied.
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CSINodeInfo, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeInfoList.
func (in *CSINodeInfoList) DeepCopy() *CSINodeInfoList {
	// A nil receiver deep-copies to nil, so callers need not pre-check.
	if in == nil {
		return nil
	}
	out := new(CSINodeInfoList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CSINodeInfoList) DeepCopyObject() runtime.Object {
	// Returning the concrete copy through the interface satisfies runtime.Object.
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
|
package remoteexec
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"strings"
"time"
helper "github.com/hashicorp/terraform/helper/ssh"
"github.com/hashicorp/terraform/terraform"
)
const (
	// DefaultShebang is added at the top of the script file
	DefaultShebang = "#!/bin/sh"
)

// ResourceProvisioner implements the remote-exec provisioner: it copies the
// configured scripts to a remote host over SSH and executes them there.
type ResourceProvisioner struct{}
// Apply runs the provisioner: it verifies the connection is SSH, parses the
// SSH configuration, collects the configured scripts, then uploads and
// executes each one on the remote host. o is accepted to satisfy the
// provisioner interface but is not used in this version.
func (p *ResourceProvisioner) Apply(
	o terraform.UIOutput,
	s *terraform.InstanceState,
	c *terraform.ResourceConfig) error {
	// Ensure the connection type is SSH
	if err := helper.VerifySSH(s); err != nil {
		return err
	}
	// Get the SSH configuration
	conf, err := helper.ParseSSHConfig(s)
	if err != nil {
		return err
	}
	// Collect the scripts
	scripts, err := p.collectScripts(c)
	if err != nil {
		return err
	}
	// Deliberate defer-in-loop: every script reader stays open until Apply
	// returns, then all are closed.
	for _, s := range scripts {
		defer s.Close()
	}
	// Copy and execute each script
	if err := p.runScripts(conf, scripts); err != nil {
		return err
	}
	return nil
}
// Validate checks the raw configuration: exactly one of "scripts", "script"
// or "inline" must be present, and no unknown keys are allowed.
func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) (ws []string, es []error) {
	found := 0
	for name := range c.Raw {
		switch name {
		case "scripts", "script", "inline":
			found++
		default:
			es = append(es, fmt.Errorf("Unknown configuration '%s'", name))
		}
	}
	if found != 1 {
		es = append(es, fmt.Errorf("Must provide one of 'scripts', 'script' or 'inline' to remote-exec"))
	}
	return
}
// generateScript builds a shell script from the "inline" configuration. The
// result starts with DefaultShebang and ends with a trailing newline. An
// "inline" value that is not a string or list of strings is an error.
func (p *ResourceProvisioner) generateScript(c *terraform.ResourceConfig) (string, error) {
	script := []string{DefaultShebang}
	if raw, ok := c.Config["inline"]; ok {
		switch v := raw.(type) {
		case string:
			script = append(script, v)
		case []string:
			script = append(script, v...)
		case []interface{}:
			for _, item := range v {
				line, isStr := item.(string)
				if !isStr {
					return "", fmt.Errorf("Unsupported 'inline' type! Must be string, or list of strings.")
				}
				script = append(script, line)
			}
		default:
			return "", fmt.Errorf("Unsupported 'inline' type! Must be string, or list of strings.")
		}
	}
	script = append(script, "")
	return strings.Join(script, "\n"), nil
}
// collectScripts is used to collect all the scripts we need
// to execute in preparation for copying them. The caller owns the returned
// ReadClosers and must close them.
func (p *ResourceProvisioner) collectScripts(c *terraform.ResourceConfig) ([]io.ReadCloser, error) {
	// Check if inline
	_, ok := c.Config["inline"]
	if ok {
		// "inline" is exclusive with script/scripts (enforced by Validate),
		// so the generated script is the only thing to run.
		script, err := p.generateScript(c)
		if err != nil {
			return nil, err
		}
		rc := ioutil.NopCloser(bytes.NewReader([]byte(script)))
		return []io.ReadCloser{rc}, nil
	}
	// Collect scripts
	var scripts []string
	s, ok := c.Config["script"]
	if ok {
		sStr, ok := s.(string)
		if !ok {
			return nil, fmt.Errorf("Unsupported 'script' type! Must be a string.")
		}
		scripts = append(scripts, sStr)
	}
	sl, ok := c.Config["scripts"]
	if ok {
		switch slt := sl.(type) {
		case []string:
			scripts = append(scripts, slt...)
		case []interface{}:
			for _, l := range slt {
				lStr, ok := l.(string)
				if ok {
					scripts = append(scripts, lStr)
				} else {
					return nil, fmt.Errorf("Unsupported 'scripts' type! Must be list of strings.")
				}
			}
		default:
			return nil, fmt.Errorf("Unsupported 'scripts' type! Must be list of strings.")
		}
	}
	// Open all the scripts
	var fhs []io.ReadCloser
	for _, s := range scripts {
		fh, err := os.Open(s)
		if err != nil {
			// Close any handles already opened before bailing out so we do
			// not leak file descriptors on a partial failure.
			for _, fh := range fhs {
				fh.Close()
			}
			return nil, fmt.Errorf("Failed to open script '%s': %v", s, err)
		}
		fhs = append(fhs, fh)
	}
	// Done, return the file handles
	return fhs, nil
}
// runScripts is used to copy and execute a set of scripts on the remote
// host. The connection and the per-script upload/start are retried until
// conf.TimeoutVal elapses.
func (p *ResourceProvisioner) runScripts(conf *helper.SSHConfig, scripts []io.ReadCloser) error {
	// Get the SSH client config
	config, err := helper.PrepareConfig(conf)
	if err != nil {
		return err
	}

	// Wait and retry until we establish the SSH connection
	var comm *helper.SSHCommunicator
	err = retryFunc(conf.TimeoutVal, func() error {
		host := fmt.Sprintf("%s:%d", conf.Host, conf.Port)
		comm, err = helper.New(host, config)
		return err
	})
	if err != nil {
		return err
	}

	for _, script := range scripts {
		// Create the log-streaming pipes once per script, OUTSIDE the retry
		// closure. The previous code created a fresh pipe pair (and two
		// goroutines) on every retry attempt and never closed the writers,
		// leaking goroutines for the life of the process.
		stdOutReader, stdOutWriter := io.Pipe()
		stdErrReader, stdErrWriter := io.Pipe()
		go streamLogs(stdOutReader, "stdout")
		go streamLogs(stdErrReader, "stderr")

		var cmd *helper.RemoteCmd
		err := retryFunc(conf.TimeoutVal, func() error {
			if err := comm.Upload(conf.ScriptPath, script); err != nil {
				return fmt.Errorf("Failed to upload script: %v", err)
			}
			cmd = &helper.RemoteCmd{
				Command: fmt.Sprintf("chmod 0777 %s", conf.ScriptPath),
			}
			if err := comm.Start(cmd); err != nil {
				return fmt.Errorf(
					"Error chmodding script file to 0777 in remote "+
						"machine: %s", err)
			}
			cmd.Wait()
			cmd = &helper.RemoteCmd{
				Command: conf.ScriptPath,
				Stdout:  stdOutWriter,
				Stderr:  stdErrWriter,
			}
			if err := comm.Start(cmd); err != nil {
				return fmt.Errorf("Error starting script: %v", err)
			}
			return nil
		})
		if err == nil {
			cmd.Wait()
			if cmd.ExitStatus != 0 {
				err = fmt.Errorf("Script exited with non-zero exit status: %d", cmd.ExitStatus)
			}
		}

		// Close the writers so the streamLogs goroutines see EOF and exit,
		// whether or not the script succeeded.
		stdOutWriter.Close()
		stdErrWriter.Close()
		if err != nil {
			return err
		}
	}
	return nil
}
// retryFunc invokes f repeatedly until it succeeds or timeout elapses,
// pausing 3 seconds between attempts. On timeout the last error from f is
// returned.
func retryFunc(timeout time.Duration, f func() error) error {
	deadline := time.After(timeout)
	for {
		lastErr := f()
		if lastErr == nil {
			return nil
		}
		log.Printf("Retryable error: %v", lastErr)
		// Wait for whichever comes first: the overall deadline or the
		// fixed retry interval.
		select {
		case <-deadline:
			return lastErr
		case <-time.After(3 * time.Second):
		}
	}
}
// streamLogs is used to stream lines from stdout/stderr
// of a remote command to log output for users. It closes r when the stream
// ends.
func streamLogs(r io.ReadCloser, name string) {
	defer r.Close()
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		log.Printf("remote-exec: %s: %s", name, scanner.Text())
	}
	// Previously a scanner error was silently discarded, making truncated
	// remote output impossible to diagnose. Surface it in the log instead.
	if err := scanner.Err(); err != nil {
		log.Printf("remote-exec: %s: error reading stream: %v", name, err)
	}
}
provisioners/remote-exec: output
package remoteexec
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"strings"
"time"
helper "github.com/hashicorp/terraform/helper/ssh"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/go-linereader"
)
const (
	// DefaultShebang is added at the top of the script file
	DefaultShebang = "#!/bin/sh"
)

// ResourceProvisioner implements the remote-exec provisioner: it copies the
// configured scripts to a remote host over SSH and executes them there,
// streaming their output to the UI.
type ResourceProvisioner struct{}
// Apply runs the provisioner: it verifies the connection is SSH, parses the
// SSH configuration, collects the configured scripts, then uploads and
// executes each one on the remote host, reporting progress through o.
func (p *ResourceProvisioner) Apply(
	o terraform.UIOutput,
	s *terraform.InstanceState,
	c *terraform.ResourceConfig) error {
	// Ensure the connection type is SSH
	if err := helper.VerifySSH(s); err != nil {
		return err
	}
	// Get the SSH configuration
	conf, err := helper.ParseSSHConfig(s)
	if err != nil {
		return err
	}
	// Collect the scripts
	scripts, err := p.collectScripts(c)
	if err != nil {
		return err
	}
	// Deliberate defer-in-loop: every script reader stays open until Apply
	// returns, then all are closed.
	for _, s := range scripts {
		defer s.Close()
	}
	// Copy and execute each script
	if err := p.runScripts(o, conf, scripts); err != nil {
		return err
	}
	return nil
}
// Validate checks the raw configuration: exactly one of "scripts", "script"
// or "inline" must be present, and no unknown keys are allowed.
func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) (ws []string, es []error) {
	found := 0
	for name := range c.Raw {
		switch name {
		case "scripts", "script", "inline":
			found++
		default:
			es = append(es, fmt.Errorf("Unknown configuration '%s'", name))
		}
	}
	if found != 1 {
		es = append(es, fmt.Errorf("Must provide one of 'scripts', 'script' or 'inline' to remote-exec"))
	}
	return
}
// generateScript builds a shell script from the "inline" configuration. The
// result starts with DefaultShebang and ends with a trailing newline. An
// "inline" value that is not a string or list of strings is an error.
func (p *ResourceProvisioner) generateScript(c *terraform.ResourceConfig) (string, error) {
	script := []string{DefaultShebang}
	if raw, ok := c.Config["inline"]; ok {
		switch v := raw.(type) {
		case string:
			script = append(script, v)
		case []string:
			script = append(script, v...)
		case []interface{}:
			for _, item := range v {
				line, isStr := item.(string)
				if !isStr {
					return "", fmt.Errorf("Unsupported 'inline' type! Must be string, or list of strings.")
				}
				script = append(script, line)
			}
		default:
			return "", fmt.Errorf("Unsupported 'inline' type! Must be string, or list of strings.")
		}
	}
	script = append(script, "")
	return strings.Join(script, "\n"), nil
}
// collectScripts is used to collect all the scripts we need
// to execute in preparation for copying them. The caller owns the returned
// ReadClosers and must close them.
func (p *ResourceProvisioner) collectScripts(c *terraform.ResourceConfig) ([]io.ReadCloser, error) {
	// Check if inline
	_, ok := c.Config["inline"]
	if ok {
		// "inline" is exclusive with script/scripts (enforced by Validate),
		// so the generated script is the only thing to run.
		script, err := p.generateScript(c)
		if err != nil {
			return nil, err
		}
		rc := ioutil.NopCloser(bytes.NewReader([]byte(script)))
		return []io.ReadCloser{rc}, nil
	}
	// Collect scripts
	var scripts []string
	s, ok := c.Config["script"]
	if ok {
		sStr, ok := s.(string)
		if !ok {
			return nil, fmt.Errorf("Unsupported 'script' type! Must be a string.")
		}
		scripts = append(scripts, sStr)
	}
	sl, ok := c.Config["scripts"]
	if ok {
		switch slt := sl.(type) {
		case []string:
			scripts = append(scripts, slt...)
		case []interface{}:
			for _, l := range slt {
				lStr, ok := l.(string)
				if ok {
					scripts = append(scripts, lStr)
				} else {
					return nil, fmt.Errorf("Unsupported 'scripts' type! Must be list of strings.")
				}
			}
		default:
			return nil, fmt.Errorf("Unsupported 'scripts' type! Must be list of strings.")
		}
	}
	// Open all the scripts
	var fhs []io.ReadCloser
	for _, s := range scripts {
		fh, err := os.Open(s)
		if err != nil {
			// Close any handles already opened before bailing out so we do
			// not leak file descriptors on a partial failure.
			for _, fh := range fhs {
				fh.Close()
			}
			return nil, fmt.Errorf("Failed to open script '%s': %v", s, err)
		}
		fhs = append(fhs, fh)
	}
	// Done, return the file handles
	return fhs, nil
}
// runScripts is used to copy and execute a set of scripts on the remote
// host, reporting connection progress and script output through o. Both the
// initial connection and each script's upload/start are retried until
// conf.TimeoutVal elapses.
func (p *ResourceProvisioner) runScripts(
	o terraform.UIOutput,
	conf *helper.SSHConfig,
	scripts []io.ReadCloser) error {
	// Get the SSH client config
	config, err := helper.PrepareConfig(conf)
	if err != nil {
		return err
	}
	// Report connection parameters; only booleans for the credentials so no
	// secret material reaches the UI.
	o.Output(fmt.Sprintf(
		"Connecting to remote host via SSH...\n"+
			"  Host: %s\n"+
			"  User: %s\n"+
			"  Password: %v\n"+
			"  Private key: %v",
		conf.Host, conf.User,
		conf.Password != "",
		conf.KeyFile != ""))
	// Wait and retry until we establish the SSH connection
	var comm *helper.SSHCommunicator
	err = retryFunc(conf.TimeoutVal, func() error {
		host := fmt.Sprintf("%s:%d", conf.Host, conf.Port)
		comm, err = helper.New(host, config)
		if err != nil {
			o.Output(fmt.Sprintf("Connection error, will retry: %s", err))
		}
		return err
	})
	if err != nil {
		return err
	}
	o.Output("Connected! Executing scripts...")
	for _, script := range scripts {
		var cmd *helper.RemoteCmd
		// Pipes and copier goroutines are created once per script, outside
		// the retry closure, so retries do not leak goroutines.
		outR, outW := io.Pipe()
		errR, errW := io.Pipe()
		outDoneCh := make(chan struct{})
		errDoneCh := make(chan struct{})
		go p.copyOutput(o, outR, outDoneCh)
		go p.copyOutput(o, errR, errDoneCh)
		err := retryFunc(conf.TimeoutVal, func() error {
			if err := comm.Upload(conf.ScriptPath, script); err != nil {
				return fmt.Errorf("Failed to upload script: %v", err)
			}
			cmd = &helper.RemoteCmd{
				Command: fmt.Sprintf("chmod 0777 %s", conf.ScriptPath),
			}
			if err := comm.Start(cmd); err != nil {
				return fmt.Errorf(
					"Error chmodding script file to 0777 in remote "+
						"machine: %s", err)
			}
			cmd.Wait()
			cmd = &helper.RemoteCmd{
				Command: conf.ScriptPath,
				Stdout:  outW,
				Stderr:  errW,
			}
			if err := comm.Start(cmd); err != nil {
				return fmt.Errorf("Error starting script: %v", err)
			}
			return nil
		})
		if err == nil {
			cmd.Wait()
			if cmd.ExitStatus != 0 {
				err = fmt.Errorf("Script exited with non-zero exit status: %d", cmd.ExitStatus)
			}
		}
		// Wait for output to clean up: closing the writers lets the copier
		// goroutines see EOF, then the done channels confirm they exited.
		outW.Close()
		errW.Close()
		<-outDoneCh
		<-errDoneCh
		// If we have an error, return it out now that we've cleaned up
		if err != nil {
			return err
		}
	}
	return nil
}
// copyOutput forwards each line read from r to the UI output, closing doneCh
// when r is exhausted so the caller can wait for the stream to drain.
func (p *ResourceProvisioner) copyOutput(
	o terraform.UIOutput, r io.Reader, doneCh chan<- struct{}) {
	defer close(doneCh)
	lr := linereader.New(r)
	for line := range lr.Ch {
		o.Output(line)
	}
}
// retryFunc invokes f repeatedly until it succeeds or timeout elapses,
// pausing 3 seconds between attempts. On timeout the last error from f is
// returned.
func retryFunc(timeout time.Duration, f func() error) error {
	deadline := time.After(timeout)
	for {
		lastErr := f()
		if lastErr == nil {
			return nil
		}
		log.Printf("Retryable error: %v", lastErr)
		// Wait for whichever comes first: the overall deadline or the
		// fixed retry interval.
		select {
		case <-deadline:
			return lastErr
		case <-time.After(3 * time.Second):
		}
	}
}
|
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The coordinator runs on GCE and coordinates builds in Docker containers.
package main // import "golang.org/x/tools/dashboard/coordinator"
import (
"bytes"
"crypto/hmac"
"crypto/md5"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"sort"
"strings"
"sync"
"time"
)
// Command-line flags. All are read once in main after flag.Parse.
var (
	masterKeyFile = flag.String("masterkey", "", "Path to builder master key. Else fetched using GCE project attribute 'builder-master-key'.")
	maxBuilds     = flag.Int("maxbuilds", 6, "Max concurrent builds")

	// Debug flags:
	addTemp = flag.Bool("temp", false, "Append -temp to all builders.")
	just    = flag.String("just", "", "If non-empty, run single build in the foreground. Requires rev.")
	rev     = flag.String("rev", "", "Revision to build.")
)
// Process-wide state.
var (
	startTime = time.Now()
	builders  = map[string]buildConfig{} // populated once at startup
	watchers  = map[string]watchConfig{} // populated once at startup
	donec     = make(chan builderRev)    // reports of finished builders

	statusMu sync.Mutex // guards status
	status   = map[builderRev]*buildStatus{}
)
// imageInfo tracks a Docker image tarball URL together with the
// Last-Modified stamp of the copy most recently loaded, so condUpdateImage
// only re-pulls when the remote tarball changes.
type imageInfo struct {
	url string // of tar file

	mu      sync.Mutex // held for the whole check-and-load in condUpdateImage
	lastMod string
}
// images maps every Docker image the coordinator may run to the location of
// its tarball; condUpdateImage fatals on any image not described here.
var images = map[string]*imageInfo{
	"go-commit-watcher":          {url: "https://storage.googleapis.com/go-builder-data/docker-commit-watcher.tar.gz"},
	"gobuilders/linux-x86-base":  {url: "https://storage.googleapis.com/go-builder-data/docker-linux.base.tar.gz"},
	"gobuilders/linux-x86-clang": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.clang.tar.gz"},
	"gobuilders/linux-x86-gccgo": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.gccgo.tar.gz"},
	"gobuilders/linux-x86-nacl":  {url: "https://storage.googleapis.com/go-builder-data/docker-linux.nacl.tar.gz"},
	"gobuilders/linux-x86-sid":   {url: "https://storage.googleapis.com/go-builder-data/docker-linux.sid.tar.gz"},
}
// buildConfig describes one builder: the Docker image to run and the
// arguments passed through to the in-container builder binary.
type buildConfig struct {
	name    string   // "linux-amd64-race"
	image   string   // Docker image to use to build
	cmd     string   // optional -cmd flag (relative to go/src/)
	env     []string // extra environment ("key=value") pairs
	dashURL string   // url of the build dashboard
	tool    string   // the tool this configuration is for
}
// watchConfig describes one commit watcher: the repository it polls and the
// dashboard it reports to.
type watchConfig struct {
	repo     string        // "https://go.googlesource.com/go"
	dash     string        // "https://build.golang.org/" (must end in /)
	interval time.Duration // Polling interval
}
// main registers all builder and watcher configurations, then either runs a
// single foreground build (-just/-rev) or serves the coordinator loop
// forever: an HTTP status server plus a select loop dispatching work from
// the dashboard pollers to docker builds.
func main() {
	flag.Parse()
	addBuilder(buildConfig{name: "linux-386"})
	addBuilder(buildConfig{name: "linux-386-387", env: []string{"GO386=387"}})
	addBuilder(buildConfig{name: "linux-amd64"})
	addBuilder(buildConfig{name: "linux-amd64-nocgo", env: []string{"CGO_ENABLED=0", "USER=root"}})
	addBuilder(buildConfig{name: "linux-amd64-noopt", env: []string{"GO_GCFLAGS=-N -l"}})
	addBuilder(buildConfig{name: "linux-amd64-race"})
	addBuilder(buildConfig{name: "nacl-386"})
	addBuilder(buildConfig{name: "nacl-amd64p32"})
	addBuilder(buildConfig{
		name:    "linux-amd64-gccgo",
		image:   "gobuilders/linux-x86-gccgo",
		cmd:     "make RUNTESTFLAGS=\"--target_board=unix/-m64\" check-go -j16",
		dashURL: "https://build.golang.org/gccgo",
		tool:    "gccgo",
	})
	addBuilder(buildConfig{
		name:    "linux-386-gccgo",
		image:   "gobuilders/linux-x86-gccgo",
		cmd:     "make RUNTESTFLAGS=\"--target_board=unix/-m32\" check-go -j16",
		dashURL: "https://build.golang.org/gccgo",
		tool:    "gccgo",
	})
	addBuilder(buildConfig{name: "linux-386-sid", image: "gobuilders/linux-x86-sid"})
	addBuilder(buildConfig{name: "linux-amd64-sid", image: "gobuilders/linux-x86-sid"})
	addBuilder(buildConfig{name: "linux-386-clang", image: "gobuilders/linux-x86-clang"})
	addBuilder(buildConfig{name: "linux-amd64-clang", image: "gobuilders/linux-x86-clang"})
	addWatcher(watchConfig{repo: "https://go.googlesource.com/go", dash: "https://build.golang.org/"})
	// TODO(adg,cmang): fix gccgo watcher
	// addWatcher(watchConfig{repo: "https://code.google.com/p/gofrontend", dash: "https://build.golang.org/gccgo/"})
	if (*just != "") != (*rev != "") {
		log.Fatalf("--just and --rev must be used together")
	}
	if *just != "" {
		// Debug mode: run exactly one build in the foreground, then exit.
		conf, ok := builders[*just]
		if !ok {
			log.Fatalf("unknown builder %q", *just)
		}
		cmd := exec.Command("docker", append([]string{"run"}, conf.dockerRunArgs(*rev)...)...)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		if err := cmd.Run(); err != nil {
			log.Fatalf("Build failed: %v", err)
		}
		return
	}
	http.HandleFunc("/", handleStatus)
	http.HandleFunc("/logs", handleLogs)
	go http.ListenAndServe(":80", nil)
	for _, watcher := range watchers {
		// NOTE(review): watchers[watcher.repo] is the same value as watcher
		// itself — confirm before simplifying.
		if err := startWatching(watchers[watcher.repo]); err != nil {
			log.Printf("Error starting watcher for %s: %v", watcher.repo, err)
		}
	}
	workc := make(chan builderRev)
	// One dashboard poller per builder, all feeding workc.
	for name, builder := range builders {
		go findWorkLoop(name, builder.dashURL, workc)
	}
	ticker := time.NewTicker(1 * time.Minute)
	for {
		select {
		case work := <-workc:
			log.Printf("workc received %+v; len(status) = %v, maxBuilds = %v; cur = %p", work, len(status), *maxBuilds, status[work])
			// Double-check against both the tracked status map and the
			// actual count of running docker containers.
			mayBuild := mayBuildRev(work)
			if mayBuild {
				if numBuilds() > *maxBuilds {
					mayBuild = false
				}
			}
			if mayBuild {
				if st, err := startBuilding(builders[work.name], work.rev); err == nil {
					setStatus(work, st)
					log.Printf("%v now building in %v", work, st.container)
				} else {
					log.Printf("Error starting to build %v: %v", work, err)
				}
			}
		case done := <-donec:
			log.Printf("%v done", done)
			setStatus(done, nil)
		case <-ticker.C:
			if numCurrentBuilds() == 0 && time.Now().After(startTime.Add(10*time.Minute)) {
				// TODO: halt the whole machine to kill the VM or something
			}
		}
	}
}
// numCurrentBuilds reports how many builds are currently tracked in status.
func numCurrentBuilds() int {
	statusMu.Lock()
	n := len(status)
	statusMu.Unlock()
	return n
}
// mayBuildRev reports whether a build of work may start: we must be under
// the concurrent-build limit and this builder/rev must not already be
// building.
func mayBuildRev(work builderRev) bool {
	statusMu.Lock()
	defer statusMu.Unlock()
	if len(status) >= *maxBuilds {
		return false
	}
	return status[work] == nil
}
// setStatus records st as the status of work; a nil st removes the entry.
func setStatus(work builderRev, st *buildStatus) {
	statusMu.Lock()
	defer statusMu.Unlock()
	if st != nil {
		status[work] = st
		return
	}
	delete(status, work)
}
// getStatus returns the tracked status for work, or nil if it is not
// building.
func getStatus(work builderRev) *buildStatus {
	statusMu.Lock()
	st := status[work]
	statusMu.Unlock()
	return st
}
// byAge sorts build statuses oldest-first by start time.
type byAge []*buildStatus

func (s byAge) Len() int           { return len(s) }
func (s byAge) Less(i, j int) bool { return s[i].start.Before(s[j].start) }
func (s byAge) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
// handleStatus serves a plain HTML overview of the currently-running builds,
// oldest first.
func handleStatus(w http.ResponseWriter, r *http.Request) {
	var active []*buildStatus
	statusMu.Lock()
	for _, st := range status {
		active = append(active, st)
	}
	statusMu.Unlock()
	// Use the snapshot's length; the original re-read len(status) here
	// without holding statusMu, which is a data race with setStatus.
	fmt.Fprintf(w, "<html><body><h1>Go build coordinator</h1>%d of max %d builds running:<p><pre>", len(active), *maxBuilds)
	sort.Sort(byAge(active))
	for _, st := range active {
		fmt.Fprintf(w, "%-22s hg %s in container <a href='/logs?name=%s&rev=%s'>%s</a>, %v ago\n", st.name, st.rev, st.name, st.rev,
			st.container, time.Since(st.start))
	}
	fmt.Fprintf(w, "</pre></body></html>")
}
// handleLogs serves the docker logs of a currently-running build identified
// by ?name= and ?rev=, with the per-builder secret key scrubbed out.
func handleLogs(w http.ResponseWriter, r *http.Request) {
	st := getStatus(builderRev{r.FormValue("name"), r.FormValue("rev")})
	if st == nil {
		fmt.Fprintf(w, "<html><body><h1>not building</h1>")
		return
	}
	out, err := exec.Command("docker", "logs", st.container).CombinedOutput()
	if err != nil {
		log.Print(err)
		http.Error(w, "Error fetching logs. Already finished?", 500)
		return
	}
	// Never serve the builder key: replace every occurrence in the output.
	key := builderKey(st.name)
	logs := strings.Replace(string(out), key, "BUILDERKEY", -1)
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	io.WriteString(w, logs)
}
// findWorkLoop polls the dashboard forever, once a minute, sending any
// revision that builderName should build down work.
func findWorkLoop(builderName, dashURL string, work chan<- builderRev) {
	// TODO: make this better
	for {
		switch rev, err := findWork(builderName, dashURL); {
		case err != nil:
			log.Printf("Finding work for %s: %v", builderName, err)
		case rev != "":
			work <- builderRev{builderName, rev}
		}
		time.Sleep(60 * time.Second)
	}
}
// findWork asks the build dashboard for the next revision builderName should
// build. It returns "" with a nil error when there is nothing to do.
func findWork(builderName, dashURL string) (rev string, err error) {
	var jres struct {
		Response struct {
			Kind string
			Data struct {
				Hash        string
				PerfResults []string
			}
		}
	}
	res, err := http.Get(dashURL + "/todo?builder=" + builderName + "&kind=build-go-commit")
	if err != nil {
		return
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		return "", fmt.Errorf("unexpected http status %d", res.StatusCode)
	}
	// Check the decode error before consulting jres; the original returned a
	// hash read from the partially-decoded response alongside the error.
	if err := json.NewDecoder(res.Body).Decode(&jres); err != nil {
		return "", err
	}
	if jres.Response.Kind == "build-go-commit" {
		rev = jres.Response.Data.Hash
	}
	return rev, nil
}
// builderRev identifies one unit of work: a builder name plus the revision
// it should build. It is the key of the status map.
type builderRev struct {
	name, rev string
}
// returns the part after "docker run": volume mounts for the builder key
// (when one is derivable), extra environment pairs, then the image and the
// in-container builder command line.
func (conf buildConfig) dockerRunArgs(rev string) (args []string) {
	if key := builderKey(conf.name); key != "" {
		// Materialize the key on the host once; 0600 keeps it private.
		tmpKey := "/tmp/" + conf.name + ".buildkey"
		if _, err := os.Stat(tmpKey); err != nil {
			if err := ioutil.WriteFile(tmpKey, []byte(key), 0600); err != nil {
				log.Fatal(err)
			}
		}
		// Images may look for .gobuildkey in / or /root, so provide both.
		// TODO(adg): fix images that look in the wrong place.
		args = append(args, "-v", tmpKey+":/.gobuildkey")
		args = append(args, "-v", tmpKey+":/root/.gobuildkey")
	}
	for _, pair := range conf.env {
		args = append(args, "-e", pair)
	}
	args = append(args,
		conf.image,
		"/usr/local/bin/builder",
		"-rev="+rev,
		"-dashboard="+conf.dashURL,
		"-tool="+conf.tool,
		"-buildroot=/",
		"-v",
	)
	if conf.cmd != "" {
		args = append(args, "-cmd", conf.cmd)
	}
	args = append(args, conf.name)
	return
}
// addBuilder registers a builder configuration, filling in defaults (dash
// URL, tool, and per-OS default images) and panicking on empty or duplicate
// names — registration happens once at startup, so a panic is a programmer
// error.
func addBuilder(c buildConfig) {
	if c.name == "" {
		panic("empty name")
	}
	if *addTemp {
		c.name += "-temp"
	}
	if _, dup := builders[c.name]; dup {
		panic("dup name")
	}
	if c.dashURL == "" {
		c.dashURL = "https://build.golang.org"
	}
	if c.tool == "" {
		c.tool = "go"
	}
	if strings.HasPrefix(c.name, "nacl-") {
		if c.image == "" {
			c.image = "gobuilders/linux-x86-nacl"
		}
		if c.cmd == "" {
			c.cmd = "/usr/local/bin/build-command.pl"
		}
	}
	if strings.HasPrefix(c.name, "linux-") && c.image == "" {
		c.image = "gobuilders/linux-x86-base"
	}
	if c.image == "" {
		panic("empty image")
	}
	builders[c.name] = c
}
// returns the part after "docker run" for launching the commit-watcher
// container: key volume mounts (when a master key exists) followed by the
// image and watcher command line.
func (conf watchConfig) dockerRunArgs() (args []string) {
	key := masterKey()
	// Log only whether a key is present. The previous code printed the key
	// itself with %q, writing the builder master secret into the logs.
	log.Printf("Running watcher (master key present: %v)", len(key) > 0)
	if len(key) > 0 {
		tmpKey := "/tmp/watcher.buildkey"
		if _, err := os.Stat(tmpKey); err != nil {
			if err := ioutil.WriteFile(tmpKey, key, 0600); err != nil {
				log.Fatal(err)
			}
		}
		// Images may look for .gobuildkey in / or /root, so provide both.
		// TODO(adg): fix images that look in the wrong place.
		args = append(args, "-v", tmpKey+":/.gobuildkey")
		args = append(args, "-v", tmpKey+":/root/.gobuildkey")
	}
	args = append(args,
		"go-commit-watcher",
		"/usr/local/bin/watcher",
		"-repo="+conf.repo,
		"-dash="+conf.dash,
		"-poll="+conf.interval.String(),
	)
	return
}
// addWatcher registers a watcher configuration, filling any unset field with
// its default before storing it keyed by repository URL.
func addWatcher(c watchConfig) {
	if c.interval == 0 {
		c.interval = 10 * time.Second
	}
	if c.dash == "" {
		c.dash = "https://build.golang.org/"
	}
	if c.repo == "" {
		c.repo = "https://go.googlesource.com/go"
	}
	watchers[c.repo] = c
}
// condUpdateImage HEADs the image tarball URL and, when its Last-Modified
// differs from the copy last loaded, streams it into "docker load". A nil
// return means the local image is current.
func condUpdateImage(img string) error {
	ii := images[img]
	if ii == nil {
		log.Fatalf("Image %q not described.", img)
	}
	ii.mu.Lock()
	defer ii.mu.Unlock()
	res, err := http.Head(ii.url)
	if err != nil {
		return fmt.Errorf("Error checking %s: %v", ii.url, err)
	}
	if res.StatusCode != 200 {
		return fmt.Errorf("Error checking %s: %v", ii.url, res.Status)
	}
	if res.Header.Get("Last-Modified") == ii.lastMod {
		return nil
	}
	res, err = http.Get(ii.url)
	if err != nil || res.StatusCode != 200 {
		return fmt.Errorf("Get after Head failed for %s: %v, %v", ii.url, err, res)
	}
	defer res.Body.Close()
	log.Printf("Running: docker load of %s\n", ii.url)
	cmd := exec.Command("docker", "load")
	cmd.Stdin = res.Body
	var out bytes.Buffer
	cmd.Stdout = &out
	cmd.Stderr = &out
	// BUG FIX: the original read "if cmd.Run(); err != nil", which discarded
	// Run's return value and re-tested the stale nil err from http.Get, so a
	// failed "docker load" was silently treated as success and lastMod was
	// updated anyway.
	if err := cmd.Run(); err != nil {
		log.Printf("Failed to pull latest %s from %s and pipe into docker load: %v, %s", img, ii.url, err, out.Bytes())
		return err
	}
	ii.lastMod = res.Header.Get("Last-Modified")
	return nil
}
// numBuilds finds the number of go builder instances currently running, by
// counting "docker ps" lines whose image is under gobuilders/. A failure to
// run docker is treated as zero builds.
func numBuilds() int {
	out, _ := exec.Command("docker", "ps").Output()
	count := 0
	for _, line := range bytes.Split(out, []byte("\n")) {
		if bytes.HasPrefix(line, []byte("gobuilders/")) {
			count++
		}
	}
	log.Printf("num current docker builds: %d", count)
	return count
}
// startBuilding ensures the builder image is current, then launches a
// detached docker container building rev and returns its status record. A
// goroutine waits on the container, reports completion on donec, and removes
// the container afterwards.
func startBuilding(conf buildConfig, rev string) (*buildStatus, error) {
	if err := condUpdateImage(conf.image); err != nil {
		log.Printf("Failed to setup container for %v %v: %v", conf.name, rev, err)
		return nil, err
	}
	cmd := exec.Command("docker", append([]string{"run", "-d"}, conf.dockerRunArgs(rev)...)...)
	all, err := cmd.CombinedOutput()
	log.Printf("Docker run for %v %v = err:%v, output:%s", conf.name, rev, err, all)
	if err != nil {
		return nil, err
	}
	// "docker run -d" prints the new container ID on stdout.
	container := strings.TrimSpace(string(all))
	go func() {
		all, err := exec.Command("docker", "wait", container).CombinedOutput()
		log.Printf("docker wait %s/%s: %v, %s", container, rev, err, strings.TrimSpace(string(all)))
		donec <- builderRev{conf.name, rev}
		exec.Command("docker", "rm", container).Run()
	}()
	return &buildStatus{
		builderRev: builderRev{
			name: conf.name,
			rev:  rev,
		},
		container: container,
		start:     time.Now(),
	}, nil
}
// buildStatus tracks one in-flight build: which builder/rev it is, the
// docker container running it, and when it started.
type buildStatus struct {
	builderRev

	container string
	start     time.Time

	mu sync.Mutex
	// ...
}
// startWatching runs the commit-watcher container for conf. Any failure —
// at startup (via the named-return defer) or a later crash of the container
// (via the wait goroutine) — schedules a restart.
func startWatching(conf watchConfig) (err error) {
	defer func() {
		// Named return: every error path also schedules a retry.
		if err != nil {
			restartWatcherSoon(conf)
		}
	}()
	log.Printf("Starting watcher for %v", conf.repo)
	if err := condUpdateImage("go-commit-watcher"); err != nil {
		log.Printf("Failed to setup container for commit watcher: %v", err)
		return err
	}
	cmd := exec.Command("docker", append([]string{"run", "-d"}, conf.dockerRunArgs()...)...)
	all, err := cmd.CombinedOutput()
	if err != nil {
		log.Printf("Docker run for commit watcher = err:%v, output: %s", err, all)
		return err
	}
	container := strings.TrimSpace(string(all))
	// Start a goroutine to wait for the watcher to die.
	go func() {
		exec.Command("docker", "wait", container).Run()
		exec.Command("docker", "rm", "-v", container).Run()
		log.Printf("Watcher crashed. Restarting soon.")
		restartWatcherSoon(conf)
	}()
	return nil
}
// restartWatcherSoon re-launches the watcher for conf after a 30-second
// back-off.
func restartWatcherSoon(conf watchConfig) {
	time.AfterFunc(30*time.Second, func() {
		startWatching(conf)
	})
}
// builderKey derives the per-builder key as hex-encoded HMAC-MD5 of the
// builder name under the master key, or "" when no master key is available.
func builderKey(builder string) string {
	master := masterKey()
	if len(master) == 0 {
		return ""
	}
	mac := hmac.New(md5.New, master)
	mac.Write([]byte(builder))
	return fmt.Sprintf("%x", mac.Sum(nil))
}
// masterKey returns the builder master key, loading it exactly once on first
// use (see loadKey).
func masterKey() []byte {
	keyOnce.Do(loadKey)
	return masterKeyCache
}
// keyOnce guards the one-time population of masterKeyCache by loadKey.
var (
	keyOnce        sync.Once
	masterKeyCache []byte
)
// loadKey populates masterKeyCache, preferring the -masterkey file and
// otherwise fetching the GCE project metadata attribute; any failure is
// fatal since nothing works without the key.
func loadKey() {
	if *masterKeyFile != "" {
		b, err := ioutil.ReadFile(*masterKeyFile)
		if err != nil {
			log.Fatal(err)
		}
		masterKeyCache = bytes.TrimSpace(b)
		return
	}
	req, _ := http.NewRequest("GET", "http://metadata.google.internal/computeMetadata/v1/project/attributes/builder-master-key", nil)
	// Required header for the GCE metadata service.
	req.Header.Set("Metadata-Flavor", "Google")
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal("No builder master key available")
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		log.Fatalf("No builder-master-key project attribute available.")
	}
	slurp, err := ioutil.ReadAll(res.Body)
	if err != nil {
		log.Fatal(err)
	}
	masterKeyCache = bytes.TrimSpace(slurp)
}
dashboard/coordinator: run background goroutine cleaning old docker containers
I'm tired of figuring out what isn't cleaning up after itself, so this change
keeps a background goroutine that looks at old containers and deletes them
as a backup measure. Verified that it works by creating some dummy containers
on the machine.
Also adds df output to the HTML status page.
Change-Id: I23adc22872def882b3b9b3a4ec730017899bb966
Reviewed-on: https://go-review.googlesource.com/1537
Reviewed-by: Andrew Gerrand <395a7d33bec8475c9b83b7d440f141bcbd994aa5@golang.org>
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The coordinator runs on GCE and coordinates builds in Docker containers.
package main // import "golang.org/x/tools/dashboard/coordinator"
import (
"bytes"
"crypto/hmac"
"crypto/md5"
"encoding/json"
"flag"
"fmt"
"html"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"sort"
"strings"
"sync"
"time"
)
// Command-line flags. All are read once in main after flag.Parse.
var (
	masterKeyFile = flag.String("masterkey", "", "Path to builder master key. Else fetched using GCE project attribute 'builder-master-key'.")
	maxBuilds     = flag.Int("maxbuilds", 6, "Max concurrent builds")

	// Debug flags:
	addTemp = flag.Bool("temp", false, "Append -temp to all builders.")
	just    = flag.String("just", "", "If non-empty, run single build in the foreground. Requires rev.")
	rev     = flag.String("rev", "", "Revision to build.")
)
// Process-wide state.
var (
	startTime = time.Now()
	builders  = map[string]buildConfig{} // populated once at startup
	watchers  = map[string]watchConfig{} // populated once at startup
	donec     = make(chan builderRev)    // reports of finished builders

	statusMu sync.Mutex // guards status
	status   = map[builderRev]*buildStatus{}
)
// imageInfo tracks a Docker image tarball URL together with the
// Last-Modified stamp of the copy most recently loaded, so we only re-pull
// when the remote tarball changes.
type imageInfo struct {
	url string // of tar file

	mu      sync.Mutex // held for the whole check-and-load
	lastMod string
}
// images maps every Docker image the coordinator may run to the location of
// its tarball.
var images = map[string]*imageInfo{
	"go-commit-watcher":          {url: "https://storage.googleapis.com/go-builder-data/docker-commit-watcher.tar.gz"},
	"gobuilders/linux-x86-base":  {url: "https://storage.googleapis.com/go-builder-data/docker-linux.base.tar.gz"},
	"gobuilders/linux-x86-clang": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.clang.tar.gz"},
	"gobuilders/linux-x86-gccgo": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.gccgo.tar.gz"},
	"gobuilders/linux-x86-nacl":  {url: "https://storage.googleapis.com/go-builder-data/docker-linux.nacl.tar.gz"},
	"gobuilders/linux-x86-sid":   {url: "https://storage.googleapis.com/go-builder-data/docker-linux.sid.tar.gz"},
}
// buildConfig describes one builder: the Docker image to run and the
// arguments passed through to the in-container builder binary.
type buildConfig struct {
	name    string   // "linux-amd64-race"
	image   string   // Docker image to use to build
	cmd     string   // optional -cmd flag (relative to go/src/)
	env     []string // extra environment ("key=value") pairs
	dashURL string   // url of the build dashboard
	tool    string   // the tool this configuration is for
}
// watchConfig describes one commit watcher: the repository it polls and the
// dashboard it reports to.
type watchConfig struct {
	repo     string        // "https://go.googlesource.com/go"
	dash     string        // "https://build.golang.org/" (must end in /)
	interval time.Duration // Polling interval
}
// main registers the builder and watcher configurations, then either runs a
// single foreground build (-just/-rev) or acts as the long-running
// coordinator: an HTTP status server plus a loop that starts builds as the
// dashboard reports pending work.
func main() {
	flag.Parse()

	addBuilder(buildConfig{name: "linux-386"})
	addBuilder(buildConfig{name: "linux-386-387", env: []string{"GO386=387"}})
	addBuilder(buildConfig{name: "linux-amd64"})
	addBuilder(buildConfig{name: "linux-amd64-nocgo", env: []string{"CGO_ENABLED=0", "USER=root"}})
	addBuilder(buildConfig{name: "linux-amd64-noopt", env: []string{"GO_GCFLAGS=-N -l"}})
	addBuilder(buildConfig{name: "linux-amd64-race"})
	addBuilder(buildConfig{name: "nacl-386"})
	addBuilder(buildConfig{name: "nacl-amd64p32"})
	addBuilder(buildConfig{
		name:    "linux-amd64-gccgo",
		image:   "gobuilders/linux-x86-gccgo",
		cmd:     "make RUNTESTFLAGS=\"--target_board=unix/-m64\" check-go -j16",
		dashURL: "https://build.golang.org/gccgo",
		tool:    "gccgo",
	})
	addBuilder(buildConfig{
		name:    "linux-386-gccgo",
		image:   "gobuilders/linux-x86-gccgo",
		cmd:     "make RUNTESTFLAGS=\"--target_board=unix/-m32\" check-go -j16",
		dashURL: "https://build.golang.org/gccgo",
		tool:    "gccgo",
	})
	addBuilder(buildConfig{name: "linux-386-sid", image: "gobuilders/linux-x86-sid"})
	addBuilder(buildConfig{name: "linux-amd64-sid", image: "gobuilders/linux-x86-sid"})
	addBuilder(buildConfig{name: "linux-386-clang", image: "gobuilders/linux-x86-clang"})
	addBuilder(buildConfig{name: "linux-amd64-clang", image: "gobuilders/linux-x86-clang"})

	addWatcher(watchConfig{repo: "https://go.googlesource.com/go", dash: "https://build.golang.org/"})
	// TODO(adg,cmang): fix gccgo watcher
	// addWatcher(watchConfig{repo: "https://code.google.com/p/gofrontend", dash: "https://build.golang.org/gccgo/"})

	// Debug mode: run exactly one build in the foreground and exit.
	// -just and -rev must be given together.
	if (*just != "") != (*rev != "") {
		log.Fatalf("--just and --rev must be used together")
	}
	if *just != "" {
		conf, ok := builders[*just]
		if !ok {
			log.Fatalf("unknown builder %q", *just)
		}
		cmd := exec.Command("docker", append([]string{"run"}, conf.dockerRunArgs(*rev)...)...)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		if err := cmd.Run(); err != nil {
			log.Fatalf("Build failed: %v", err)
		}
		return
	}

	http.HandleFunc("/", handleStatus)
	http.HandleFunc("/logs", handleLogs)
	go http.ListenAndServe(":80", nil)

	go cleanUpOldContainers()
	for _, watcher := range watchers {
		if err := startWatching(watchers[watcher.repo]); err != nil {
			log.Printf("Error starting watcher for %s: %v", watcher.repo, err)
		}
	}

	// One polling goroutine per builder feeds pending revisions into workc.
	workc := make(chan builderRev)
	for name, builder := range builders {
		go findWorkLoop(name, builder.dashURL, workc)
	}

	ticker := time.NewTicker(1 * time.Minute)
	for {
		select {
		case work := <-workc:
			log.Printf("workc received %+v; len(status) = %v, maxBuilds = %v; cur = %p", work, len(status), *maxBuilds, status[work])
			mayBuild := mayBuildRev(work)
			if mayBuild {
				// Also bound by the number of actually-running containers.
				if numBuilds() > *maxBuilds {
					mayBuild = false
				}
			}
			if mayBuild {
				if st, err := startBuilding(builders[work.name], work.rev); err == nil {
					setStatus(work, st)
					log.Printf("%v now building in %v", work, st.container)
				} else {
					log.Printf("Error starting to build %v: %v", work, err)
				}
			}
		case done := <-donec:
			log.Printf("%v done", done)
			setStatus(done, nil)
		case <-ticker.C:
			// Idle check: nothing has built for a while after startup.
			if numCurrentBuilds() == 0 && time.Now().After(startTime.Add(10*time.Minute)) {
				// TODO: halt the whole machine to kill the VM or something
			}
		}
	}
}
// numCurrentBuilds reports how many builds are currently tracked in the
// status map.
func numCurrentBuilds() int {
	statusMu.Lock()
	n := len(status)
	statusMu.Unlock()
	return n
}
// mayBuildRev reports whether work may start now: the same builder+rev is
// not already building and there is capacity below maxBuilds.
func mayBuildRev(work builderRev) bool {
	statusMu.Lock()
	defer statusMu.Unlock()
	if status[work] != nil {
		return false
	}
	return len(status) < *maxBuilds
}
// setStatus records st as the status of work; passing nil removes the entry.
func setStatus(work builderRev, st *buildStatus) {
	statusMu.Lock()
	defer statusMu.Unlock()
	if st != nil {
		status[work] = st
	} else {
		delete(status, work)
	}
}
// getStatus returns the current build status for work, or nil if none.
func getStatus(work builderRev) *buildStatus {
	statusMu.Lock()
	st := status[work]
	statusMu.Unlock()
	return st
}
// byAge implements sort.Interface, ordering build statuses oldest-first.
type byAge []*buildStatus

func (s byAge) Len() int           { return len(s) }
func (s byAge) Less(i, j int) bool { return s[j].start.After(s[i].start) }
func (s byAge) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
// handleStatus serves "/" with a summary of active builds and disk space.
func handleStatus(w http.ResponseWriter, r *http.Request) {
	var active []*buildStatus
	statusMu.Lock()
	for _, st := range status {
		active = append(active, st)
	}
	statusMu.Unlock()
	// Bug fix: the original read len(status) here without holding statusMu,
	// a data race with the coordinator loop's setStatus calls. len(active)
	// is the same value, captured above under the lock.
	fmt.Fprintf(w, "<html><body><h1>Go build coordinator</h1>%d of max %d builds running:<p><pre>", len(active), *maxBuilds)
	sort.Sort(byAge(active))
	for _, st := range active {
		fmt.Fprintf(w, "%-22s hg %s in container <a href='/logs?name=%s&rev=%s'>%s</a>, %v ago\n", st.name, st.rev, st.name, st.rev,
			st.container, time.Since(st.start)) // time.Since is the idiomatic form of Now().Sub
	}
	fmt.Fprintf(w, "</pre><h2>disk space</h2><pre>%s</pre></body></html>", html.EscapeString(diskFree()))
}
// diskFree returns "df -h" output for the status page; command failures
// are ignored and yield an empty string.
func diskFree() string {
	b, _ := exec.Command("df", "-h").Output()
	return string(b)
}
// handleLogs serves "/logs?name=...&rev=..." with the docker logs of an
// in-flight build, redacting the per-builder secret key.
func handleLogs(w http.ResponseWriter, r *http.Request) {
	st := getStatus(builderRev{r.FormValue("name"), r.FormValue("rev")})
	if st == nil {
		fmt.Fprintf(w, "<html><body><h1>not building</h1>")
		return
	}
	out, err := exec.Command("docker", "logs", st.container).CombinedOutput()
	if err != nil {
		log.Print(err)
		http.Error(w, "Error fetching logs. Already finished?", 500)
		return
	}
	logs := string(out)
	// Bug fix: only redact when a key exists. builderKey returns "" when no
	// master key is configured, and strings.Replace with an empty old string
	// inserts "BUILDERKEY" between every byte of the logs.
	if key := builderKey(st.name); key != "" {
		logs = strings.Replace(logs, key, "BUILDERKEY", -1)
	}
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	io.WriteString(w, logs)
}
// findWorkLoop polls the dashboard for builderName once a minute, forever,
// sending any revision that needs building down the work channel.
func findWorkLoop(builderName, dashURL string, work chan<- builderRev) {
	// TODO: make this better
	for {
		switch rev, err := findWork(builderName, dashURL); {
		case err != nil:
			log.Printf("Finding work for %s: %v", builderName, err)
		case rev != "":
			work <- builderRev{builderName, rev}
		}
		time.Sleep(60 * time.Second)
	}
}
// findWork asks the dashboard's /todo endpoint whether builderName has a
// pending go-commit build. It returns the revision hash to build, or ""
// when there is nothing to do.
func findWork(builderName, dashURL string) (rev string, err error) {
	// Shape of the dashboard's JSON response; only the fields we read.
	var jres struct {
		Response struct {
			Kind string
			Data struct {
				Hash        string
				PerfResults []string
			}
		}
	}
	res, err := http.Get(dashURL + "/todo?builder=" + builderName + "&kind=build-go-commit")
	if err != nil {
		return
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		return "", fmt.Errorf("unexpected http status %d", res.StatusCode)
	}
	err = json.NewDecoder(res.Body).Decode(&jres)
	// Only trust the hash when the dashboard confirms the expected kind;
	// a decode error (if any) is still propagated to the caller.
	if jres.Response.Kind == "build-go-commit" {
		rev = jres.Response.Data.Hash
	}
	return rev, err
}
// builderRev identifies one unit of work: a builder name plus the revision
// it should build. Used as the key of the status map.
type builderRev struct {
	name, rev string
}
// returns the part after "docker run"
//
// The argument list mounts the per-builder key (if available), exports the
// configured environment pairs, then names the image and the in-container
// builder command line for rev.
func (conf buildConfig) dockerRunArgs(rev string) (args []string) {
	if key := builderKey(conf.name); key != "" {
		// Materialize the key on the host once so it can be bind-mounted.
		tmpKey := "/tmp/" + conf.name + ".buildkey"
		if _, err := os.Stat(tmpKey); err != nil {
			if err := ioutil.WriteFile(tmpKey, []byte(key), 0600); err != nil {
				log.Fatal(err)
			}
		}
		// Images may look for .gobuildkey in / or /root, so provide both.
		// TODO(adg): fix images that look in the wrong place.
		args = append(args, "-v", tmpKey+":/.gobuildkey")
		args = append(args, "-v", tmpKey+":/root/.gobuildkey")
	}
	for _, pair := range conf.env {
		args = append(args, "-e", pair)
	}
	args = append(args,
		conf.image,
		"/usr/local/bin/builder",
		"-rev="+rev,
		"-dashboard="+conf.dashURL,
		"-tool="+conf.tool,
		"-buildroot=/",
		"-v",
	)
	if conf.cmd != "" {
		args = append(args, "-cmd", conf.cmd)
	}
	args = append(args, conf.name)
	return
}
// addBuilder registers c in the global builders map, filling in default
// dashboard URL, tool, image, and command. It panics on an empty name,
// a duplicate name, or an image that cannot be derived from the name.
func addBuilder(c buildConfig) {
	if c.name == "" {
		panic("empty name")
	}
	if *addTemp {
		c.name += "-temp"
	}
	if _, dup := builders[c.name]; dup {
		panic("dup name")
	}
	if c.dashURL == "" {
		c.dashURL = "https://build.golang.org"
	}
	if c.tool == "" {
		c.tool = "go"
	}
	// The name prefix selects default image (and command, for NaCl).
	switch {
	case strings.HasPrefix(c.name, "nacl-"):
		if c.image == "" {
			c.image = "gobuilders/linux-x86-nacl"
		}
		if c.cmd == "" {
			c.cmd = "/usr/local/bin/build-command.pl"
		}
	case strings.HasPrefix(c.name, "linux-") && c.image == "":
		c.image = "gobuilders/linux-x86-base"
	}
	if c.image == "" {
		panic("empty image")
	}
	builders[c.name] = c
}
// returns the part after "docker run"
//
// Security fix: the original logged the master key in cleartext
// ("Running watcher with master key %q"); secrets must never be written
// to process logs, so that line is removed.
func (conf watchConfig) dockerRunArgs() (args []string) {
	if key := masterKey(); len(key) > 0 {
		// Materialize the key on the host once so it can be bind-mounted.
		tmpKey := "/tmp/watcher.buildkey"
		if _, err := os.Stat(tmpKey); err != nil {
			if err := ioutil.WriteFile(tmpKey, key, 0600); err != nil {
				log.Fatal(err)
			}
		}
		// Images may look for .gobuildkey in / or /root, so provide both.
		// TODO(adg): fix images that look in the wrong place.
		args = append(args, "-v", tmpKey+":/.gobuildkey")
		args = append(args, "-v", tmpKey+":/root/.gobuildkey")
	}
	args = append(args,
		"go-commit-watcher",
		"/usr/local/bin/watcher",
		"-repo="+conf.repo,
		"-dash="+conf.dash,
		"-poll="+conf.interval.String(),
	)
	return
}
// addWatcher registers c in the global watchers map, keyed by repository,
// applying defaults for any unset field.
func addWatcher(c watchConfig) {
	if c.repo == "" {
		c.repo = "https://go.googlesource.com/go"
	}
	if c.dash == "" {
		c.dash = "https://build.golang.org/"
	}
	if c.interval == 0 {
		// Default poll cadence.
		c.interval = 10 * time.Second
	}
	watchers[c.repo] = c
}
// condUpdateImage conditionally refreshes the docker image img from its GCS
// tarball: it is a cheap no-op when the remote Last-Modified header matches
// the value cached from the previous load.
func condUpdateImage(img string) error {
	ii := images[img]
	if ii == nil {
		log.Fatalf("Image %q not described.", img)
	}
	ii.mu.Lock()
	defer ii.mu.Unlock()
	res, err := http.Head(ii.url)
	if err != nil {
		return fmt.Errorf("Error checking %s: %v", ii.url, err)
	}
	if res.StatusCode != 200 {
		return fmt.Errorf("Error checking %s: %v", ii.url, res.Status)
	}
	if res.Header.Get("Last-Modified") == ii.lastMod {
		return nil
	}

	res, err = http.Get(ii.url)
	if err != nil || res.StatusCode != 200 {
		return fmt.Errorf("Get after Head failed for %s: %v, %v", ii.url, err, res)
	}
	defer res.Body.Close()

	log.Printf("Running: docker load of %s\n", ii.url)
	cmd := exec.Command("docker", "load")
	cmd.Stdin = res.Body

	var out bytes.Buffer
	cmd.Stdout = &out
	cmd.Stderr = &out

	// Bug fix: the original read "if cmd.Run(); err != nil", which threw
	// away cmd.Run's result and tested the stale nil err from http.Get
	// above — so a failed "docker load" was silently treated as success.
	if err := cmd.Run(); err != nil {
		log.Printf("Failed to pull latest %s from %s and pipe into docker load: %v, %s", img, ii.url, err, out.Bytes())
		return err
	}
	ii.lastMod = res.Header.Get("Last-Modified")
	return nil
}
// numBuilds finds the number of go builder instances currently running.
func numBuilds() int {
	out, _ := exec.Command("docker", "ps").Output()
	n := 0
	for _, line := range bytes.Split(out, []byte("\n")) {
		if bytes.HasPrefix(line, []byte("gobuilders/")) {
			n++
		}
	}
	log.Printf("num current docker builds: %d", n)
	return n
}
// startBuilding refreshes conf's image, launches a detached docker container
// building rev, and returns its status. A background goroutine waits for the
// container to exit, reports completion on donec, and removes the container.
func startBuilding(conf buildConfig, rev string) (*buildStatus, error) {
	if err := condUpdateImage(conf.image); err != nil {
		log.Printf("Failed to setup container for %v %v: %v", conf.name, rev, err)
		return nil, err
	}

	// "docker run -d" prints the new container ID on stdout.
	cmd := exec.Command("docker", append([]string{"run", "-d"}, conf.dockerRunArgs(rev)...)...)
	all, err := cmd.CombinedOutput()
	log.Printf("Docker run for %v %v = err:%v, output:%s", conf.name, rev, err, all)
	if err != nil {
		return nil, err
	}
	container := strings.TrimSpace(string(all))
	go func() {
		// Block until the container exits, then tell the coordinator loop
		// and clean up the container.
		all, err := exec.Command("docker", "wait", container).CombinedOutput()
		log.Printf("docker wait %s/%s: %v, %s", container, rev, err, strings.TrimSpace(string(all)))
		donec <- builderRev{conf.name, rev}
		exec.Command("docker", "rm", container).Run()
	}()
	return &buildStatus{
		builderRev: builderRev{
			name: conf.name,
			rev:  rev,
		},
		container: container,
		start:     time.Now(),
	}, nil
}
// buildStatus tracks one in-flight build: its builder+rev identity, the
// docker container running it, and when it started.
type buildStatus struct {
	builderRev
	container string
	start     time.Time

	mu sync.Mutex
	// ...
}
// startWatching launches a commit-watcher container for conf. On any error
// the deferred handler schedules a retry via restartWatcherSoon; once
// running, a goroutine waits on the container and restarts it if it exits.
func startWatching(conf watchConfig) (err error) {
	defer func() {
		if err != nil {
			restartWatcherSoon(conf)
		}
	}()
	log.Printf("Starting watcher for %v", conf.repo)
	if err := condUpdateImage("go-commit-watcher"); err != nil {
		log.Printf("Failed to setup container for commit watcher: %v", err)
		return err
	}

	// "docker run -d" prints the new container ID on stdout.
	cmd := exec.Command("docker", append([]string{"run", "-d"}, conf.dockerRunArgs()...)...)
	all, err := cmd.CombinedOutput()
	if err != nil {
		log.Printf("Docker run for commit watcher = err:%v, output: %s", err, all)
		return err
	}
	container := strings.TrimSpace(string(all))
	// Start a goroutine to wait for the watcher to die.
	go func() {
		exec.Command("docker", "wait", container).Run()
		exec.Command("docker", "rm", "-v", container).Run()
		log.Printf("Watcher crashed. Restarting soon.")
		restartWatcherSoon(conf)
	}()
	return nil
}
// restartWatcherSoon schedules conf's watcher to be restarted after a
// 30-second back-off.
func restartWatcherSoon(conf watchConfig) {
	const delay = 30 * time.Second
	time.AfterFunc(delay, func() { startWatching(conf) })
}
// builderKey derives the per-builder secret as hex(HMAC-MD5(builder)) under
// the master key. It returns "" when no master key is available.
func builderKey(builder string) string {
	master := masterKey()
	if len(master) == 0 {
		return ""
	}
	mac := hmac.New(md5.New, master)
	io.WriteString(mac, builder)
	return fmt.Sprintf("%x", mac.Sum(nil))
}
// masterKey returns the builder master key, loading it exactly once on first
// use (from -masterkey or GCE project metadata; see loadKey).
func masterKey() []byte {
	keyOnce.Do(loadKey)
	return masterKeyCache
}

var (
	keyOnce        sync.Once // guards the single loadKey call
	masterKeyCache []byte    // set by loadKey
)
// loadKey populates masterKeyCache, preferring the -masterkey file and
// falling back to the GCE project attribute "builder-master-key". Any
// failure is fatal, since the coordinator cannot authenticate without it.
func loadKey() {
	if *masterKeyFile != "" {
		b, err := ioutil.ReadFile(*masterKeyFile)
		if err != nil {
			log.Fatal(err)
		}
		masterKeyCache = bytes.TrimSpace(b)
		return
	}
	// Query the GCE metadata service for the project attribute.
	req, _ := http.NewRequest("GET", "http://metadata.google.internal/computeMetadata/v1/project/attributes/builder-master-key", nil)
	req.Header.Set("Metadata-Flavor", "Google")
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal("No builder master key available")
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		log.Fatalf("No builder-master-key project attribute available.")
	}
	slurp, err := ioutil.ReadAll(res.Body)
	if err != nil {
		log.Fatal(err)
	}
	masterKeyCache = bytes.TrimSpace(slurp)
}
// cleanUpOldContainers removes exited docker containers in a periodic sweep,
// forever; it is meant to run in its own goroutine.
func cleanUpOldContainers() {
	const sweepInterval = 30 * time.Second
	for {
		for _, id := range oldContainers() {
			log.Printf("Cleaning old container %v", id)
			exec.Command("docker", "rm", "-v", id).Run()
		}
		time.Sleep(sweepInterval)
	}
}
// oldContainers returns the IDs of all docker containers in the exited state.
func oldContainers() []string {
	cmd := exec.Command("docker", "ps", "-a", "--filter=status=exited", "--no-trunc", "-q")
	out, _ := cmd.Output()
	return strings.Fields(string(out))
}
|
package middlewares
import "github.com/gin-gonic/gin"
// ErrorWriter writes last error into response body if not written yet
func ErrorWriter() gin.HandlerFunc {
	return func(c *gin.Context) {
		c.Next()

		// Only act when the handler chain produced no body of its own.
		if !c.Writer.Written() {
			if last := c.Errors.ByType(gin.ErrorTypeAny).Last(); last != nil {
				// -1 leaves the already-set HTTP status code untouched.
				c.JSON(-1, last)
			}
		}
	}
}
Remove custom error logger
|
// Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package m3
import (
"fmt"
"io"
"math"
"os"
"runtime"
"sort"
"strconv"
"sync"
"time"
"github.com/pkg/errors"
"github.com/uber-go/tally"
"github.com/uber-go/tally/internal/cache"
customtransport "github.com/uber-go/tally/m3/customtransports"
m3thrift "github.com/uber-go/tally/m3/thrift/v2"
"github.com/uber-go/tally/m3/thriftudp"
"github.com/uber-go/tally/thirdparty/github.com/apache/thrift/lib/go/thrift"
"go.uber.org/atomic"
)
// Protocol describes a M3 thrift transport protocol.
type Protocol int

// Compact and Binary represent the compact and
// binary thrift protocols respectively.
const (
	Compact Protocol = iota
	Binary
)

const (
	// ServiceTag is the name of the M3 service tag.
	ServiceTag = "service"
	// EnvTag is the name of the M3 env tag.
	EnvTag = "env"
	// HostTag is the name of the M3 host tag.
	HostTag = "host"
	// DefaultMaxQueueSize is the default M3 reporter queue size.
	DefaultMaxQueueSize = 4096
	// DefaultMaxPacketSize is the default M3 reporter max packet size.
	DefaultMaxPacketSize = int32(1440)
	// DefaultHistogramBucketIDName is the default histogram bucket ID tag name
	DefaultHistogramBucketIDName = "bucketid"
	// DefaultHistogramBucketName is the default histogram bucket name tag name
	DefaultHistogramBucketName = "bucket"
	// DefaultHistogramBucketTagPrecision is the default
	// precision to use when formatting the metric tag
	// with the histogram bucket bound values.
	DefaultHistogramBucketTagPrecision = uint(6)

	// emitMetricBatchOverhead is the fixed thrift framing overhead, in
	// bytes, added on top of the serialized common tags per batch.
	emitMetricBatchOverhead = 19

	// minMetricBucketIDTagLength is the minimum zero-padded width of the
	// histogram bucket ID tag value.
	minMetricBucketIDTagLength = 4
)

// Sentinel placeholder values written into allocated metrics; replaced with
// real values at report time.
var (
	_maxInt64   = int64(math.MaxInt64)
	_maxFloat64 = math.MaxFloat64
)

// metricType discriminates the kind of metric being allocated.
type metricType int

const (
	counterType metricType = iota + 1
	timerType
	gaugeType
)

var (
	errNoHostPorts   = errors.New("at least one entry for HostPorts is required")
	errCommonTagSize = errors.New("common tags serialized size exceeds packet size")
	errAlreadyClosed = errors.New("reporter already closed")
)

// Reporter is an M3 reporter.
type Reporter interface {
	tally.CachedStatsReporter
	io.Closer
}
// reporter is a metrics backend that reports metrics to a local or
// remote M3 collector, metrics are batched together and emitted
// via either thrift compact or binary protocol in batch UDP packets.
type reporter struct {
	bucketIDTagName string
	bucketTagName   string
	bucketValFmt    string
	buckets         []tally.BucketPair
	calc            *customtransport.TCalcTransport
	calcLock        sync.Mutex // guards calc/calcProto size calculations
	calcProto       thrift.TProtocol
	client          *m3thrift.M3Client
	commonTags      []m3thrift.MetricTag
	done            atomic.Bool   // set once by Close
	donech          chan struct{} // closed by Close to unblock senders
	freeBytes       int32         // payload budget per UDP packet after common tags
	metCh           chan sizedMetric
	now             atomic.Int64 // cached wall clock, maintained by timeLoop
	overheadBytes   int32
	pending         atomic.Uint64 // in-flight reportCopyMetric/Flush calls
	resourcePool    *resourcePool
	stringInterner  *cache.StringInterner
	tagCache        *cache.TagCache
	wg              sync.WaitGroup // tracks process and timeLoop goroutines

	// Self-instrumentation counters, flushed by reportInternalMetrics.
	batchSizeHistogram    tally.CachedHistogram
	numBatches            atomic.Int64
	numBatchesCounter     tally.CachedCount
	numMetrics            atomic.Int64
	numMetricsCounter     tally.CachedCount
	numWriteErrors        atomic.Int64
	numWriteErrorsCounter tally.CachedCount
}
// Options is a set of options for the M3 reporter.
type Options struct {
	HostPorts          []string          // M3 collector endpoints (1 = single UDP, >1 = multi UDP)
	Service            string            // required unless CommonTags["service"] is set
	Env                string            // required unless CommonTags["env"] is set
	CommonTags         map[string]string // tags applied to every emitted metric
	IncludeHost        bool              // add a "host" tag from os.Hostname if not provided
	Protocol           Protocol          // Compact (default) or Binary
	MaxQueueSize       int               // metric channel capacity; defaults to DefaultMaxQueueSize
	MaxPacketSizeBytes int32             // UDP packet budget; defaults to DefaultMaxPacketSize

	// Histogram bucket tag naming/formatting overrides.
	HistogramBucketIDName       string
	HistogramBucketName         string
	HistogramBucketTagPrecision uint
}
// NewReporter creates a new M3 reporter.
//
// It validates and defaults the options, builds the thrift UDP client,
// serializes the common tags once to compute the per-packet byte budget,
// and starts the background process and timeLoop goroutines. The returned
// Reporter must be Closed to stop them.
func NewReporter(opts Options) (Reporter, error) {
	if opts.MaxQueueSize <= 0 {
		opts.MaxQueueSize = DefaultMaxQueueSize
	}
	if opts.MaxPacketSizeBytes <= 0 {
		opts.MaxPacketSizeBytes = DefaultMaxPacketSize
	}
	if opts.HistogramBucketIDName == "" {
		opts.HistogramBucketIDName = DefaultHistogramBucketIDName
	}
	if opts.HistogramBucketName == "" {
		opts.HistogramBucketName = DefaultHistogramBucketName
	}
	if opts.HistogramBucketTagPrecision == 0 {
		opts.HistogramBucketTagPrecision = DefaultHistogramBucketTagPrecision
	}

	// Create M3 thrift client
	var trans thrift.TTransport
	var err error
	if len(opts.HostPorts) == 0 {
		err = errNoHostPorts
	} else if len(opts.HostPorts) == 1 {
		trans, err = thriftudp.NewTUDPClientTransport(opts.HostPorts[0], "")
	} else {
		trans, err = thriftudp.NewTMultiUDPClientTransport(opts.HostPorts, "")
	}
	if err != nil {
		return nil, err
	}

	var protocolFactory thrift.TProtocolFactory
	if opts.Protocol == Compact {
		protocolFactory = thrift.NewTCompactProtocolFactory()
	} else {
		protocolFactory = thrift.NewTBinaryProtocolFactoryDefault()
	}

	var (
		client       = m3thrift.NewM3ClientFactory(trans, protocolFactory)
		resourcePool = newResourcePool(protocolFactory)
		tagm         = make(map[string]string)
		tags         = resourcePool.getMetricTagSlice()
	)

	// Create common tags
	for k, v := range opts.CommonTags {
		tagm[k] = v
	}
	// service and env are mandatory, either via CommonTags or their
	// dedicated option fields.
	if opts.CommonTags[ServiceTag] == "" {
		if opts.Service == "" {
			return nil, fmt.Errorf("%s common tag is required", ServiceTag)
		}
		tagm[ServiceTag] = opts.Service
	}
	if opts.CommonTags[EnvTag] == "" {
		if opts.Env == "" {
			return nil, fmt.Errorf("%s common tag is required", EnvTag)
		}
		tagm[EnvTag] = opts.Env
	}
	if opts.IncludeHost {
		if opts.CommonTags[HostTag] == "" {
			hostname, err := os.Hostname()
			if err != nil {
				return nil, errors.WithMessage(err, "error resolving host tag")
			}
			tagm[HostTag] = hostname
		}
	}
	for k, v := range tagm {
		tags = append(tags, m3thrift.MetricTag{
			Name:  k,
			Value: v,
		})
	}

	// Calculate size of common tags
	var (
		batch = m3thrift.MetricBatch{
			Metrics:    resourcePool.getMetricSlice(),
			CommonTags: tags,
		}
		proto = resourcePool.getProto()
	)
	if err := batch.Write(proto); err != nil {
		return nil, errors.WithMessage(
			err,
			"failed to write to proto for size calculation",
		)
	}
	resourcePool.releaseMetricSlice(batch.Metrics)

	var (
		calc             = proto.Transport().(*customtransport.TCalcTransport)
		numOverheadBytes = emitMetricBatchOverhead + calc.GetCount()
		freeBytes        = opts.MaxPacketSizeBytes - numOverheadBytes
	)
	calc.ResetCount()
	if freeBytes <= 0 {
		return nil, errCommonTagSize
	}

	// Buckets used to self-report the observed batch sizes.
	buckets := tally.ValueBuckets(append(
		[]float64{0.0},
		tally.MustMakeExponentialValueBuckets(2.0, 2.0, 11)...,
	))

	r := &reporter{
		buckets:         tally.BucketPairs(buckets),
		bucketIDTagName: opts.HistogramBucketIDName,
		bucketTagName:   opts.HistogramBucketName,
		bucketValFmt:    "%." + strconv.Itoa(int(opts.HistogramBucketTagPrecision)) + "f",
		calc:            calc,
		calcProto:       proto,
		client:          client,
		commonTags:      tags,
		donech:          make(chan struct{}),
		freeBytes:       freeBytes,
		metCh:           make(chan sizedMetric, opts.MaxQueueSize),
		overheadBytes:   numOverheadBytes,
		resourcePool:    resourcePool,
		stringInterner:  cache.NewStringInterner(),
		tagCache:        cache.NewTagCache(),
	}

	var (
		internalTags = map[string]string{
			"version": tally.Version,
		}
	)

	r.batchSizeHistogram = r.AllocateHistogram("tally.internal.batch-size", internalTags, buckets)
	r.numBatchesCounter = r.AllocateCounter("tally.internal.num-batches", internalTags)
	r.numMetricsCounter = r.AllocateCounter("tally.internal.num-metrics", internalTags)
	r.numWriteErrorsCounter = r.AllocateCounter("tally.internal.num-write-errors", internalTags)

	// Background goroutines: metric batching/emission and the coarse clock.
	r.wg.Add(1)
	go func() {
		defer r.wg.Done()
		r.process()
	}()

	r.wg.Add(1)
	go func() {
		defer r.wg.Done()
		r.timeLoop()
	}()

	return r, nil
}
// AllocateCounter implements tally.CachedStatsReporter.
// It delegates to allocateCounter, which is shared with AllocateHistogram.
func (r *reporter) AllocateCounter(
	name string,
	tags map[string]string,
) tally.CachedCount {
	return r.allocateCounter(name, tags)
}
// allocateCounter builds a cachedMetric for a counter with its serialized
// wire size precomputed; shared by AllocateCounter and AllocateHistogram.
func (r *reporter) allocateCounter(
	name string,
	tags map[string]string,
) cachedMetric {
	m := r.newMetric(name, tags, counterType)
	return cachedMetric{
		metric:   m,
		reporter: r,
		size:     r.calculateSize(m),
	}
}
// AllocateGauge implements tally.CachedStatsReporter, returning a gauge
// metric with its serialized wire size precomputed.
func (r *reporter) AllocateGauge(
	name string,
	tags map[string]string,
) tally.CachedGauge {
	m := r.newMetric(name, tags, gaugeType)
	return cachedMetric{
		metric:   m,
		reporter: r,
		size:     r.calculateSize(m),
	}
}
// AllocateTimer implements tally.CachedStatsReporter, returning a timer
// metric with its serialized wire size precomputed.
func (r *reporter) AllocateTimer(
	name string,
	tags map[string]string,
) tally.CachedTimer {
	m := r.newMetric(name, tags, timerType)
	return cachedMetric{
		metric:   m,
		reporter: r,
		size:     r.calculateSize(m),
	}
}
// AllocateHistogram implements tally.CachedStatsReporter.
//
// For every bucket pair it pre-allocates a counter plus the interned
// bucket-ID and bucket-range tag values, and pre-adjusts the counter's
// serialized size to include those two extra tags (added lazily in
// process()). Buckets are kept in sorted slices so report-time lookup is a
// binary search.
func (r *reporter) AllocateHistogram(
	name string,
	tags map[string]string,
	buckets tally.Buckets,
) tally.CachedHistogram {
	var (
		_, isDuration = buckets.(tally.DurationBuckets)
		// Zero-pad bucket IDs to at least minMetricBucketIDTagLength digits.
		bucketIDLen = int(math.Max(
			float64(ndigits(buckets.Len())),
			float64(minMetricBucketIDTagLength),
		))
		bucketIDFmt           = "%0" + strconv.Itoa(bucketIDLen) + "d"
		cachedValueBuckets    []cachedHistogramBucket
		cachedDurationBuckets []cachedHistogramBucket
	)
	var (
		mtags = r.convertTags(tags)
		// Sentinel lower bounds for the first bucket's "-infinity" label.
		prevDuration = time.Duration(math.MinInt64)
		prevValue    = -math.MaxFloat64
	)
	for i, pair := range tally.BucketPairs(buckets) {
		var (
			counter = r.allocateCounter(name, nil)
			hbucket = cachedHistogramBucket{
				bucketID:           r.stringInterner.Intern(fmt.Sprintf(bucketIDFmt, i)),
				valueUpperBound:    pair.UpperBoundValue(),
				durationUpperBound: pair.UpperBoundDuration(),
				metric:             &counter,
			}
			// Extra serialized bytes for the two bucket tag names + ID value;
			// the range value's length is added per-branch below.
			delta = len(r.bucketIDTagName) + len(r.bucketTagName) + len(hbucket.bucketID)
		)

		hbucket.metric.metric.Tags = mtags
		hbucket.metric.size = r.calculateSize(hbucket.metric.metric)

		if isDuration {
			bname := r.stringInterner.Intern(
				r.durationBucketString(prevDuration) + "-" +
					r.durationBucketString(pair.UpperBoundDuration()),
			)

			hbucket.bucket = bname
			hbucket.metric.size += int32(delta + len(bname))
			cachedDurationBuckets = append(cachedDurationBuckets, hbucket)
		} else {
			bname := r.stringInterner.Intern(
				r.valueBucketString(prevValue) + "-" +
					r.valueBucketString(pair.UpperBoundValue()),
			)

			hbucket.bucket = bname
			hbucket.metric.size += int32(delta + len(bname))
			cachedValueBuckets = append(cachedValueBuckets, hbucket)
		}

		prevDuration = pair.UpperBoundDuration()
		prevValue = pair.UpperBoundValue()
	}

	return cachedHistogram{
		r:                     r,
		name:                  name,
		cachedValueBuckets:    cachedValueBuckets,
		cachedDurationBuckets: cachedDurationBuckets,
	}
}
// valueBucketString formats a histogram value-bucket bound, mapping the
// ±math.MaxFloat64 sentinels to "infinity"/"-infinity".
func (r *reporter) valueBucketString(v float64) string {
	switch v {
	case math.MaxFloat64:
		return "infinity"
	case -math.MaxFloat64:
		return "-infinity"
	default:
		return fmt.Sprintf(r.bucketValFmt, v)
	}
}
// durationBucketString formats a histogram duration-bucket bound, mapping
// zero to "0" and the math.MaxInt64/MinInt64 sentinels to
// "infinity"/"-infinity".
func (r *reporter) durationBucketString(d time.Duration) string {
	switch d {
	case 0:
		return "0"
	case time.Duration(math.MaxInt64):
		return "infinity"
	case time.Duration(math.MinInt64):
		return "-infinity"
	default:
		return d.String()
	}
}
// newMetric builds a thrift metric template of the given type with interned
// name and converted tags. Value fields are filled with sentinel max values
// (see _maxInt64/_maxFloat64) so the serialized size computed from the
// template is an upper bound; real values are set at report time.
func (r *reporter) newMetric(
	name string,
	tags map[string]string,
	t metricType,
) m3thrift.Metric {
	m := m3thrift.Metric{
		Name:      r.stringInterner.Intern(name),
		Timestamp: _maxInt64,
	}

	switch t {
	case counterType:
		m.Value.MetricType = m3thrift.MetricType_COUNTER
		m.Value.Count = _maxInt64
	case gaugeType:
		m.Value.MetricType = m3thrift.MetricType_GAUGE
		m.Value.Gauge = _maxFloat64
	case timerType:
		m.Value.MetricType = m3thrift.MetricType_TIMER
		m.Value.Timer = _maxInt64
	}

	if len(tags) == 0 {
		return m
	}

	m.Tags = r.convertTags(tags)
	return m
}
// calculateSize returns m's serialized thrift size in bytes by writing it
// through the shared counting transport. calcLock serializes use of the
// single calcProto/calc pair.
func (r *reporter) calculateSize(m m3thrift.Metric) int32 {
	r.calcLock.Lock()
	m.Write(r.calcProto) //nolint:errcheck
	size := r.calc.GetCount()
	r.calc.ResetCount()
	r.calcLock.Unlock()
	return size
}
// reportCopyMetric enqueues a copy of m (stamped with the cached timestamp)
// for batching by process(). The pending counter lets Close wait for
// in-flight calls; the donech case prevents blocking forever on a full
// channel once the reporter is closing.
func (r *reporter) reportCopyMetric(
	m m3thrift.Metric,
	size int32,
	bucket string,
	bucketID string,
) {
	r.pending.Inc()
	defer r.pending.Dec()

	// Drop silently once Close has begun.
	if r.done.Load() {
		return
	}

	m.Timestamp = r.now.Load()
	sm := sizedMetric{
		m:        m,
		size:     size,
		set:      true,
		bucket:   bucket,
		bucketID: bucketID,
	}

	select {
	case r.metCh <- sm:
	case <-r.donech:
	}
}
// Flush sends an empty sizedMetric to signal a flush.
// It also folds the reporter's self-instrumentation counters into the
// stream before signaling. No-op after Close has begun.
func (r *reporter) Flush() {
	r.pending.Inc()
	defer r.pending.Dec()

	if r.done.Load() {
		return
	}

	r.reportInternalMetrics()
	// A zero-value sizedMetric (set == false) is the flush sentinel
	// recognized by process().
	r.metCh <- sizedMetric{}
}
// Close waits for metrics to be flushed before closing the backend.
// Safe to call once; subsequent calls return errAlreadyClosed.
func (r *reporter) Close() (err error) {
	// CAS guarantees exactly one caller proceeds past this point.
	if !r.done.CAS(false, true) {
		return errAlreadyClosed
	}

	// Wait for any pending reports to complete.
	// (Spin-wait: reportCopyMetric/Flush exit quickly once done is set.)
	for r.pending.Load() > 0 {
		runtime.Gosched()
	}

	// Unblock any sender stuck on metCh, stop process(), and wait for the
	// background goroutines to drain and exit.
	close(r.donech)
	close(r.metCh)
	r.wg.Wait()

	return nil
}
// Capabilities implements tally.BaseStatsReporter; the reporter serves as
// its own capabilities descriptor.
func (r *reporter) Capabilities() tally.Capabilities {
	return r
}

// Reporting reports whether this reporter emits metrics (always true).
func (r *reporter) Reporting() bool {
	return true
}

// Tagging reports whether this reporter supports tagged metrics (always true).
func (r *reporter) Tagging() bool {
	return true
}
// process is the single consumer of metCh. It accumulates metrics until the
// next one would exceed the per-packet byte budget (or a flush sentinel
// arrives), then emits the batch. Histogram metrics get their two bucket
// tags appended here using pooled tag slices, which are recycled after each
// flush. Runs until metCh is closed by Close, then performs a final flush.
func (r *reporter) process() {
	var (
		// Pool of scratch tag slices for histogram bucket tags.
		extraTags = sync.Pool{
			New: func() interface{} {
				return make([]m3thrift.MetricTag, 0, 8)
			},
		}
		borrowedTags = make([][]m3thrift.MetricTag, 0, 128)
		mets         = make([]m3thrift.Metric, 0, r.freeBytes/10)
		bytes        int32
	)

	for smet := range r.metCh {
		// A zero-value sizedMetric (set == false) is the flush signal.
		flush := !smet.set && len(mets) > 0

		if flush || bytes+smet.size > r.freeBytes {
			r.numMetrics.Add(int64(len(mets)))
			mets = r.flush(mets)
			bytes = 0

			// Return scratch tag slices to the pool now that the batch
			// referencing them has been written out.
			if len(borrowedTags) > 0 {
				for i := range borrowedTags {
					extraTags.Put(borrowedTags[i][:0])
				}
				borrowedTags = borrowedTags[:0]
			}
		}

		if !smet.set {
			continue
		}

		m := smet.m
		if len(smet.bucket) > 0 {
			// Histogram sample: append the bucket ID and range tags on a
			// pooled slice (never mutate the cached metric's own Tags).
			tags := extraTags.Get().([]m3thrift.MetricTag)
			tags = append(tags, m.Tags...)
			tags = append(
				tags,
				m3thrift.MetricTag{
					Name:  r.bucketIDTagName,
					Value: smet.bucketID,
				},
				m3thrift.MetricTag{
					Name:  r.bucketTagName,
					Value: smet.bucket,
				},
			)
			borrowedTags = append(borrowedTags, tags)
			m.Tags = tags
		}

		mets = append(mets, m)
		bytes += smet.size
	}

	// Final flush
	r.flush(mets)
}
// flush emits mets as one thrift batch (with the precomputed common tags)
// and returns the slice truncated to zero length for reuse. Write failures
// are only counted, not returned; UDP emission is best-effort.
func (r *reporter) flush(mets []m3thrift.Metric) []m3thrift.Metric {
	if len(mets) == 0 {
		return mets
	}

	r.numBatches.Inc()

	err := r.client.EmitMetricBatchV2(m3thrift.MetricBatch{
		Metrics:    mets,
		CommonTags: r.commonTags,
	})
	if err != nil {
		r.numWriteErrors.Inc()
	}

	// n.b. In the event that we had allocated additional tag storage in
	//      process(), clear it so that it can be reclaimed. This does not
	//      affect allocated metrics' tags.
	for i := range mets {
		mets[i].Tags = nil
	}

	return mets[:0]
}
// convertTags converts a tag map into a thrift tag slice, memoized in the
// tag cache so identical maps share one slice; names and values are
// interned to deduplicate the strings.
func (r *reporter) convertTags(tags map[string]string) []m3thrift.MetricTag {
	key := cache.TagMapKey(tags)
	mtags, ok := r.tagCache.Get(key)
	if !ok {
		mtags = r.resourcePool.getMetricTagSlice()
		for k, v := range tags {
			mtags = append(mtags, m3thrift.MetricTag{
				Name:  r.stringInterner.Intern(k),
				Value: r.stringInterner.Intern(v),
			})
		}
		// Set may return a previously-cached slice if another goroutine
		// raced us; use whatever the cache settled on.
		mtags = r.tagCache.Set(key, mtags)
	}
	return mtags
}
// reportInternalMetrics emits the reporter's self-instrumentation: counts of
// batches, metrics, and write errors since the last call, plus a sample of
// the average batch size.
func (r *reporter) reportInternalMetrics() {
	var (
		batches     = r.numBatches.Swap(0)
		metrics     = r.numMetrics.Swap(0)
		writeErrors = r.numWriteErrors.Swap(0)
	)

	// Bug fix: only sample the batch-size histogram when at least one batch
	// was sent. With batches == 0 the average is NaN (0/0), sort.Search's
	// predicate is always false for NaN, and every idle Flush would record
	// a bogus MaxFloat64 overflow-bucket sample.
	if batches > 0 {
		batchSize := float64(metrics) / float64(batches)
		bucket := sort.Search(len(r.buckets), func(i int) bool {
			return r.buckets[i].UpperBoundValue() >= batchSize
		})

		var value float64
		if bucket < len(r.buckets) {
			value = r.buckets[bucket].UpperBoundValue()
		} else {
			value = math.MaxFloat64
		}
		r.batchSizeHistogram.ValueBucket(0, value).ReportSamples(1)
	}

	r.numBatchesCounter.ReportCount(batches)
	r.numMetricsCounter.ReportCount(metrics)
	r.numWriteErrorsCounter.ReportCount(writeErrors)
}
// timeLoop maintains the reporter's cached timestamp until Close sets done.
// Metric timestamps only need coarse resolution, so waking every 100ms is
// sufficient; the previous 1ms sleep woke the goroutine 1000x/sec for no
// observable benefit and wasted CPU.
func (r *reporter) timeLoop() {
	for !r.done.Load() {
		r.now.Store(time.Now().UnixNano())
		time.Sleep(100 * time.Millisecond)
	}
}
// cachedMetric is a pre-allocated metric template bound to its reporter,
// with its serialized size precomputed so the batching loop can budget
// packet space without re-serializing.
type cachedMetric struct {
	metric   m3thrift.Metric
	reporter *reporter
	size     int32
}
// ReportCount implements tally.CachedCount; the value receiver means the
// template is copied, so the stored metric is never mutated.
func (c cachedMetric) ReportCount(value int64) {
	c.metric.Value.Count = value
	c.reporter.reportCopyMetric(c.metric, c.size, "", "")
}

// ReportGauge implements tally.CachedGauge.
func (c cachedMetric) ReportGauge(value float64) {
	c.metric.Value.Gauge = value
	c.reporter.reportCopyMetric(c.metric, c.size, "", "")
}

// ReportTimer implements tally.CachedTimer; the interval is reported in
// nanoseconds.
func (c cachedMetric) ReportTimer(interval time.Duration) {
	c.metric.Value.Timer = int64(interval)
	c.reporter.reportCopyMetric(c.metric, c.size, "", "")
}
// noopMetric is a do-nothing stand-in returned for histogram bucket lookups
// that fall outside the allocated buckets.
type noopMetric struct{}

func (c noopMetric) ReportCount(value int64)            {}
func (c noopMetric) ReportGauge(value float64)          {}
func (c noopMetric) ReportTimer(interval time.Duration) {}
func (c noopMetric) ReportSamples(value int64)          {}
// cachedHistogram holds the pre-allocated per-bucket counters for one
// histogram; the bucket slices are sorted by upper bound so lookups can use
// binary search.
type cachedHistogram struct {
	r                     *reporter
	name                  string
	cachedValueBuckets    []cachedHistogramBucket
	cachedDurationBuckets []cachedHistogramBucket
}
// ValueBucket implements tally.CachedHistogram: it binary-searches the
// pre-allocated value buckets for the one whose upper bound matches and
// returns a sampler that reports into it. An out-of-range bound yields a
// no-op sampler.
func (h cachedHistogram) ValueBucket(
	_ float64,
	bucketUpperBound float64,
) tally.CachedHistogramBucket {
	var (
		n   = len(h.cachedValueBuckets)
		idx = sort.Search(n, func(i int) bool {
			return h.cachedValueBuckets[i].valueUpperBound >= bucketUpperBound
		})
	)

	if idx == n {
		return noopMetric{}
	}

	// Capture everything the closure needs up front.
	var (
		b        = h.cachedValueBuckets[idx]
		cm       = b.metric
		m        = cm.metric
		size     = cm.size
		bucket   = b.bucket
		bucketID = b.bucketID
		rep      = cm.reporter
	)

	return reportSamplesFunc(func(value int64) {
		m.Value.Count = value
		rep.reportCopyMetric(m, size, bucket, bucketID)
	})
}
// DurationBucket implements tally.CachedHistogram, the duration-bucket
// analogue of ValueBucket: binary search on upper bounds, a no-op sampler
// when out of range.
func (h cachedHistogram) DurationBucket(
	_ time.Duration,
	bucketUpperBound time.Duration,
) tally.CachedHistogramBucket {
	var (
		n   = len(h.cachedDurationBuckets)
		idx = sort.Search(n, func(i int) bool {
			return h.cachedDurationBuckets[i].durationUpperBound >= bucketUpperBound
		})
	)

	if idx == n {
		return noopMetric{}
	}

	// Capture everything the closure needs up front.
	var (
		b        = h.cachedDurationBuckets[idx]
		cm       = b.metric
		m        = cm.metric
		size     = cm.size
		bucket   = b.bucket
		bucketID = b.bucketID
		rep      = cm.reporter
	)

	return reportSamplesFunc(func(value int64) {
		m.Value.Count = value
		rep.reportCopyMetric(m, size, bucket, bucketID)
	})
}
// cachedHistogramBucket pairs one bucket's pre-allocated counter with its
// bounds and the interned tag values identifying the bucket.
type cachedHistogramBucket struct {
	metric             *cachedMetric
	durationUpperBound time.Duration
	valueUpperBound    float64
	bucket             string // "lower-upper" range label
	bucketID           string // zero-padded bucket index
}

// reportSamplesFunc adapts a function to tally.CachedHistogramBucket.
type reportSamplesFunc func(value int64)

func (f reportSamplesFunc) ReportSamples(value int64) {
	f(value)
}

// sizedMetric is what travels over metCh: a metric copy with its serialized
// size, plus optional histogram bucket tags. A zero value (set == false)
// acts as the flush sentinel.
type sizedMetric struct {
	m        m3thrift.Metric
	size     int32
	set      bool
	bucket   string
	bucketID string
}
// ndigits reports the number of decimal digits in i (1 for 0-9, 2 for
// 10-99, and so on).
func ndigits(i int) int {
	count := 1
	for i /= 10; i != 0; i /= 10 {
		count++
	}
	return count
}
Decrease M3 reporter time resolution from 1ms to 100ms (#165)
// Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package m3
import (
"fmt"
"io"
"math"
"os"
"runtime"
"sort"
"strconv"
"sync"
"time"
"github.com/pkg/errors"
"github.com/uber-go/tally"
"github.com/uber-go/tally/internal/cache"
customtransport "github.com/uber-go/tally/m3/customtransports"
m3thrift "github.com/uber-go/tally/m3/thrift/v2"
"github.com/uber-go/tally/m3/thriftudp"
"github.com/uber-go/tally/thirdparty/github.com/apache/thrift/lib/go/thrift"
"go.uber.org/atomic"
)
// Protocol describes a M3 thrift transport protocol.
type Protocol int
// Compact and Binary represent the compact and
// binary thrift protocols respectively.
const (
Compact Protocol = iota
Binary
)
const (
// ServiceTag is the name of the M3 service tag.
ServiceTag = "service"
// EnvTag is the name of the M3 env tag.
EnvTag = "env"
// HostTag is the name of the M3 host tag.
HostTag = "host"
// DefaultMaxQueueSize is the default M3 reporter queue size.
DefaultMaxQueueSize = 4096
// DefaultMaxPacketSize is the default M3 reporter max packet size.
DefaultMaxPacketSize = int32(1440)
// DefaultHistogramBucketIDName is the default histogram bucket ID tag name
DefaultHistogramBucketIDName = "bucketid"
// DefaultHistogramBucketName is the default histogram bucket name tag name
DefaultHistogramBucketName = "bucket"
// DefaultHistogramBucketTagPrecision is the default
// precision to use when formatting the metric tag
// with the histogram bucket bound values.
DefaultHistogramBucketTagPrecision = uint(6)
_emitMetricBatchOverhead = 19
_minMetricBucketIDTagLength = 4
_timeResolution = 100 * time.Millisecond
)
var (
_maxInt64 = int64(math.MaxInt64)
_maxFloat64 = math.MaxFloat64
)
type metricType int
const (
counterType metricType = iota + 1
timerType
gaugeType
)
var (
errNoHostPorts = errors.New("at least one entry for HostPorts is required")
errCommonTagSize = errors.New("common tags serialized size exceeds packet size")
errAlreadyClosed = errors.New("reporter already closed")
)
// Reporter is an M3 reporter.
type Reporter interface {
	tally.CachedStatsReporter

	// Closer flushes any buffered metrics and shuts the reporter down.
	io.Closer
}

// reporter is a metrics backend that reports metrics to a local or
// remote M3 collector, metrics are batched together and emitted
// via either thrift compact or binary protocol in batch UDP packets.
type reporter struct {
	// Histogram bucket tag configuration.
	bucketIDTagName string
	bucketTagName   string
	bucketValFmt    string
	buckets         []tally.BucketPair
	// Size calculator; calcLock guards calc and calcProto.
	calc      *customtransport.TCalcTransport
	calcLock  sync.Mutex
	calcProto thrift.TProtocol
	client    *m3thrift.M3Client
	commonTags []m3thrift.MetricTag
	// Lifecycle: done flags closure, donech unblocks queued senders,
	// pending counts in-flight report calls, wg tracks goroutines.
	done    atomic.Bool
	donech  chan struct{}
	freeBytes int32 // per-packet byte budget after common-tag overhead
	metCh   chan sizedMetric
	now     atomic.Int64 // cached wall clock (see timeLoop)
	overheadBytes int32
	pending atomic.Uint64
	resourcePool *resourcePool
	stringInterner *cache.StringInterner
	tagCache       *cache.TagCache
	wg             sync.WaitGroup

	// Internal self-telemetry.
	batchSizeHistogram tally.CachedHistogram
	numBatches         atomic.Int64
	numBatchesCounter  tally.CachedCount
	numMetrics         atomic.Int64
	numMetricsCounter  tally.CachedCount
	numWriteErrors     atomic.Int64
	numWriteErrorsCounter tally.CachedCount
}

// Options is a set of options for the M3 reporter.
type Options struct {
	HostPorts []string // destination collector host:ports (at least one required)
	Service   string   // value for the required "service" common tag
	Env       string   // value for the required "env" common tag
	CommonTags map[string]string
	IncludeHost bool // when true, adds a "host" common tag from os.Hostname
	Protocol   Protocol
	MaxQueueSize int
	MaxPacketSizeBytes int32
	HistogramBucketIDName string
	HistogramBucketName   string
	HistogramBucketTagPrecision uint
}
// NewReporter creates a new M3 reporter.
func NewReporter(opts Options) (Reporter, error) {
	// Apply defaults for any unset options.
	if opts.MaxQueueSize <= 0 {
		opts.MaxQueueSize = DefaultMaxQueueSize
	}
	if opts.MaxPacketSizeBytes <= 0 {
		opts.MaxPacketSizeBytes = DefaultMaxPacketSize
	}
	if opts.HistogramBucketIDName == "" {
		opts.HistogramBucketIDName = DefaultHistogramBucketIDName
	}
	if opts.HistogramBucketName == "" {
		opts.HistogramBucketName = DefaultHistogramBucketName
	}
	if opts.HistogramBucketTagPrecision == 0 {
		opts.HistogramBucketTagPrecision = DefaultHistogramBucketTagPrecision
	}

	// Create M3 thrift client: a single host gets a plain UDP transport,
	// multiple hosts a multi-host UDP transport.
	var trans thrift.TTransport
	var err error
	if len(opts.HostPorts) == 0 {
		err = errNoHostPorts
	} else if len(opts.HostPorts) == 1 {
		trans, err = thriftudp.NewTUDPClientTransport(opts.HostPorts[0], "")
	} else {
		trans, err = thriftudp.NewTMultiUDPClientTransport(opts.HostPorts, "")
	}
	if err != nil {
		return nil, err
	}

	var protocolFactory thrift.TProtocolFactory
	if opts.Protocol == Compact {
		protocolFactory = thrift.NewTCompactProtocolFactory()
	} else {
		protocolFactory = thrift.NewTBinaryProtocolFactoryDefault()
	}

	var (
		client       = m3thrift.NewM3ClientFactory(trans, protocolFactory)
		resourcePool = newResourcePool(protocolFactory)
		tagm         = make(map[string]string)
		tags         = resourcePool.getMetricTagSlice()
	)

	// Create common tags. Service and env are required (via opts or
	// CommonTags); host is added only when IncludeHost is set.
	for k, v := range opts.CommonTags {
		tagm[k] = v
	}
	if opts.CommonTags[ServiceTag] == "" {
		if opts.Service == "" {
			return nil, fmt.Errorf("%s common tag is required", ServiceTag)
		}
		tagm[ServiceTag] = opts.Service
	}
	if opts.CommonTags[EnvTag] == "" {
		if opts.Env == "" {
			return nil, fmt.Errorf("%s common tag is required", EnvTag)
		}
		tagm[EnvTag] = opts.Env
	}
	if opts.IncludeHost {
		if opts.CommonTags[HostTag] == "" {
			hostname, err := os.Hostname()
			if err != nil {
				return nil, errors.WithMessage(err, "error resolving host tag")
			}
			tagm[HostTag] = hostname
		}
	}
	for k, v := range tagm {
		tags = append(tags, m3thrift.MetricTag{
			Name:  k,
			Value: v,
		})
	}

	// Calculate size of common tags by serializing an empty batch
	// through the counting transport.
	var (
		batch = m3thrift.MetricBatch{
			Metrics:    resourcePool.getMetricSlice(),
			CommonTags: tags,
		}
		proto = resourcePool.getProto()
	)
	if err := batch.Write(proto); err != nil {
		return nil, errors.WithMessage(
			err,
			"failed to write to proto for size calculation",
		)
	}
	resourcePool.releaseMetricSlice(batch.Metrics)

	var (
		calc             = proto.Transport().(*customtransport.TCalcTransport)
		numOverheadBytes = _emitMetricBatchOverhead + calc.GetCount()
		freeBytes        = opts.MaxPacketSizeBytes - numOverheadBytes
	)
	calc.ResetCount()
	if freeBytes <= 0 {
		return nil, errCommonTagSize
	}

	// Buckets for the reporter's internal batch-size histogram.
	buckets := tally.ValueBuckets(append(
		[]float64{0.0},
		tally.MustMakeExponentialValueBuckets(2.0, 2.0, 11)...,
	))

	r := &reporter{
		buckets:         tally.BucketPairs(buckets),
		bucketIDTagName: opts.HistogramBucketIDName,
		bucketTagName:   opts.HistogramBucketName,
		bucketValFmt:    "%." + strconv.Itoa(int(opts.HistogramBucketTagPrecision)) + "f",
		calc:            calc,
		calcProto:       proto,
		client:          client,
		commonTags:      tags,
		donech:          make(chan struct{}),
		freeBytes:       freeBytes,
		metCh:           make(chan sizedMetric, opts.MaxQueueSize),
		overheadBytes:   numOverheadBytes,
		resourcePool:    resourcePool,
		stringInterner:  cache.NewStringInterner(),
		tagCache:        cache.NewTagCache(),
	}

	// Self-telemetry metrics describing the reporter itself.
	internalTags := map[string]string{
		"version": tally.Version,
	}
	r.batchSizeHistogram = r.AllocateHistogram("tally.internal.batch-size", internalTags, buckets)
	r.numBatchesCounter = r.AllocateCounter("tally.internal.num-batches", internalTags)
	r.numMetricsCounter = r.AllocateCounter("tally.internal.num-metrics", internalTags)
	r.numWriteErrorsCounter = r.AllocateCounter("tally.internal.num-write-errors", internalTags)

	// Background goroutines: one drains and batches metrics, one keeps
	// a coarse cached timestamp fresh (see _timeResolution).
	r.wg.Add(1)
	go func() {
		defer r.wg.Done()
		r.process()
	}()
	r.wg.Add(1)
	go func() {
		defer r.wg.Done()
		r.timeLoop()
	}()

	return r, nil
}
// AllocateCounter implements tally.CachedStatsReporter.
func (r *reporter) AllocateCounter(
	name string,
	tags map[string]string,
) tally.CachedCount {
	return r.allocateCounter(name, tags)
}

// allocateCounter builds the concrete cachedMetric for a counter so it
// can also be reused internally (e.g. for histogram buckets).
func (r *reporter) allocateCounter(
	name string,
	tags map[string]string,
) cachedMetric {
	var (
		counter = r.newMetric(name, tags, counterType)
		// The serialized size is computed once at allocation time.
		size = r.calculateSize(counter)
	)
	return cachedMetric{
		metric:   counter,
		reporter: r,
		size:     size,
	}
}
// AllocateGauge implements tally.CachedStatsReporter.
// The gauge metric and its serialized size are computed once here so
// each later report only has to fill in the value.
func (r *reporter) AllocateGauge(
	name string,
	tags map[string]string,
) tally.CachedGauge {
	gauge := r.newMetric(name, tags, gaugeType)
	return cachedMetric{
		metric:   gauge,
		reporter: r,
		size:     r.calculateSize(gauge),
	}
}
// AllocateTimer implements tally.CachedStatsReporter.
// The timer metric and its serialized size are computed once here so
// each later report only has to fill in the interval.
func (r *reporter) AllocateTimer(
	name string,
	tags map[string]string,
) tally.CachedTimer {
	timer := r.newMetric(name, tags, timerType)
	return cachedMetric{
		metric:   timer,
		reporter: r,
		size:     r.calculateSize(timer),
	}
}
// AllocateHistogram implements tally.CachedStatsReporter.
//
// For every bucket it pre-allocates a counter carrying the histogram's
// tags, and pre-computes the serialized size including the bucket and
// bucket-ID tags, so reporting a sample later only copies a ready-made
// metric.
func (r *reporter) AllocateHistogram(
	name string,
	tags map[string]string,
	buckets tally.Buckets,
) tally.CachedHistogram {
	var (
		_, isDuration = buckets.(tally.DurationBuckets)
		// Bucket IDs are zero-padded to at least
		// _minMetricBucketIDTagLength digits.
		bucketIDLen = int(math.Max(
			float64(ndigits(buckets.Len())),
			float64(_minMetricBucketIDTagLength),
		))
		bucketIDFmt           = "%0" + strconv.Itoa(bucketIDLen) + "d"
		cachedValueBuckets    []cachedHistogramBucket
		cachedDurationBuckets []cachedHistogramBucket
	)
	var (
		mtags = r.convertTags(tags)
		// The previous upper bound starts at the -infinity sentinel so
		// the first bucket's name is "-infinity-<bound>".
		prevDuration = time.Duration(math.MinInt64)
		prevValue    = -math.MaxFloat64
	)
	for i, pair := range tally.BucketPairs(buckets) {
		var (
			counter = r.allocateCounter(name, nil)
			hbucket = cachedHistogramBucket{
				bucketID:           r.stringInterner.Intern(fmt.Sprintf(bucketIDFmt, i)),
				valueUpperBound:    pair.UpperBoundValue(),
				durationUpperBound: pair.UpperBoundDuration(),
				metric:             &counter,
			}
			// Extra bytes that the bucket/bucket-ID tag names and the
			// bucket-ID value add to the serialized metric.
			delta = len(r.bucketIDTagName) + len(r.bucketTagName) + len(hbucket.bucketID)
		)
		hbucket.metric.metric.Tags = mtags
		hbucket.metric.size = r.calculateSize(hbucket.metric.metric)
		if isDuration {
			bname := r.stringInterner.Intern(
				r.durationBucketString(prevDuration) + "-" +
					r.durationBucketString(pair.UpperBoundDuration()),
			)
			hbucket.bucket = bname
			hbucket.metric.size += int32(delta + len(bname))
			cachedDurationBuckets = append(cachedDurationBuckets, hbucket)
		} else {
			bname := r.stringInterner.Intern(
				r.valueBucketString(prevValue) + "-" +
					r.valueBucketString(pair.UpperBoundValue()),
			)
			hbucket.bucket = bname
			hbucket.metric.size += int32(delta + len(bname))
			cachedValueBuckets = append(cachedValueBuckets, hbucket)
		}
		prevDuration = pair.UpperBoundDuration()
		prevValue = pair.UpperBoundValue()
	}
	return cachedHistogram{
		r:                     r,
		name:                  name,
		cachedValueBuckets:    cachedValueBuckets,
		cachedDurationBuckets: cachedDurationBuckets,
	}
}
// valueBucketString formats a value-bucket bound for use in a metric
// tag. The +/-math.MaxFloat64 sentinels render as "infinity" and
// "-infinity"; all other values use the configured precision format.
func (r *reporter) valueBucketString(v float64) string {
	switch v {
	case math.MaxFloat64:
		return "infinity"
	case -math.MaxFloat64:
		return "-infinity"
	default:
		return fmt.Sprintf(r.bucketValFmt, v)
	}
}
// durationBucketString formats a duration-bucket bound for a metric
// tag. Zero renders as "0"; the math.MaxInt64/MinInt64 sentinels render
// as "infinity"/"-infinity"; all other values use Duration.String.
func (r *reporter) durationBucketString(d time.Duration) string {
	switch d {
	case 0:
		return "0"
	case time.Duration(math.MaxInt64):
		return "infinity"
	case time.Duration(math.MinInt64):
		return "-infinity"
	default:
		return d.String()
	}
}
// newMetric builds an m3thrift.Metric of the given type with an
// interned name and converted tags. The timestamp and value are set to
// max sentinels — presumably so calculateSize measures the largest
// possible encoding; real values are filled in at report time.
// TODO confirm the sentinel/size relationship against the thrift codec.
func (r *reporter) newMetric(
	name string,
	tags map[string]string,
	t metricType,
) m3thrift.Metric {
	m := m3thrift.Metric{
		Name:      r.stringInterner.Intern(name),
		Timestamp: _maxInt64,
	}
	switch t {
	case counterType:
		m.Value.MetricType = m3thrift.MetricType_COUNTER
		m.Value.Count = _maxInt64
	case gaugeType:
		m.Value.MetricType = m3thrift.MetricType_GAUGE
		m.Value.Gauge = _maxFloat64
	case timerType:
		m.Value.MetricType = m3thrift.MetricType_TIMER
		m.Value.Timer = _maxInt64
	}
	// Skip tag conversion entirely for untagged metrics.
	if len(tags) == 0 {
		return m
	}
	m.Tags = r.convertTags(tags)
	return m
}
// calculateSize serializes m through the shared counting transport to
// measure its encoded size in bytes. calcLock serializes access to the
// shared calculator state.
func (r *reporter) calculateSize(m m3thrift.Metric) int32 {
	r.calcLock.Lock()
	defer r.calcLock.Unlock()

	m.Write(r.calcProto) //nolint:errcheck
	defer r.calc.ResetCount()
	return r.calc.GetCount()
}
// reportCopyMetric stamps a copy of a cached metric with the current
// cached timestamp (~_timeResolution granularity) and enqueues it for
// batching. It is a no-op once the reporter is closed; the pending
// counter lets Close wait for in-flight calls.
func (r *reporter) reportCopyMetric(
	m m3thrift.Metric,
	size int32,
	bucket string,
	bucketID string,
) {
	r.pending.Inc()
	defer r.pending.Dec()

	if r.done.Load() {
		return
	}

	m.Timestamp = r.now.Load()
	sm := sizedMetric{
		m:        m,
		size:     size,
		set:      true,
		bucket:   bucket,
		bucketID: bucketID,
	}
	// Don't block forever on a full queue if the reporter shuts down.
	select {
	case r.metCh <- sm:
	case <-r.donech:
	}
}
// Flush sends an empty sizedMetric to signal a flush.
// It also emits the reporter's internal telemetry first, so those
// metrics ride along with the flushed batch. No-op once closed.
func (r *reporter) Flush() {
	r.pending.Inc()
	defer r.pending.Dec()

	if r.done.Load() {
		return
	}

	r.reportInternalMetrics()
	r.metCh <- sizedMetric{}
}
// Close waits for metrics to be flushed before closing the backend.
// It flips done exactly once; later calls return errAlreadyClosed.
func (r *reporter) Close() (err error) {
	if !r.done.CAS(false, true) {
		return errAlreadyClosed
	}

	// Spin (yielding) until in-flight reportCopyMetric/Flush calls
	// observe done and drain; pending is only held briefly.
	for r.pending.Load() > 0 {
		runtime.Gosched()
	}

	// Closing metCh terminates process(), which performs a final flush.
	close(r.donech)
	close(r.metCh)
	r.wg.Wait()
	return nil
}
// Capabilities returns the reporter itself as its tally capabilities.
func (r *reporter) Capabilities() tally.Capabilities {
	return r
}

// Reporting reports true: this backend emits metrics.
func (r *reporter) Reporting() bool {
	return true
}

// Tagging reports true: this backend supports tagged metrics.
func (r *reporter) Tagging() bool {
	return true
}
// process drains metCh, packing metrics into batches that fit within
// the freeBytes packet budget and flushing whenever a batch would
// overflow or an explicit flush signal (zero-value sizedMetric)
// arrives. It runs until metCh is closed, then performs a final flush.
func (r *reporter) process() {
	var (
		// Pool of scratch tag slices used to append histogram bucket
		// tags without mutating the cached metrics' shared tag slices.
		extraTags = sync.Pool{
			New: func() interface{} {
				return make([]m3thrift.MetricTag, 0, 8)
			},
		}
		// Slices borrowed from extraTags for the current batch; they
		// are only returned to the pool after the batch is flushed.
		borrowedTags = make([][]m3thrift.MetricTag, 0, 128)
		mets         = make([]m3thrift.Metric, 0, r.freeBytes/10)
		bytes        int32
	)

	for smet := range r.metCh {
		flush := !smet.set && len(mets) > 0
		// Flush when signaled, or when adding this metric would exceed
		// the packet's free byte budget.
		if flush || bytes+smet.size > r.freeBytes {
			r.numMetrics.Add(int64(len(mets)))
			mets = r.flush(mets)
			bytes = 0

			// Safe to recycle borrowed tag slices now that the batch
			// referencing them has been written out.
			if len(borrowedTags) > 0 {
				for i := range borrowedTags {
					extraTags.Put(borrowedTags[i][:0])
				}
				borrowedTags = borrowedTags[:0]
			}
		}

		// A pure flush signal carries no metric.
		if !smet.set {
			continue
		}

		m := smet.m
		if len(smet.bucket) > 0 {
			// Histogram bucket sample: copy the base tags and append
			// the bucket-ID and bucket-name tags.
			tags := extraTags.Get().([]m3thrift.MetricTag)
			tags = append(tags, m.Tags...)
			tags = append(
				tags,
				m3thrift.MetricTag{
					Name:  r.bucketIDTagName,
					Value: smet.bucketID,
				},
				m3thrift.MetricTag{
					Name:  r.bucketTagName,
					Value: smet.bucket,
				},
			)
			borrowedTags = append(borrowedTags, tags)
			m.Tags = tags
		}

		mets = append(mets, m)
		bytes += smet.size
	}

	// Final flush once the channel is closed (reporter shutdown).
	r.flush(mets)
}
// flush emits the accumulated metrics as a single batch and returns the
// slice truncated to zero length for reuse. Write errors are counted in
// the internal telemetry but otherwise dropped (the transports are
// UDP-based, best-effort).
func (r *reporter) flush(mets []m3thrift.Metric) []m3thrift.Metric {
	if len(mets) == 0 {
		return mets
	}

	r.numBatches.Inc()

	err := r.client.EmitMetricBatchV2(m3thrift.MetricBatch{
		Metrics:    mets,
		CommonTags: r.commonTags,
	})
	if err != nil {
		r.numWriteErrors.Inc()
	}

	// n.b. In the event that we had allocated additional tag storage in
	// process(), clear it so that it can be reclaimed. This does not
	// affect allocated metrics' tags.
	for i := range mets {
		mets[i].Tags = nil
	}

	return mets[:0]
}
// convertTags converts a tag map to its slice representation, memoizing
// the result in the tag cache (keyed by the map's canonical key) and
// interning all tag name/value strings.
func (r *reporter) convertTags(tags map[string]string) []m3thrift.MetricTag {
	key := cache.TagMapKey(tags)
	mtags, ok := r.tagCache.Get(key)
	if !ok {
		mtags = r.resourcePool.getMetricTagSlice()
		for k, v := range tags {
			mtags = append(mtags, m3thrift.MetricTag{
				Name:  r.stringInterner.Intern(k),
				Value: r.stringInterner.Intern(v),
			})
		}
		// Set may return an existing slice if another goroutine raced
		// us — TODO confirm TagCache.Set semantics.
		mtags = r.tagCache.Set(key, mtags)
	}
	return mtags
}
// reportInternalMetrics emits the reporter's self-telemetry: the number
// of batches, metrics, and write errors since the last call, plus a
// sample of the average batch size into the batch-size histogram.
func (r *reporter) reportInternalMetrics() {
	var (
		batches     = r.numBatches.Swap(0)
		metrics     = r.numMetrics.Swap(0)
		writeErrors = r.numWriteErrors.Swap(0)
	)

	// Only sample the batch-size histogram when at least one batch was
	// flushed. Previously metrics/batches with batches == 0 produced
	// NaN (0/0) or +Inf, which fails every sort.Search comparison and
	// recorded a bogus sample in the MaxFloat64 overflow bucket on
	// every idle Flush.
	if batches > 0 {
		batchSize := float64(metrics) / float64(batches)
		bucket := sort.Search(len(r.buckets), func(i int) bool {
			return r.buckets[i].UpperBoundValue() >= batchSize
		})

		var value float64
		if bucket < len(r.buckets) {
			value = r.buckets[bucket].UpperBoundValue()
		} else {
			value = math.MaxFloat64
		}
		r.batchSizeHistogram.ValueBucket(0, value).ReportSamples(1)
	}

	r.numBatchesCounter.ReportCount(batches)
	r.numMetricsCounter.ReportCount(metrics)
	r.numWriteErrorsCounter.ReportCount(writeErrors)
}
// timeLoop periodically refreshes the cached wall-clock timestamp used
// to stamp outgoing metrics, at _timeResolution (100ms) granularity.
// It exits once the reporter is marked done.
func (r *reporter) timeLoop() {
	for !r.done.Load() {
		r.now.Store(time.Now().UnixNano())
		time.Sleep(_timeResolution)
	}
}
// cachedMetric is a pre-allocated metric plus its precalculated
// serialized size; reporting only fills in the value and hands a copy
// to the reporter.
type cachedMetric struct {
	metric   m3thrift.Metric
	reporter *reporter
	size     int32
}

// ReportCount reports a counter value.
func (c cachedMetric) ReportCount(value int64) {
	c.metric.Value.Count = value
	c.reporter.reportCopyMetric(c.metric, c.size, "", "")
}

// ReportGauge reports a gauge value.
func (c cachedMetric) ReportGauge(value float64) {
	c.metric.Value.Gauge = value
	c.reporter.reportCopyMetric(c.metric, c.size, "", "")
}

// ReportTimer reports a timer interval (stored as int64 nanoseconds).
func (c cachedMetric) ReportTimer(interval time.Duration) {
	c.metric.Value.Timer = int64(interval)
	c.reporter.reportCopyMetric(c.metric, c.size, "", "")
}

// noopMetric silently discards all reports; it is returned for
// histogram bucket lookups that match no configured bucket.
type noopMetric struct{}

func (c noopMetric) ReportCount(value int64)            {}
func (c noopMetric) ReportGauge(value float64)          {}
func (c noopMetric) ReportTimer(interval time.Duration) {}
func (c noopMetric) ReportSamples(value int64)          {}
// cachedHistogram holds the pre-allocated per-bucket counters for a
// histogram, split into value buckets and duration buckets.
type cachedHistogram struct {
	r                     *reporter
	name                  string
	cachedValueBuckets    []cachedHistogramBucket
	cachedDurationBuckets []cachedHistogramBucket
}

// ValueBucket returns a reporter for the value bucket whose upper bound
// equals bucketUpperBound. The first (lower-bound) argument is ignored;
// buckets are located by upper bound alone. If no matching bucket
// exists, a no-op reporter is returned.
func (h cachedHistogram) ValueBucket(
	_ float64,
	bucketUpperBound float64,
) tally.CachedHistogramBucket {
	var (
		n = len(h.cachedValueBuckets)
		// Buckets are stored sorted by ascending upper bound, so a
		// binary search locates the bucket.
		idx = sort.Search(n, func(i int) bool {
			return h.cachedValueBuckets[i].valueUpperBound >= bucketUpperBound
		})
	)
	if idx == n {
		return noopMetric{}
	}
	// Capture everything the report closure needs up front so each
	// sample report avoids re-resolving the bucket.
	var (
		b        = h.cachedValueBuckets[idx]
		cm       = b.metric
		m        = cm.metric
		size     = cm.size
		bucket   = b.bucket
		bucketID = b.bucketID
		rep      = cm.reporter
	)
	return reportSamplesFunc(func(value int64) {
		m.Value.Count = value
		rep.reportCopyMetric(m, size, bucket, bucketID)
	})
}
// DurationBucket returns a reporter for the duration bucket whose upper
// bound equals bucketUpperBound. The first (lower-bound) argument is
// ignored; buckets are located by upper bound alone. If no matching
// bucket exists, a no-op reporter is returned.
func (h cachedHistogram) DurationBucket(
	_ time.Duration,
	bucketUpperBound time.Duration,
) tally.CachedHistogramBucket {
	var (
		n = len(h.cachedDurationBuckets)
		// Buckets are stored sorted by ascending upper bound, so a
		// binary search locates the bucket.
		idx = sort.Search(n, func(i int) bool {
			return h.cachedDurationBuckets[i].durationUpperBound >= bucketUpperBound
		})
	)
	if idx == n {
		return noopMetric{}
	}
	// Capture everything the report closure needs up front so each
	// sample report avoids re-resolving the bucket.
	var (
		b        = h.cachedDurationBuckets[idx]
		cm       = b.metric
		m        = cm.metric
		size     = cm.size
		bucket   = b.bucket
		bucketID = b.bucketID
		rep      = cm.reporter
	)
	return reportSamplesFunc(func(value int64) {
		m.Value.Count = value
		rep.reportCopyMetric(m, size, bucket, bucketID)
	})
}
// cachedHistogramBucket pairs a pre-allocated counter metric with the
// bounds and pre-formatted tag values of a single histogram bucket.
type cachedHistogramBucket struct {
	metric             *cachedMetric
	durationUpperBound time.Duration
	valueUpperBound    float64
	bucket             string // "<lower>-<upper>" bucket tag value
	bucketID           string // zero-padded bucket index tag value
}

// reportSamplesFunc adapts a plain function to the
// tally.CachedHistogramBucket interface.
type reportSamplesFunc func(value int64)

// ReportSamples reports a sample count by invoking the wrapped function.
func (f reportSamplesFunc) ReportSamples(value int64) {
	f(value)
}

// sizedMetric is a metric queued for emission together with its
// precalculated serialized size. A zero-value sizedMetric (set == false)
// acts as a flush signal on the queue.
type sizedMetric struct {
	m        m3thrift.Metric
	size     int32
	set      bool
	bucket   string
	bucketID string
}
// ndigits returns the number of decimal digits in i. Zero counts as one
// digit; a leading minus sign is not counted.
func ndigits(i int) int {
	digits := 1
	for q := i / 10; q != 0; q /= 10 {
		digits++
	}
	return digits
}
|
// Copyright 2010 The draw2d Authors. All rights reserved.
// created: 21/11/2010 by Laurent Le Goff
// Package gopher draws a gopher avatar based on an SVG from:
// https://github.com/golang-samples/gopher-vector/
package gopher
import (
"image/color"
"github.com/llgcode/draw2d"
"github.com/llgcode/draw2d/samples"
)
// Main draws a left hand and ear of a gopher. Afterwards it returns
// the filename. This should only be used during testing.
func Main(gc draw2d.GraphicContext, ext string) (string, error) {
	gc.Save()
	// Render at half scale.
	gc.Scale(0.5, 0.5)
	// Draw a (partial) gopher
	Draw(gc)
	gc.Restore()
	// Return the output filename
	return samples.Output("gopher", ext), nil
}
// Draw a left hand and ear of a gopher using a gc thanks to
// https://github.com/golang-samples/gopher-vector/
// The coordinates below are transcribed from the SVG path data quoted
// in the comments above each shape.
func Draw(gc draw2d.GraphicContext) {
	// Initialize Stroke Attribute
	gc.SetLineWidth(3)
	gc.SetLineCap(draw2d.RoundCap)
	gc.SetStrokeColor(color.Black)

	// Left hand
	// <path fill-rule="evenodd" clip-rule="evenodd" fill="#F6D2A2" stroke="#000000" stroke-width="3" stroke-linecap="round" d="
	// M10.634,300.493c0.764,15.751,16.499,8.463,23.626,3.539c6.765-4.675,8.743-0.789,9.337-10.015
	// c0.389-6.064,1.088-12.128,0.744-18.216c-10.23-0.927-21.357,1.509-29.744,7.602C10.277,286.542,2.177,296.561,10.634,300.493"/>
	gc.SetFillColor(color.RGBA{0xF6, 0xD2, 0xA2, 0xff})
	gc.MoveTo(10.634, 300.493)
	gc.RCubicCurveTo(0.764, 15.751, 16.499, 8.463, 23.626, 3.539)
	gc.RCubicCurveTo(6.765, -4.675, 8.743, -0.789, 9.337, -10.015)
	gc.RCubicCurveTo(0.389, -6.064, 1.088, -12.128, 0.744, -18.216)
	gc.RCubicCurveTo(-10.23, -0.927, -21.357, 1.509, -29.744, 7.602)
	gc.CubicCurveTo(10.277, 286.542, 2.177, 296.561, 10.634, 300.493)
	gc.FillStroke()

	// Hand detail (stroke only, no fill)
	// <path fill-rule="evenodd" clip-rule="evenodd" fill="#C6B198" stroke="#000000" stroke-width="3" stroke-linecap="round" d="
	// M10.634,300.493c2.29-0.852,4.717-1.457,6.271-3.528"/>
	gc.MoveTo(10.634, 300.493)
	gc.RCubicCurveTo(2.29, -0.852, 4.717, -1.457, 6.271, -3.528)
	gc.Stroke()

	// Left Ear
	// <path fill-rule="evenodd" clip-rule="evenodd" fill="#6AD7E5" stroke="#000000" stroke-width="3" stroke-linecap="round" d="
	// M46.997,112.853C-13.3,95.897,31.536,19.189,79.956,50.74L46.997,112.853z"/>
	gc.MoveTo(46.997, 112.853)
	gc.CubicCurveTo(-13.3, 95.897, 31.536, 19.189, 79.956, 50.74)
	gc.LineTo(46.997, 112.853)
	gc.Close()
	gc.Stroke()
}
fix typo
// Copyright 2010 The draw2d Authors. All rights reserved.
// created: 21/11/2010 by Laurent Le Goff
// Package gopher draws a gopher avatar based on an SVG from:
// https://github.com/golang-samples/gopher-vector/
package gopher
import (
"image/color"
"github.com/llgcode/draw2d"
"github.com/llgcode/draw2d/samples"
)
// Main draws a left hand and ear of a gopher. Afterwards it returns
// the filename. This should only be used during testing.
func Main(gc draw2d.GraphicContext, ext string) (string, error) {
	gc.Save()
	// Render at half scale.
	gc.Scale(0.5, 0.5)
	// Draw a (partial) gopher
	Draw(gc)
	gc.Restore()
	// Return the output filename
	return samples.Output("gopher", ext), nil
}
// Draw a left hand and ear of a gopher using a gc thanks to
// https://github.com/golang-samples/gopher-vector/
// The coordinates below are transcribed from the SVG path data quoted
// in the comments above each shape.
func Draw(gc draw2d.GraphicContext) {
	// Initialize Stroke Attribute
	gc.SetLineWidth(3)
	gc.SetLineCap(draw2d.RoundCap)
	gc.SetStrokeColor(color.Black)

	// Left hand
	// <path fill-rule="evenodd" clip-rule="evenodd" fill="#F6D2A2" stroke="#000000" stroke-width="3" stroke-linecap="round" d="
	// M10.634,300.493c0.764,15.751,16.499,8.463,23.626,3.539c6.765-4.675,8.743-0.789,9.337-10.015
	// c0.389-6.064,1.088-12.128,0.744-18.216c-10.23-0.927-21.357,1.509-29.744,7.602C10.277,286.542,2.177,296.561,10.634,300.493"/>
	gc.SetFillColor(color.RGBA{0xF6, 0xD2, 0xA2, 0xff})
	gc.MoveTo(10.634, 300.493)
	gc.RCubicCurveTo(0.764, 15.751, 16.499, 8.463, 23.626, 3.539)
	gc.RCubicCurveTo(6.765, -4.675, 8.743, -0.789, 9.337, -10.015)
	gc.RCubicCurveTo(0.389, -6.064, 1.088, -12.128, 0.744, -18.216)
	gc.RCubicCurveTo(-10.23, -0.927, -21.357, 1.509, -29.744, 7.602)
	gc.CubicCurveTo(10.277, 286.542, 2.177, 296.561, 10.634, 300.493)
	gc.FillStroke()

	// Hand detail (stroke only, no fill)
	// <path fill-rule="evenodd" clip-rule="evenodd" fill="#C6B198" stroke="#000000" stroke-width="3" stroke-linecap="round" d="
	// M10.634,300.493c2.29-0.852,4.717-1.457,6.271-3.528"/>
	gc.MoveTo(10.634, 300.493)
	gc.RCubicCurveTo(2.29, -0.852, 4.717, -1.457, 6.271, -3.528)
	gc.Stroke()

	// Left Ear
	// <path fill-rule="evenodd" clip-rule="evenodd" fill="#6AD7E5" stroke="#000000" stroke-width="3" stroke-linecap="round" d="
	// M46.997,112.853C-13.3,95.897,31.536,19.189,79.956,50.74L46.997,112.853z"/>
	gc.MoveTo(46.997, 112.853)
	gc.CubicCurveTo(-13.3, 95.897, 31.536, 19.189, 79.956, 50.74)
	gc.LineTo(46.997, 112.853)
	gc.Close()
	gc.Stroke()
}
|
// Copyright 2018 The OpenPitrix Authors. All rights reserved.
// Use of this source code is governed by a Apache license
// that can be found in the LICENSE file.
package iam
import (
"context"
"fmt"
"openpitrix.io/openpitrix/pkg/constants"
"openpitrix.io/openpitrix/pkg/logger"
"openpitrix.io/openpitrix/pkg/manager"
"openpitrix.io/openpitrix/pkg/pb"
)
// Client wraps the generated AccountManagerClient with helper methods.
type Client struct {
	pb.AccountManagerClient
}

// NewClient dials the IAM service and returns a ready-to-use Client.
func NewClient() (*Client, error) {
	conn, err := manager.NewClient(constants.IAMServiceHost, constants.IAMServicePort)
	if err != nil {
		return nil, err
	}

	return &Client{
		AccountManagerClient: pb.NewAccountManagerClient(conn),
	}, nil
}
// GetUsers fetches the users for the given IDs via the account manager,
// failing if the response does not contain exactly one user per
// requested ID.
func (c *Client) GetUsers(ctx context.Context, userIds []string) ([]*pb.User, error) {
	response, err := c.DescribeUsers(ctx, &pb.DescribeUsersRequest{
		UserId: userIds,
	})
	if err != nil {
		logger.Error(ctx, "Describe users %s failed: %+v", userIds, err)
		return nil, err
	}
	// Sanity check: one user must come back per requested ID.
	if len(response.UserSet) != len(userIds) {
		logger.Error(ctx, "Describe users %s with return count [%d]", userIds, len(response.UserSet))
		return nil, fmt.Errorf("describe users %s with return count [%d]", userIds, len(response.UserSet))
	}
	return response.UserSet, nil
}
Internal users no need to call describeUsers api
// Copyright 2018 The OpenPitrix Authors. All rights reserved.
// Use of this source code is governed by a Apache license
// that can be found in the LICENSE file.
package iam
import (
"context"
"fmt"
"openpitrix.io/openpitrix/pkg/constants"
"openpitrix.io/openpitrix/pkg/logger"
"openpitrix.io/openpitrix/pkg/manager"
"openpitrix.io/openpitrix/pkg/pb"
"openpitrix.io/openpitrix/pkg/util/pbutil"
"openpitrix.io/openpitrix/pkg/util/stringutil"
)
// Client wraps the generated AccountManagerClient with helper methods.
type Client struct {
	pb.AccountManagerClient
}

// NewClient dials the IAM service and returns a ready-to-use Client.
func NewClient() (*Client, error) {
	conn, err := manager.NewClient(constants.IAMServiceHost, constants.IAMServicePort)
	if err != nil {
		return nil, err
	}

	return &Client{
		AccountManagerClient: pb.NewAccountManagerClient(conn),
	}, nil
}
// GetUsers resolves users for the given IDs. Internal (system) users
// are synthesized locally with the global-admin role instead of being
// looked up via the DescribeUsers API; only the remaining IDs are
// fetched remotely, then the two sets are merged.
func (c *Client) GetUsers(ctx context.Context, userIds []string) ([]*pb.User, error) {
	var internalUsers []*pb.User
	var noInternalUserIds []string
	for _, userId := range userIds {
		if stringutil.StringIn(userId, constants.InternalUsers) {
			internalUsers = append(internalUsers, &pb.User{
				UserId: pbutil.ToProtoString(userId),
				Role:   pbutil.ToProtoString(constants.RoleGlobalAdmin),
			})
		} else {
			noInternalUserIds = append(noInternalUserIds, userId)
		}
	}
	response, err := c.DescribeUsers(ctx, &pb.DescribeUsersRequest{
		UserId: noInternalUserIds,
	})
	if err != nil {
		logger.Error(ctx, "Describe users %s failed: %+v", noInternalUserIds, err)
		return nil, err
	}
	// Sanity check: one user must come back per non-internal ID; the
	// logged count includes the synthesized internal users for context.
	if len(response.UserSet) != len(noInternalUserIds) {
		logger.Error(ctx, "Describe users %s with return count [%d]", userIds, len(response.UserSet)+len(internalUsers))
		return nil, fmt.Errorf("describe users %s with return count [%d]", userIds, len(response.UserSet)+len(internalUsers))
	}
	response.UserSet = append(response.UserSet, internalUsers...)
	return response.UserSet, nil
}
|
/*
Package docstore implements a JSON-based document store
built on top of the Versioned Key-Value store and the Blob store.
Each document will get assigned a MongoDB like ObjectId:
<binary encoded uint32 (4 bytes) + 8 random bytes hex encoded >
The resulting id will have a length of 24 characters encoded as hex (12 raw bytes).
The JSON document will be stored as is and kvk entry will reference it.
docstore:<collection>:<id> => <flag (1 byte) + JSON blob hash>
Document will be automatically sorted by creation time thanks to the ID.
The raw JSON will be stored as is, but the API will add the _id field on the fly.
*/
package docstore // import "a4.io/blobstash/pkg/docstore"
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/evanphx/json-patch"
"github.com/gorilla/mux"
log "github.com/inconshreveable/log15"
logext "github.com/inconshreveable/log15/ext"
"github.com/vmihailenco/msgpack"
"github.com/yuin/gopher-lua"
"a4.io/blobstash/pkg/asof"
"a4.io/blobstash/pkg/auth"
"a4.io/blobstash/pkg/config"
"a4.io/blobstash/pkg/docstore/id"
"a4.io/blobstash/pkg/filetree"
"a4.io/blobstash/pkg/httputil"
"a4.io/blobstash/pkg/httputil/bewit"
"a4.io/blobstash/pkg/perms"
"a4.io/blobstash/pkg/stash/store"
"a4.io/blobstash/pkg/vkv"
)
// FIXME(tsileo): create a "meta" hook for handling indexing
// will need to solve few issues before:
// - do we need to check if the doc is already indexed?
var (
prefixKey = "docstore:"
prefixKeyFmt = prefixKey + "%s"
keyFmt = prefixKeyFmt + ":%s"
PrefixIndexKeyFmt = "docstore-index:%s"
IndexKeyFmt = PrefixIndexKeyFmt + ":%s"
)
// ErrUnprocessableEntity is returned when a document is faulty
var ErrUnprocessableEntity = errors.New("unprocessable entity")
var ErrDocNotFound = errors.New("document not found")
var ErrPreconditionFailed = errors.New("precondition failed")
var reservedKeys = map[string]struct{}{
"_id": struct{}{},
"_updated": struct{}{},
"_created": struct{}{},
"_version": struct{}{},
"_hooks": struct{}{},
}
// idFromKey extracts the document ID from a raw kv key of the form
// "docstore:<collection>:<hex id>".
func idFromKey(col, key string) (*id.ID, error) {
	prefix := "docstore:" + col + ":"
	hexID := strings.Replace(key, prefix, "", 1)
	docID, err := id.FromHex(hexID)
	if err != nil {
		return nil, err
	}
	return docID, nil
}
const (
flagNoop byte = iota // Default flag
flagDeleted
)
const (
pointerBlobJSON = "@blobs/json:" // FIXME(tsileo): document the Pointer feature
// PointerBlobRef = "@blobs/ref:" // FIXME(tsileo): implements this like a @filetree/ref
pointerFiletreeRef = "@filetree/ref:"
//PointerURLInfo = "@url/info:" // XXX(tsileo): fetch OG meta data or at least title, optionally screenshot???
// TODO(tsileo): implements PointerKvRef
// PointerKvRef = "@kv/ref:"
// XXX(tsileo): allow custom Lua-defined pointer, this could be useful for implement cross-note linking in Blobs
// Sharing TTL for the bewit link of Filetree references
shareDuration = 30 * time.Minute
)
// executionStats collects query-execution metrics returned alongside
// query results.
type executionStats struct {
	NReturned         int    `json:"nReturned"`
	TotalDocsExamined int    `json:"totalDocsExamined"`
	ExecutionTimeNano int64  `json:"executionTimeNano"`
	LastID            string `json:"-"`
	Engine            string `json:"query_engine"`
	Index             string `json:"index"`
	Cursor            string `json:"cursor"`
}

// DocStore holds the docstore manager
type DocStore struct {
	kvStore   store.KvStore
	blobStore store.BlobStore
	filetree  *filetree.FileTree
	conf      *config.Config
	// docIndex *index.HashIndexes

	hooks         *LuaHooks
	storedQueries map[string]*storedQuery
	locker        *locker

	indexes map[string]map[string]Indexer                // per-collection sort indexes
	exts    map[string]map[string]map[string]interface{} // per-collection ext data
	schemas map[string][]*LuaSchemaField                 // Lua-registered schemas

	logger log.Logger
}

// storedQuery is a named Lua query loaded from the config.
type storedQuery struct {
	Name string
	Main string // path to the query's main.lua
}
// New initializes the `DocStoreExt`
func New(logger log.Logger, conf *config.Config, kvStore store.KvStore, blobStore store.BlobStore, ft *filetree.FileTree) (*DocStore, error) {
	logger.Debug("init")

	sortIndexes := map[string]map[string]Indexer{}
	var err error

	// Load the docstore's stored queries from the config
	storedQueries := map[string]*storedQuery{}
	if conf.Docstore != nil && conf.Docstore.StoredQueries != nil {
		for _, squery := range conf.Docstore.StoredQueries {
			// First ensure the required match.lua is present
			if _, err := os.Stat(filepath.Join(squery.Path, "main.lua")); os.IsNotExist(err) {
				return nil, fmt.Errorf("missing `main.lua` for stored query %s", squery.Name)
			}
			storedQuery := &storedQuery{
				Name: squery.Name,
				Main: filepath.Join(squery.Path, "main.lua"),
			}
			storedQueries[squery.Name] = storedQuery
		}
		logger.Debug("sorted queries setup", "stored_queries", fmt.Sprintf("%+v", storedQueries))
	}

	// Load the sort indexes definitions if any
	// NOTE(review): only the first field of each index definition is
	// used — confirm multi-field indexes are intentionally unsupported.
	if conf.Docstore != nil && conf.Docstore.SortIndexes != nil {
		for collection, indexes := range conf.Docstore.SortIndexes {
			sortIndexes[collection] = map[string]Indexer{}
			for sortIndexName, sortIndex := range indexes {
				sortIndexes[collection][sortIndexName], err = newSortIndex(sortIndexName, sortIndex.Fields[0])
				if err != nil {
					return nil, fmt.Errorf("failed to init index: %v", err)
				}
			}
		}
		logger.Debug("indexes setup", "indexes", fmt.Sprintf("%+v", sortIndexes))
	}

	// Lua hooks get access to the filetree and blob store.
	hooks, err := newLuaHooks(conf, ft, blobStore)
	if err != nil {
		return nil, err
	}

	return &DocStore{
		kvStore:       kvStore,
		blobStore:     blobStore,
		filetree:      ft,
		storedQueries: storedQueries,
		hooks:         hooks,
		conf:          conf,
		locker:        newLocker(),
		logger:        logger,
		indexes:       sortIndexes,
		schemas:       map[string][]*LuaSchemaField{},
		exts:          map[string]map[string]map[string]interface{}{},
	}, nil
}
// Close closes all the open DB files.
// Currently that means every per-collection sort index; it stops at the
// first error.
func (docstore *DocStore) Close() error {
	for _, indexes := range docstore.indexes {
		for _, index := range indexes {
			if err := index.Close(); err != nil {
				return err
			}
		}
	}
	// if err := docstore.docIndex.Close(); err != nil {
	// 	return err
	// }
	return nil
}
// LuaSetupSortIndex registers a sort index on a collection (called from
// Lua). It is a no-op if an index with the same name already exists for
// that collection.
func (dc *DocStore) LuaSetupSortIndex(col, name, field string) error {
	if _, ok := dc.indexes[col]; !ok {
		dc.indexes[col] = map[string]Indexer{}
	}
	if _, ok := dc.indexes[col][name]; ok {
		// TODO(tsileo): return an error?
		return nil
	}
	var err error
	dc.indexes[col][name], err = newSortIndex(name, field)
	if err != nil {
		return fmt.Errorf("failed to init index: %v", err)
	}
	return nil
}
// LuaSchemaField describes a single field of a Lua-registered schema.
type LuaSchemaField struct {
	Name string                 // field name
	Type string                 // field type identifier
	Data map[string]interface{} // optional extra field metadata
}

// LuaRegisterSchema registers a document schema (a list of field
// definitions coming from Lua) under the given name. Each fields entry
// must be a map with "field_name", "field_type" and optionally "data"
// keys; a wrong shape panics via type assertion.
func (dc *DocStore) LuaRegisterSchema(name string, fields []interface{}) error {
	schema := []*LuaSchemaField{}
	for _, field := range fields {
		fdata := field.(map[string]interface{})
		var data map[string]interface{}
		if dat, ok := fdata["data"]; ok {
			data = dat.(map[string]interface{})
		}
		f := &LuaSchemaField{
			Name: fdata["field_name"].(string),
			Type: fdata["field_type"].(string),
			Data: data,
		}
		schema = append(schema, f)
	}
	dc.schemas[name] = schema
	dc.logger.Debug("setup new schema", "name", name, "schema", fmt.Sprintf("%q", schema))
	return nil
}
// LuaGetExt returns the data registered (via SetupExt) for the given
// collection/ext pair, or an error if nothing was registered.
func (dc *DocStore) LuaGetExt(col, ext string) (map[string]interface{}, error) {
	colExts, ok := dc.exts[col]
	if !ok {
		return nil, fmt.Errorf("no ext %s %s", col, ext)
	}
	dat, ok := colExts[ext]
	if !ok {
		return nil, fmt.Errorf("no ext %s %s", col, ext)
	}
	return dat, nil
}
// LuaGetSchema looks up a schema previously registered with LuaRegisterSchema.
func (dc *DocStore) LuaGetSchema(name string) ([]*LuaSchemaField, error) {
	schema, ok := dc.schemas[name]
	if !ok {
		return nil, fmt.Errorf("schema %q not found", name)
	}
	return schema, nil
}
// SetupExt registers extension data for the given collection/ext pair,
// retrievable later via LuaGetExt.
func (dc *DocStore) SetupExt(col, ext string, data map[string]interface{}) {
	colExts, ok := dc.exts[col]
	if !ok {
		colExts = map[string]map[string]interface{}{}
		dc.exts[col] = colExts
	}
	colExts[ext] = data
	dc.logger.Debug("setup new ext", "col", col, "ext", ext, "data", fmt.Sprintf("%+v", data))
}
// Register registers all the HTTP handlers for the extension
func (docstore *DocStore) Register(r *mux.Router, basicAuth func(http.Handler) http.Handler) {
	// Collection listing
	r.Handle("/", basicAuth(http.HandlerFunc(docstore.collectionsHandler())))
	// Introspection of the loaded stored queries
	r.Handle("/_stored_queries", basicAuth(http.HandlerFunc(docstore.storedQueriesHandler())))
	// Collection-level operations (list/query/insert)
	r.Handle("/{collection}", basicAuth(http.HandlerFunc(docstore.docsHandler())))
	r.Handle("/{collection}/_rebuild_indexes", basicAuth(http.HandlerFunc(docstore.reindexDocsHandler())))
	r.Handle("/{collection}/_map_reduce", basicAuth(http.HandlerFunc(docstore.mapReduceHandler())))
	// r.Handle("/{collection}/_indexes", middlewares.Auth(http.HandlerFunc(docstore.indexesHandler())))
	// Single-document operations (fetch/update/delete + version history)
	r.Handle("/{collection}/{_id}", basicAuth(http.HandlerFunc(docstore.docHandler())))
	r.Handle("/{collection}/{_id}/_versions", basicAuth(http.HandlerFunc(docstore.docVersionsHandler())))
}
// Expand a doc keys (fetch the blob as JSON, or a filesystem reference)
// e.g: {"ref": "@blobstash/json:<hash>"}
// => {"ref": {"blob": "json decoded"}}
// XXX(tsileo): expanded ref must also works for marking a blob during GC
// FIXME(tsileo): rename this to "pointers" and return {"data":{[...]}, "pointers": {}}
func (docstore *DocStore) fetchPointers(doc map[string]interface{}) (map[string]interface{}, error) {
	// Maps each pointer string (as it appears in the doc) to its expanded value
	pointers := map[string]interface{}{}
	// docstore.logger.Info("expandKeys")
	for _, v := range doc {
		switch vv := v.(type) {
		case map[string]interface{}:
			// Recurse into sub-documents and merge their pointers
			docPointers, err := docstore.fetchPointers(vv)
			if err != nil {
				return nil, err
			}
			for k, v := range docPointers {
				pointers[k] = v
			}
			continue
		case string:
			switch {
			case strings.HasPrefix(vv, pointerBlobJSON):
				// JSON blob pointer: fetch the blob and decode it as JSON
				if _, ok := pointers[vv]; ok {
					// The reference has already been fetched
					continue
				}
				// XXX(tsileo): here and at other place, add a util func in hashutil to detect invalid string length at least
				blob, err := docstore.blobStore.Get(context.TODO(), vv[len(pointerBlobJSON):])
				if err != nil {
					return nil, fmt.Errorf("failed to fetch JSON ref: \"%v => %v\": %v", pointerBlobJSON, v, err)
				}
				p := map[string]interface{}{}
				if err := json.Unmarshal(blob, &p); err != nil {
					return nil, fmt.Errorf("failed to unmarshal blob \"%v => %v\": %v", pointerBlobJSON, v, err)
				}
				pointers[vv] = p
			case strings.HasPrefix(vv, pointerFiletreeRef):
				// Filetree node pointer: resolve the node and attach a signed URL
				if _, ok := pointers[vv]; ok {
					// The reference has already been fetched
					continue
				}
				// XXX(tsileo): here and at other place, add a util func in hashutil to detect invalid string length at least
				hash := vv[len(pointerFiletreeRef):]
				// TODO(tsileo): call filetree to get a node
				// blob, err := docstore.blobStore.Get(context.TODO(), hash)
				// if err != nil {
				// 	return nil, fmt.Errorf("failed to fetch JSON ref: \"%v => %v\": %v", pointerFiletreeRef, v, err)
				// }
				// // Reconstruct the Meta
				// var p map[string]interface{}
				// if err := json.Unmarshal(blob, &p); err != nil {
				// 	return nil, fmt.Errorf("failed to unmarshal meta \"%v => %v\": %v", pointerBlobJSON, v, err)
				// }
				node, err := docstore.filetree.Node(context.TODO(), hash)
				if err != nil {
					return nil, err
				}
				// Create a temporary authorization for the file (with a bewit)
				u := &url.URL{Path: fmt.Sprintf("/%s/%s", node.Type[0:1], hash)}
				if err := bewit.Bewit(docstore.filetree.SharingCred(), u, shareDuration); err != nil {
					return nil, fmt.Errorf("failed to generate bewit: %v", err)
				}
				node.URL = u.String()
				pointers[vv] = node
			}
		}
	}
	// NOTE(review): only maps and strings are traversed — pointers nested
	// inside []interface{} values are never expanded; confirm this is intended.
	return pointers, nil
}
// nextKey returns the next key for lexicographical ordering
// (key = nextKey(lastkey)): the last byte is incremented, carrying leftward
// on overflow. The empty key is returned unchanged.
func nextKey(key string) string {
	b := []byte(key)
	for i := len(b) - 1; i >= 0; i-- {
		b[i]++
		if b[i] != 0 {
			// No carry needed, we're done
			break
		}
	}
	return string(b)
}
// Collections returns all the existing collections by scanning the whole
// "docstore:" keyspace and collecting the distinct collection names.
func (docstore *DocStore) Collections() ([]string, error) {
	// Set of collection names already seen (keys repeat per document)
	index := map[string]struct{}{}
	// Start the scan at the beginning of the docstore keyspace
	// (the original built this with an always-empty `lastKey` variable)
	ksearch := "docstore:"
	for {
		res, cursor, err := docstore.kvStore.Keys(context.TODO(), ksearch, "docstore:\xff", 0)
		// docstore.logger.Debug("loop", "ksearch", ksearch, "len_res", len(res))
		if err != nil {
			return nil, err
		}
		if len(res) == 0 {
			break
		}
		for _, kv := range res {
			// Key = <docstore:{collection}:{_id}>
			col := strings.Split(kv.Key, ":")[1]
			index[col] = struct{}{}
		}
		// Resume the next batch from the returned cursor
		ksearch = cursor
	}
	collections := make([]string, 0, len(index))
	for col := range index {
		collections = append(collections, col)
	}
	return collections, nil
}
// HTTP handler to manage indexes for a collection
// func (docstore *DocStoreExt) indexesHandler() func(http.ResponseWriter, *http.Request) {
// return func(w http.ResponseWriter, r *http.Request) {
// vars := mux.Vars(r)
// collection := vars["collection"]
// if collection == "" {
// httputil.WriteJSONError(w, http.StatusInternalServerError, "Missing collection in the URL")
// return
// }
// // Ensure the client has the needed permissions
// permissions.CheckPerms(r, PermCollectionName, collection)
// switch r.Method {
// case "GET":
// // GET request, just list all the indexes
// srw := httputil.NewSnappyResponseWriter(w, r)
// indexes, err := docstore.Indexes(collection)
// if err != nil {
// panic(err)
// }
// httputil.WriteJSON(srw, indexes)
// srw.Close()
// case "POST":
// // POST request, create a new index from the body
// q := &index.Index{}
// if err := json.NewDecoder(r.Body).Decode(&q); err != nil {
// panic(err)
// }
// // Actually save the index
// if err := docstore.AddIndex(collection, q); err != nil {
// panic(err)
// }
// w.WriteHeader(http.StatusCreated)
// default:
// w.WriteHeader(http.StatusMethodNotAllowed)
// }
// }
// }
// HTTP handler for checking the loaded saved queries
func (docstore *DocStore) storedQueriesHandler() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "GET" {
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}
		// Dump the stored queries currently loaded
		httputil.WriteJSON(w, docstore.storedQueries)
	}
}
// HTTP handler for getting the collections list
func (docstore *DocStore) collectionsHandler() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "GET" {
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}
		// Ensure the client has the needed permissions
		// permissions.CheckPerms(r, PermCollectionName)
		collections, err := docstore.Collections()
		if err != nil {
			panic(err)
		}
		httputil.MarshalAndWrite(r, w, map[string]interface{}{
			"collections": collections,
		})
	}
}
// isQueryAll returns `true` if there's no query.
func isQueryAll(q string) bool {
	return q == ""
}
// Indexes return the list of `Index` for the given collection
// func (docstore *DocStoreExt) Indexes(collection string) ([]*index.Index, error) {
// res, err := docstore.kvStore.ReversePrefixKeys(fmt.Sprintf(PrefixIndexKeyFmt, collection), "", "\xff", 50)
// indexes := []*index.Index{}
// if err != nil {
// panic(err)
// }
// for _, kv := range res {
// // FIXME(tsileo): this check shouldn't be here, it should be handled by ReversePrefixKeys!
// if !strings.HasPrefix(kv.Key, fmt.Sprintf(IndexKeyFmt, collection, "")) {
// break
// }
// index := &index.Index{ID: strings.Replace(kv.Key, fmt.Sprintf(IndexKeyFmt, collection, ""), "", 1)}
// if err := json.Unmarshal([]byte(kv.Value), index); err != nil {
// docstore.logger.Error("failed to unmarshal log entry", "err", err, "js", kv.Value)
// // return nil, err
// continue
// }
// indexes = append(indexes, index)
// }
// return indexes, nil
// }
// func (docstore *DocStoreExt) AddIndex(collection string, idx *index.Index) error {
// if len(idx.Fields) > 1 {
// return httputil.NewPublicErrorFmt("Only single field index are support for now")
// }
// var err error
// js, err := json.Marshal(idx)
// if err != nil {
// return err
// }
// // FIXME(tsileo): ensure we can't create duplicate index
// switch len(idx.Fields) {
// case 1:
// hashKey := fmt.Sprintf("single-field-%s", idx.Fields[0])
// _, err = docstore.kvStore.PutPrefix(fmt.Sprintf(PrefixIndexKeyFmt, collection), hashKey, string(js), -1, "")
// default:
// err = httputil.NewPublicErrorFmt("Bad index")
// }
// return err
// }
// IndexDoc indexes the given doc if needed, should never be called by the client,
// this method is exported to support re-indexing at the blob level and rebuild the index from it.
// func (docstore *DocStoreExt) IndexDoc(collection string, _id *id.ID, doc *map[string]interface{}) error {
// // Check if the document should be indexed by the full-text indexer (Bleve)
// if _id.Flag() == FlagFullTextIndexed {
// if err := docstore.index.Index(_id.String(), doc); err != nil {
// return err
// }
// }
// // Check if the document need to be indexed
// indexes, err := docstore.Indexes(collection)
// if err != nil {
// return fmt.Errorf("Failed to fetch index")
// }/)
// optz := optimizer.New(docstore.logger.New("module", "query optimizer"), indexes)
// shouldIndex, idx, idxKey := optz.ShouldIndex(*doc)
// if shouldIndex {
// docstore.logger.Debug("indexing document", "idx-key", idxKey, "_id", _id.String())
// // FIXME(tsileo): returns a special status code on `index.DuplicateIndexError`
// if err := docstore.docIndex.Index(collection, idx, idxKey, _id.String()); err != nil {
// return err
// }
// }
// return nil
// }
// Insert inserts the given doc (`map[string]interface{}` for now) in the
// given collection and returns its new ID (with version set).
//
// Reserved keys (including "_id") are silently stripped from the input, and
// "post" hooks, if any, may rewrite the doc or reject it
// (ErrUnprocessableEntity).
func (docstore *DocStore) Insert(collection string, doc map[string]interface{}) (*id.ID, error) {
	// If there's already an "_id" field in the doc, remove it
	// (delete is a no-op when the key is absent)
	delete(doc, "_id")
	// Check for reserved keys
	for k := range doc {
		if _, ok := reservedKeys[k]; ok {
			// XXX(tsileo): delete them or raises an exception?
			delete(doc, k)
		}
	}
	// TODO(tsileo): track the hook execution time and log it
	ok, newDoc, err := docstore.hooks.Execute(collection, "post", doc)
	if err != nil {
		return nil, err
	}
	// A hook that ran but produced no doc means the insert is rejected
	if ok && newDoc == nil {
		return nil, ErrUnprocessableEntity
	}
	if ok {
		doc = newDoc
	}
	data, err := msgpack.Marshal(doc)
	if err != nil {
		return nil, err
	}
	// Build the ID and add some meta data
	now := time.Now().UTC()
	_id, err := id.New(now.UnixNano())
	if err != nil {
		return nil, err
	}
	_id.SetFlag(flagNoop)
	// Create a pointer in the key-value store (first byte of the value is the flag)
	kv, err := docstore.kvStore.Put(
		context.TODO(), fmt.Sprintf(keyFmt, collection, _id.String()), "", append([]byte{_id.Flag()}, data...), now.UnixNano(),
	)
	if err != nil {
		return nil, err
	}
	_id.SetVersion(kv.Version)
	// Index the doc if needed (return the error instead of panicking,
	// consistent with Remove)
	// FIXME(tsileo): move this to the hub via the kvstore
	if indexes, ok := docstore.indexes[collection]; ok {
		for _, index := range indexes {
			if err := index.Index(_id, doc); err != nil {
				return nil, err
			}
		}
	}
	return _id, nil
}
// query describes the different (mutually exclusive) ways to express a
// docstore query, plus the optional sort index to iterate with.
type query struct {
	storedQuery string // name of a pre-loaded stored query
	storedQueryArgs interface{} // arguments forwarded to the stored query
	basicQuery string // short boolean expression, wrapped by queryToScript
	script string // full script returning a match function
	lfunc *lua.LFunction // already-compiled Lua match function
	sortIndex string // sort index name ("" or "-_id" selects the default ID order)
}
// queryToScript builds the Lua script for a query: a basic query expression
// is wrapped into a function of the doc, a raw script is passed through
// unchanged, and anything else (a stored query) yields an empty string.
func queryToScript(q *query) string {
	switch {
	case q.basicQuery != "":
		return `return function(doc)
if ` + q.basicQuery + ` then return true else return false end
end
`
	case q.script != "":
		return q.script
	default:
		// Must be a stored query, return an empty string
		return ""
	}
}
// isMatchAll reports whether the query has no filtering criteria at all
// (i.e. every document matches).
func (q *query) isMatchAll() bool {
	return q.lfunc == nil && q.script == "" && q.basicQuery == "" && q.storedQuery == "" && q.storedQueryArgs == nil
}
// addSpecialFields decorates a fetched doc with the special "_"-prefixed
// fields derived from its ID: _id, _version, _created and, when the doc was
// modified after creation, _updated.
func addSpecialFields(doc map[string]interface{}, _id *id.ID) {
	doc["_id"] = _id
	doc["_version"] = _id.VersionString()
	created := time.Unix(0, _id.Ts()).UTC().Format(time.RFC3339)
	doc["_created"] = created
	// Only set _updated when the version differs from the creation time.
	// (The original compared the raw numeric version against the RFC3339
	// string, which was always unequal, so _updated was always set.)
	updated := time.Unix(0, int64(_id.Version())).UTC().Format(time.RFC3339)
	if updated != created {
		doc["_updated"] = updated
	}
}
// Update replaces the content of doc `sid` in the given collection with
// newDoc and returns the ID carrying the new version.
//
// If ifMatch is non-empty, the update only proceeds when it equals the
// current version (If-Match / precondition-failed semantics).
func (docstore *DocStore) Update(collection, sid string, newDoc map[string]interface{}, ifMatch string) (*id.ID, error) {
	// Serialize updates on the same doc
	docstore.locker.Lock(sid)
	defer docstore.locker.Unlock(sid)
	ctx := context.Background()
	// Fetch the actual doc
	doc := map[string]interface{}{}
	_id, _, err := docstore.Fetch(collection, sid, &doc, false, -1)
	if err != nil {
		if err == vkv.ErrNotFound || _id.Flag() == flagDeleted {
			return nil, ErrDocNotFound
		}
		return nil, err
	}
	// Pre-condition (done via If-Match header/status precondition failed)
	if ifMatch != "" && ifMatch != _id.VersionString() {
		return nil, ErrPreconditionFailed
	}
	// Field/key starting with `_` are forbidden, remove them
	for k := range newDoc {
		if _, ok := reservedKeys[k]; ok {
			delete(newDoc, k)
		}
	}
	data, err := msgpack.Marshal(newDoc)
	if err != nil {
		return nil, err
	}
	docstore.logger.Debug("Update", "_id", sid, "new_doc", newDoc)
	kv, err := docstore.kvStore.Put(ctx, fmt.Sprintf(keyFmt, collection, _id.String()), "", append([]byte{_id.Flag()}, data...), -1)
	if err != nil {
		return nil, err
	}
	_id.SetVersion(kv.Version)
	// Re-index the updated doc if needed
	// FIXME(tsileo): move this to the hub via the kvstore
	if indexes, ok := docstore.indexes[collection]; ok {
		for _, index := range indexes {
			if err := index.Index(_id, newDoc); err != nil {
				return nil, err
			}
		}
	}
	// The original returned `nil, err` here (err always nil at this point),
	// handing callers a nil ID on success; return the updated ID instead.
	return _id, nil
}
// Remove soft-deletes the doc `sid` from the given collection by writing a
// new version flagged as deleted, and returns the ID of that tombstone.
func (docstore *DocStore) Remove(collection, sid string) (*id.ID, error) {
	docstore.locker.Lock(sid)
	defer docstore.locker.Unlock(sid)
	_id, _, err := docstore.Fetch(collection, sid, nil, false, -1)
	if err != nil {
		if err == vkv.ErrNotFound || _id.Flag() == flagDeleted {
			return nil, ErrDocNotFound
		}
		return nil, err
	}
	// Write a tombstone version (only the deleted flag, no payload)
	tombstone, err := docstore.kvStore.Put(context.TODO(), fmt.Sprintf(keyFmt, collection, sid), "", []byte{flagDeleted}, -1)
	if err != nil {
		return nil, err
	}
	_id.SetVersion(tombstone.Version)
	_id.SetFlag(flagDeleted)
	// Propagate the deletion to the sort indexes
	// FIXME(tsileo): move this to the hub via the kvstore
	if colIndexes, ok := docstore.indexes[collection]; ok {
		for _, idx := range colIndexes {
			if err := idx.Index(_id, nil); err != nil {
				return nil, err
			}
		}
	}
	return _id, nil
}
// LuaQuery performs a Lua query
func (docstore *DocStore) LuaQuery(L *lua.LState, lfunc *lua.LFunction, collection string, cursor string, sortIndex string, limit int) ([]map[string]interface{}, map[string]interface{}, string, *executionStats, error) {
	q := &query{
		lfunc:     lfunc,
		sortIndex: sortIndex,
	}
	docs, pointers, stats, err := docstore.query(L, collection, q, cursor, limit, true, 0)
	if err != nil {
		return nil, nil, "", nil, err
	}
	// The next cursor points just before the last returned ID
	return docs, pointers, vkv.PrevKey(stats.LastID), stats, nil
}
// Query performs a query
func (docstore *DocStore) Query(collection string, query *query, cursor string, limit int, asOf int64) ([]map[string]interface{}, map[string]interface{}, *executionStats, error) {
	// Delegate to the internal executor (no Lua state, pointers fetched)
	docs, pointers, stats, err := docstore.query(nil, collection, query, cursor, limit, true, asOf)
	if err != nil {
		return nil, nil, nil, err
	}
	// TODO(tsileo): fix this
	return docs, pointers, stats, nil
}
// query executes the given query against a collection and returns the
// matching docs, the expanded pointers and execution stats.
// docs are unmarshalled only when needed.
func (docstore *DocStore) query(L *lua.LState, collection string, query *query, cursor string, limit int, fetchPointers bool, asOf int64) ([]map[string]interface{}, map[string]interface{}, *executionStats, error) {
	// Init some stuff
	tstart := time.Now()
	stats := &executionStats{}
	var err error
	var docPointers map[string]interface{}
	pointers := map[string]interface{}{}
	docs := []map[string]interface{}{}
	// Tweak the internal query batch limit: over-fetch a bit since some
	// docs may be filtered out by the matcher
	fetchLimit := int(float64(limit) * 1.3)
	// Select the ID iterator (XXX sort indexes are a WIP)
	var it IDIterator
	if query.sortIndex == "" || query.sortIndex == "-_id" {
		// Use the default ID iterator (iter IDs in reverse order
		it = newNoIndexIterator(docstore.kvStore)
	} else {
		if indexes, ok := docstore.indexes[collection]; ok {
			if idx, ok := indexes[query.sortIndex]; ok {
				it = idx
			}
		}
		if it == nil {
			return nil, nil, stats, fmt.Errorf("failed to select sort_index %q", query.sortIndex)
		}
	}
	stats.Index = it.Name()
	// Select the query matcher: match-all when there's no query at all,
	// the Lua engine otherwise
	var qmatcher QueryMatcher
	switch {
	case query.isMatchAll():
		stats.Engine = "match_all"
		qmatcher = &MatchAllEngine{}
	default:
		qmatcher, err = docstore.newLuaQueryEngine(L, query)
		if err != nil {
			return nil, nil, stats, err
		}
		stats.Engine = "lua"
	}
	defer qmatcher.Close()
	start := cursor
	// Init the logger
	qLogger := docstore.logger.New("query", query, "query_engine", stats.Engine, "id", logext.RandId(8))
	qLogger.Info("new query")
QUERY:
	for {
		// Loop until we have the number of requested documents, or if we scanned everything
		qLogger.Debug("internal query", "limit", limit, "start", start, "cursor", cursor, "nreturned", stats.NReturned)
		// FIXME(tsileo): use `PrefixKeys` if ?sort=_id (-_id by default).
		// Fetch a batch from the iterator.
		// NOTE(review): `cursor` and `err` are re-declared here, shadowing the
		// outer ones; `start = cursor` below reads this loop-local cursor.
		_ids, cursor, err := it.Iter(collection, start, fetchLimit, asOf)
		if err != nil {
			panic(err)
		}
		for _, _id := range _ids {
			// Skip tombstones (deleted docs)
			if _id.Flag() == flagDeleted {
				qLogger.Debug("skipping deleted doc", "_id", _id, "as_of", asOf)
				continue
			}
			qLogger.Debug("fetch doc", "_id", _id, "as_of", asOf)
			stats.Cursor = _id.Cursor()
			doc := map[string]interface{}{}
			var err error
			// Fetch the version tied to the ID (the iterator is taking care of selecting an ID version)
			if _id, docPointers, err = docstore.Fetch(collection, _id.String(), &doc, fetchPointers, _id.Version()); err != nil {
				panic(err)
			}
			stats.TotalDocsExamined++
			// Check if the doc match the query
			ok, err := qmatcher.Match(doc)
			if err != nil {
				return nil, nil, stats, err
			}
			if ok {
				// The document matches the query
				addSpecialFields(doc, _id)
				if fetchPointers {
					for k, v := range docPointers {
						pointers[k] = v
					}
				}
				docs = append(docs, doc)
				stats.NReturned++
				stats.LastID = _id.String()
				if stats.NReturned == limit {
					break QUERY
				}
			}
		}
		if len(_ids) == 0 { // || len(_ids) < fetchLimit {
			// Nothing left to scan
			break
		}
		start = cursor
	}
	duration := time.Since(tstart)
	qLogger.Debug("scan done", "duration", duration, "nReturned", stats.NReturned, "scanned", stats.TotalDocsExamined, "cursor", stats.Cursor)
	stats.ExecutionTimeNano = duration.Nanoseconds()
	return docs, pointers, stats, nil
}
// RebuildIndexes rebuilds all the sort indexes of a collection by replaying
// every version of every document in chronological order.
func (docstore *DocStore) RebuildIndexes(collection string) error {
	// FIXME(tsileo): locking
	if indexes, ok := docstore.indexes[collection]; ok {
		for _, index := range indexes {
			// FIXME(tsileo): make an preprareRebuild interface optional
			if err := index.(*sortIndex).prepareRebuild(); err != nil {
				panic(err)
			}
		}
	}
	// Bounds of the collection keyspace — despite the names, `end` is the
	// smaller key: they are passed as (end, start) because ReverseKeys
	// iterates backward.
	end := fmt.Sprintf(keyFmt, collection, "")
	start := fmt.Sprintf(keyFmt, collection, "\xff")
	// List keys from the kvstore
	res, _, err := docstore.kvStore.ReverseKeys(context.TODO(), end, start, -1)
	if err != nil {
		return err
	}
	for _, kv := range res {
		// Build the ID
		_id, err := idFromKey(collection, kv.Key)
		if err != nil {
			return err
		}
		// Add the extra metadata to the ID
		_id.SetFlag(kv.Data[0])
		_id.SetVersion(kv.Version)
		// Fetch all the versions of this document
		kvv, _, err := docstore.kvStore.Versions(context.TODO(), fmt.Sprintf(keyFmt, collection, _id.String()), "0", -1)
		if err != nil {
			if err == vkv.ErrNotFound {
				continue
			}
			return err
		}
		// No anterior versions, skip it
		if len(kvv.Versions) == 0 {
			continue
		}
		// Reverse the versions in place to get chronological order
		for i := len(kvv.Versions)/2 - 1; i >= 0; i-- {
			opp := len(kvv.Versions) - 1 - i
			kvv.Versions[i], kvv.Versions[opp] = kvv.Versions[opp], kvv.Versions[i]
		}
		// Re-index each versions in chronological order
		for _, version := range kvv.Versions {
			_id.SetFlag(version.Data[0])
			_id.SetVersion(version.Version)
			var doc map[string]interface{}
			// Tombstones keep a nil doc so the index drops the entry
			if _id.Flag() != flagDeleted {
				doc = map[string]interface{}{}
				if err := msgpack.Unmarshal(version.Data[1:], &doc); err != nil {
					return err
				}
			}
			if indexes, ok := docstore.indexes[collection]; ok {
				for _, index := range indexes {
					if err := index.Index(_id, doc); err != nil {
						return err
					}
				}
			}
			// fmt.Printf("_id=%+v|%d|%+v\n", _id, version.Version, doc)
			// FIXME(tsileo): re-index the doc if needed
		}
	}
	return nil
}
// HTTP handler triggering a rebuild of the sort indexes of a collection
// (POST /{collection}/_rebuild_indexes)
func (docstore *DocStore) reindexDocsHandler() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		collection := mux.Vars(r)["collection"]
		if collection == "" {
			httputil.WriteJSONError(w, http.StatusInternalServerError, "Missing collection in the URL")
			return
		}
		if r.Method != "POST" {
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}
		if !auth.Can(
			w,
			r,
			// TODO(tsileo): tweak the perms
			perms.Action(perms.List, perms.JSONDocument),
			perms.Resource(perms.DocStore, perms.JSONDocument),
		) {
			auth.Forbidden(w)
			return
		}
		if err := docstore.RebuildIndexes(collection); err != nil {
			panic(err)
		}
		w.WriteHeader(http.StatusCreated)
	}
}
// HTTP handler for the collection (handle listing+query+insert)
func (docstore *DocStore) docsHandler() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		q := httputil.NewQuery(r.URL.Query())
		vars := mux.Vars(r)
		collection := vars["collection"]
		if collection == "" {
			httputil.WriteJSONError(w, http.StatusInternalServerError, "Missing collection in the URL")
			return
		}
		switch r.Method {
		case "GET", "HEAD":
			// List/query the collection (HEAD acts as a count query)
			if !auth.Can(
				w,
				r,
				perms.Action(perms.List, perms.JSONDocument),
				perms.Resource(perms.DocStore, perms.JSONDocument),
			) {
				auth.Forbidden(w)
				return
			}
			var asOf int64
			var err error
			// Parse the cursor
			cursor := q.Get("cursor")
			// Parse the optional "as of" (time travel) args
			if v := q.Get("as_of"); v != "" {
				if asOf, err = asof.ParseAsOf(v); err != nil {
					// The parse error was previously silently ignored
					httputil.Error(w, err)
					return
				}
			}
			if asOf == 0 {
				asOf, err = q.GetInt64Default("as_of_nano", 0)
				if err != nil {
					panic(err)
				}
			}
			// Parse the query (JSON-encoded)
			var queryArgs interface{}
			jsQuery := q.Get("stored_query_args")
			if jsQuery != "" {
				if err := json.Unmarshal([]byte(jsQuery), &queryArgs); err != nil {
					httputil.WriteJSONError(w, http.StatusInternalServerError, "Failed to decode JSON query")
					return
				}
			}
			limit, err := q.GetInt("limit", 50, 1000)
			if err != nil {
				httputil.Error(w, err)
				return
			}
			// Execute the query
			docs, pointers, stats, err := docstore.query(nil, collection, &query{
				storedQueryArgs: queryArgs,
				storedQuery:     q.Get("stored_query"),
				script:          q.Get("script"),
				basicQuery:      q.Get("query"),
				sortIndex:       q.Get("sort_index"),
			}, cursor, limit, true, asOf)
			if err != nil {
				docstore.logger.Error("query failed", "err", err)
				httputil.Error(w, err)
				return
			}
			// Set some meta headers to help the client build subsequent query
			// (iterator/cursor handling)
			var hasMore bool
			// Guess if they're are still results on client-side,
			// by checking if NReturned < limit, we can deduce there's no more results.
			// The cursor should be the start of the next query
			if stats.NReturned == limit {
				hasMore = true
			}
			w.Header().Set("BlobStash-DocStore-Iter-Has-More", strconv.FormatBool(hasMore))
			w.Header().Set("BlobStash-DocStore-Iter-Cursor", stats.Cursor)
			// w.Header().Set("BlobStash-DocStore-Query-Optimizer", stats.Optimizer)
			// if stats.Optimizer != optimizer.Linear {
			// 	w.Header().Set("BlobStash-DocStore-Query-Index", stats.Index)
			// }
			// Set headers for the query stats
			w.Header().Set("BlobStash-DocStore-Query-Index", stats.Index)
			w.Header().Set("BlobStash-DocStore-Query-Engine", stats.Engine)
			w.Header().Set("BlobStash-DocStore-Query-Returned", strconv.Itoa(stats.NReturned))
			w.Header().Set("BlobStash-DocStore-Query-Examined", strconv.Itoa(stats.TotalDocsExamined))
			w.Header().Set("BlobStash-DocStore-Query-Exec-Time-Nano", strconv.FormatInt(stats.ExecutionTimeNano, 10))
			w.Header().Set("BlobStash-DocStore-Results-Count", strconv.Itoa(stats.NReturned))
			// This way, HEAD request can acts as a count query
			if r.Method == "HEAD" {
				return
			}
			// Write the JSON response (encoded if requested)
			httputil.MarshalAndWrite(r, w, &map[string]interface{}{
				"pointers": pointers,
				"data":     docs,
				"pagination": map[string]interface{}{
					"cursor":   stats.Cursor,
					"has_more": hasMore,
					"count":    stats.NReturned,
					"per_page": limit,
				},
			})
		case "POST":
			// Insert a new document.
			// NOTE(review): unlike GET/HEAD, POST performs no auth.Can check
			// here — confirm this is guarded upstream (basicAuth middleware).
			// permissions.CheckPerms(r, PermCollectionName, collection, PermWrite)
			// Read the whole body
			blob, err := ioutil.ReadAll(r.Body)
			if err != nil {
				panic(err)
			}
			// Ensure it's JSON encoded
			doc := map[string]interface{}{}
			if err := json.Unmarshal(blob, &doc); err != nil {
				docstore.logger.Error("Failed to parse JSON input", "collection", collection, "err", err)
				panic(httputil.NewPublicErrorFmt("Invalid JSON document"))
			}
			// Check for reserved keys
			for k := range doc {
				if _, ok := reservedKeys[k]; ok {
					// XXX(tsileo): delete them or raises an exception?
					delete(doc, k)
				}
			}
			// Actually insert the doc
			_id, err := docstore.Insert(collection, doc)
			if err == ErrUnprocessableEntity {
				// FIXME(tsileo): returns an object with field errors (set via the Lua API in the hook)
				w.WriteHeader(http.StatusUnprocessableEntity)
				return
			}
			if err != nil {
				panic(err)
			}
			// Output some headers
			w.Header().Set("BlobStash-DocStore-Doc-Id", _id.String())
			w.Header().Set("BlobStash-DocStore-Doc-Version", _id.VersionString())
			w.Header().Set("BlobStash-DocStore-Doc-CreatedAt", strconv.FormatInt(_id.Ts(), 10))
			created := time.Unix(0, _id.Ts()).UTC().Format(time.RFC3339)
			httputil.MarshalAndWrite(r, w, map[string]interface{}{
				"_id":      _id.String(),
				"_created": created,
				"_version": _id.VersionString(),
			},
				httputil.WithStatusCode(http.StatusCreated))
			return
		default:
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}
	}
}
// JSON input for the map reduce endpoint
type mapReduceInput struct {
	Map string `json:"map"` // map-step script, installed via SetupMap and run per doc
	MapScope map[string]interface{} `json:"map_scope"` // extra scope for the map step — NOTE(review): not referenced in this chunk, confirm it is used
	Reduce string `json:"reduce"` // reduce-step script, installed via SetupReduce
	ReduceScope map[string]interface{} `json:"reduce_scope"` // extra scope for the reduce step — NOTE(review): not referenced in this chunk, confirm it is used
}
// mapReduceHandler runs a map/reduce pipeline over a whole collection
// (POST /{collection}/_map_reduce) and writes back the finalized result.
// Pages of 50 docs are mapped/reduced in parallel (up to 6 in flight) and
// merged into a root engine by a single reducer goroutine.
func (docstore *DocStore) mapReduceHandler() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		q := httputil.NewQuery(r.URL.Query())
		vars := mux.Vars(r)
		collection := vars["collection"]
		if collection == "" {
			httputil.WriteJSONError(w, http.StatusInternalServerError, "Missing collection in the URL")
			return
		}
		switch r.Method {
		case "POST":
			// Decode the map/reduce scripts from the JSON body
			input := &mapReduceInput{}
			if err := json.NewDecoder(r.Body).Decode(input); err != nil {
				panic(httputil.NewPublicErrorFmt("Invalid JSON input"))
			}
			// Parse the optional "as of" (time travel) query args
			var asOf int64
			var err error
			if v := q.Get("as_of"); v != "" {
				t, err := time.Parse("2006-1-2 15:4:5", v)
				if err != nil {
					panic(err)
				}
				asOf = t.UTC().UnixNano()
			}
			if asOf == 0 {
				asOf, err = q.GetInt64Default("as_of_nano", 0)
				if err != nil {
					panic(err)
				}
			}
			// Parse the query (JSON-encoded)
			var queryArgs interface{}
			jsQuery := q.Get("stored_query_args")
			if jsQuery != "" {
				if err := json.Unmarshal([]byte(jsQuery), &queryArgs); err != nil {
					httputil.WriteJSONError(w, http.StatusInternalServerError, "Failed to decode JSON query")
					return
				}
			}
			// Root engine: every per-batch engine gets merged into it
			rootMre := NewMapReduceEngine()
			defer rootMre.Close()
			if err := rootMre.SetupReduce(input.Reduce); err != nil {
				panic(err)
			}
			if err := rootMre.SetupMap(input.Map); err != nil {
				panic(err)
			}
			batches := make(chan *MapReduceEngine)
			// Reduce the batches into a single one as they're done
			// TODO(tsileo): find a way to interrupt the pipeline on error
			inFlight := 6
			limiter := make(chan struct{}, inFlight)
			errc := make(chan error, inFlight)
			stop := make(chan struct{}, inFlight)
			// Prepare the process of the batch result: single consumer of
			// `batches`; after the first error, remaining batches are drained
			// and discarded, and the query loop is told to stop.
			go func() {
				var discard bool
				for batch := range batches {
					if discard {
						continue
					}
					if batch.err != nil {
						// propagate the error
						discard = true
						stop <- struct{}{}
						errc <- batch.err
					}
					if err := rootMre.Reduce(batch); err != nil {
						// propagate the error
						discard = true
						stop <- struct{}{}
						errc <- err
					}
				}
				errc <- nil
			}()
			hasMore := true
			var cursor string
			// Batch size
			limit := 50
			// NOTE(review): this `q :=` shadows the httputil query above; the
			// q.Get(...) calls in the initializer still read the outer q (the
			// new binding is only in scope after the declaration).
			q := &query{
				storedQueryArgs: queryArgs,
				storedQuery:     q.Get("stored_query"),
				script:          q.Get("script"),
				basicQuery:      q.Get("query"),
			}
			var wg sync.WaitGroup
		QUERY_LOOP:
			for {
				select {
				case <-stop:
					// A batch failed, stop feeding the pipeline
					break QUERY_LOOP
				default:
					// Fetch a page
					if !hasMore {
						time.Sleep(50 * time.Millisecond)
						break
					}
					docs, _, stats, err := docstore.query(nil, collection, q, cursor, limit, true, asOf)
					if err != nil {
						docstore.logger.Error("query failed", "err", err)
						httputil.Error(w, err)
						return
					}
					// Process the batch in parallel (at most inFlight at once)
					wg.Add(1)
					limiter <- struct{}{}
					go func(doc []map[string]interface{}) {
						defer func() {
							wg.Done()
							<-limiter
						}()
						// Each batch maps/reduces on its own engine copy
						mre, err := rootMre.Duplicate()
						if err != nil {
							panic(err)
						}
						defer mre.Close()
						// XXX(tsileo): pass the pointers in the Lua map?
						// Call Map for each document
						for _, doc := range docs {
							if err := mre.Map(doc); err != nil {
								mre.err = err
								batches <- mre
								return
							}
						}
						if err := mre.Reduce(nil); err != nil {
							mre.err = err
						}
						batches <- mre
					}(docs)
					// Guess if they're are still results on client-side,
					// by checking if NReturned < limit, we can deduce there's no more results.
					// The cursor should be the start of the next query
					if stats.NReturned < limit {
						hasMore = false
						break QUERY_LOOP
					}
					cursor = vkv.PrevKey(stats.LastID)
				}
			}
			// Wait for all in-flight batches, then signal the reducer goroutine
			wg.Wait()
			close(batches)
			// Wait for the reduce step to be done
			if err := <-errc; err != nil {
				docstore.logger.Error("reduce failed", "err", err)
				httputil.Error(w, err)
				return
			}
			result, err := rootMre.Finalize()
			if err != nil {
				docstore.logger.Error("finalize failed", "err", err)
				httputil.Error(w, err)
				return
			}
			// Write the JSON response (encoded if requested)
			httputil.MarshalAndWrite(r, w, &map[string]interface{}{
				"data": result,
			})
		default:
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}
	}
}
// FetchVersions returns all versions/revisions for the given doc ID.
//
// Results are the decoded docs (most recent first), the expanded pointers
// (when fetchPointers is set) and a cursor for fetching older versions.
func (docstore *DocStore) FetchVersions(collection, sid string, start int64, limit int, fetchPointers bool) ([]map[string]interface{}, map[string]interface{}, int64, error) {
	var cursor int64
	// TODO(tsileo): better output than a slice of `map[string]interface{}`
	if collection == "" {
		return nil, nil, cursor, errors.New("missing collection query arg")
	}
	// Fetch the KV versions entry for this _id
	// XXX(tsileo): use int64 for start/end
	kvv, _, err := docstore.kvStore.Versions(context.TODO(), fmt.Sprintf(keyFmt, collection, sid), strconv.FormatInt(start, 10), limit)
	// FIXME(tsileo): return the cursor from Versions
	if err != nil {
		return nil, nil, cursor, err
	}
	docs := []map[string]interface{}{}
	pointers := map[string]interface{}{}
	for _, kv := range kvv.Versions {
		var doc map[string]interface{}
		// The first byte of the value is the flag, the rest the msgpack doc
		if err := msgpack.Unmarshal(kv.Data[1:], &doc); err != nil {
			return nil, nil, cursor, fmt.Errorf("failed to unmarshal blob")
		}
		// Build a dedicated ID per version (each doc embeds its own *id.ID,
		// so the ID cannot be hoisted out of the loop).
		// Return an error instead of panicking on a malformed _id.
		_id, err := id.FromHex(sid)
		if err != nil {
			return nil, nil, cursor, fmt.Errorf("invalid _id %q: %v", sid, err)
		}
		_id.SetVersion(kv.Version)
		addSpecialFields(doc, _id)
		if fetchPointers {
			docPointers, err := docstore.fetchPointers(doc)
			if err != nil {
				return nil, nil, cursor, err
			}
			for k, v := range docPointers {
				pointers[k] = v
			}
		}
		docs = append(docs, doc)
		// Cursor for the next page: just before this version
		cursor = kv.Version - 1
	}
	return docs, pointers, cursor, nil
}
// Fetch a single document into `res` and returns the `id.ID`
//
// res may be nil (only the ID/flag is returned), a *map[string]interface{}
// (doc decoded in place) or a *[]byte (doc re-encoded as JSON and appended).
// version selects a specific version (-1 = latest); when fetchPointers is
// set, the doc's pointers are expanded and returned as the second value.
func (docstore *DocStore) Fetch(collection, sid string, res interface{}, fetchPointers bool, version int64) (*id.ID, map[string]interface{}, error) {
	if collection == "" {
		return nil, nil, errors.New("missing collection query arg")
	}
	// Fetch the VKV entry for this _id
	kv, err := docstore.kvStore.Get(context.TODO(), fmt.Sprintf(keyFmt, collection, sid), version)
	if err != nil {
		return nil, nil, err
	}
	// Parse the ID
	_id, err := id.FromHex(sid)
	if err != nil {
		return nil, nil, fmt.Errorf("invalid _id: %v", err)
	}
	// Extract the hash (first byte is the Flag)
	// XXX(tsileo): add/handle a `Deleted` flag
	blob := kv.Data[1:]
	var pointers map[string]interface{}
	// FIXME(tsileo): handle deleted docs (also in the admin/query)
	// An empty blob is a tombstone (deleted doc): res is left untouched
	if len(blob) > 0 {
		// Build the doc
		switch idoc := res.(type) {
		case nil:
			// Do nothing
		case *map[string]interface{}:
			if err := msgpack.Unmarshal(blob, idoc); err != nil {
				return nil, nil, fmt.Errorf("failed to unmarshal blob: %s", blob)
			}
			// TODO(tsileo): set the special fields _created/_updated/_hash
			if fetchPointers {
				pointers, err = docstore.fetchPointers(*idoc)
				if err != nil {
					return nil, nil, err
				}
			}
		case *[]byte:
			// Decode the doc and encode it to JSON
			out := map[string]interface{}{}
			if err := msgpack.Unmarshal(blob, &out); err != nil {
				return nil, nil, fmt.Errorf("failed to unmarshal blob: %s", blob)
			}
			// TODO(tsileo): set the special fields _created/_updated/_hash
			js, err := json.Marshal(out)
			if err != nil {
				return nil, nil, err
			}
			// Just the copy if JSON if a []byte is provided
			*idoc = append(*idoc, js...)
		}
	}
	_id.SetFlag(kv.Data[0])
	_id.SetVersion(kv.Version)
	return _id, pointers, nil
}
// HTTP handler for serving/updating a single doc
func (docstore *DocStore) docHandler() func(http.ResponseWriter, *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
collection := vars["collection"]
if collection == "" {
httputil.WriteJSONError(w, http.StatusInternalServerError, "Missing collection in the URL")
return
}
sid := vars["_id"]
if sid == "" {
httputil.WriteJSONError(w, http.StatusInternalServerError, "Missing _id in the URL")
return
}
var _id *id.ID
var err error
switch r.Method {
case "GET", "HEAD":
// Serve the document JSON encoded
// permissions.CheckPerms(r, PermCollectionName, collection, PermRead)
// js := []byte{}
var doc, pointers map[string]interface{}
// FIXME(tsileo): support asOf?
if _id, pointers, err = docstore.Fetch(collection, sid, &doc, true, -1); err != nil {
if err == vkv.ErrNotFound || _id.Flag() == flagDeleted {
// Document doesn't exist, returns a status 404
w.WriteHeader(http.StatusNotFound)
return
}
panic(err)
}
// FIXME(tsileo): fix-precondition, suport If-Match
if etag := r.Header.Get("If-None-Match"); etag != "" {
if etag == _id.VersionString() {
w.WriteHeader(http.StatusNotModified)
return
}
}
w.Header().Set("ETag", _id.VersionString())
addSpecialFields(doc, _id)
if r.Method == "GET" {
httputil.MarshalAndWrite(r, w, map[string]interface{}{
"data": doc,
"pointers": pointers,
})
}
return
case "PATCH":
// Patch the document (JSON-Patch/RFC6902)
// Lock the document before making any change to it, this way the PATCH operation is *truly* atomic/safe
docstore.locker.Lock(sid)
defer docstore.locker.Unlock(sid)
ctx := context.Background()
// Fetch the current doc
js := []byte{}
if _id, _, err = docstore.Fetch(collection, sid, &js, false, -1); err != nil {
if err == vkv.ErrNotFound {
// Document doesn't exist, returns a status 404
w.WriteHeader(http.StatusNotFound)
return
}
panic(err)
}
// FIXME(tsileo): make it required?
if etag := r.Header.Get("If-Match"); etag != "" {
if etag != _id.VersionString() {
w.WriteHeader(http.StatusPreconditionFailed)
return
}
}
buf, err := ioutil.ReadAll(r.Body)
if err != nil {
panic(err)
}
patch, err := jsonpatch.DecodePatch(buf)
if err != nil {
panic(err)
}
docstore.logger.Debug("patch decoded", "patch", patch)
pdata, err := patch.Apply(js)
if err != nil {
panic(err)
}
// Back to msgpack
ndoc := map[string]interface{}{}
if err := json.Unmarshal(pdata, &ndoc); err != nil {
panic(err)
}
data, err := msgpack.Marshal(ndoc)
if err != nil {
panic(err)
}
// TODO(tsileo): also check for reserved keys here
nkv, err := docstore.kvStore.Put(ctx, fmt.Sprintf(keyFmt, collection, _id.String()), "", append([]byte{_id.Flag()}, data...), -1)
if err != nil {
panic(err)
}
_id.SetVersion(nkv.Version)
// FIXME(tsileo): move this to the hub via the kvstore
if indexes, ok := docstore.indexes[collection]; ok {
for _, index := range indexes {
if err := index.Index(_id, ndoc); err != nil {
panic(err)
}
}
}
w.Header().Set("ETag", _id.VersionString())
created := time.Unix(0, _id.Ts()).UTC().Format(time.RFC3339)
httputil.MarshalAndWrite(r, w, map[string]interface{}{
"_id": _id.String(),
"_created": created,
"_version": _id.VersionString(),
})
return
case "POST":
// Update the whole document
// Parse the update query
var newDoc map[string]interface{}
if err := json.NewDecoder(r.Body).Decode(&newDoc); err != nil {
panic(err)
}
// Perform the update
_id, err := docstore.Update(collection, sid, newDoc, r.Header.Get("If-Match"))
switch err {
case nil:
case ErrDocNotFound:
w.WriteHeader(http.StatusNotFound)
case ErrPreconditionFailed:
w.WriteHeader(http.StatusPreconditionFailed)
return
default:
panic(err)
}
w.Header().Set("ETag", _id.VersionString())
created := time.Unix(0, _id.Ts()).UTC().Format(time.RFC3339)
httputil.MarshalAndWrite(r, w, map[string]interface{}{
"_id": _id.String(),
"_created": created,
"_version": _id.VersionString(),
})
return
case "DELETE":
_, err := docstore.Remove(collection, sid)
switch err {
case nil:
case ErrDocNotFound:
w.WriteHeader(http.StatusNotFound)
default:
panic(err)
}
}
}
}
// HTTP handler for serving/updating a single doc
func (docstore *DocStore) docVersionsHandler() func(http.ResponseWriter, *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
collection := vars["collection"]
if collection == "" {
httputil.WriteJSONError(w, http.StatusInternalServerError, "Missing collection in the URL")
return
}
sid := vars["_id"]
if sid == "" {
httputil.WriteJSONError(w, http.StatusInternalServerError, "Missing _id in the URL")
return
}
var _id *id.ID
switch r.Method {
case "GET", "HEAD":
q := httputil.NewQuery(r.URL.Query())
limit, err := q.GetIntDefault("limit", 50)
if err != nil {
httputil.Error(w, err)
return
}
cursor, err := q.GetInt64Default("cursor", time.Now().UTC().UnixNano())
if err != nil {
httputil.Error(w, err)
return
}
fetchPointers, err := q.GetBoolDefault("fetch_pointers", true)
if err != nil {
httputil.Error(w, err)
return
}
// Serve the document JSON encoded
// permissions.CheckPerms(r, PermCollectionName, collection, PermRead)
// js := []byte{}
docs, pointers, cursor, err := docstore.FetchVersions(collection, sid, cursor, limit, fetchPointers)
if err != nil {
if err == vkv.ErrNotFound || _id.Flag() == flagDeleted {
// Document doesn't exist, returns a status 404
w.WriteHeader(http.StatusNotFound)
return
}
panic(err)
}
if r.Method == "GET" {
httputil.MarshalAndWrite(r, w, map[string]interface{}{
"pointers": pointers,
"data": docs,
"pagination": map[string]interface{}{
"cursor": cursor,
"has_more": len(docs) == limit,
"count": len(docs),
"per_page": limit,
},
})
}
default:
w.WriteHeader(http.StatusMethodNotAllowed)
return
}
}
}
docstore: cleanup docs
/*
Package docstore implements a JSON-based document store
built on top of the Versioned Key-Value store and the Blob store.
Each document will get assigned a MongoDB like ObjectId:
<binary encoded uint32 (4 bytes) + 8 random bytes hex encoded >
The resulting id will have a length of 24 characters encoded as hex (12 raw bytes).
The JSON document will be stored directly inside the vkv entry.
docstore:<collection>:<id> => <flag (1 byte) + JSON blob>
Document will be automatically sorted by creation time thanks to the ID.
The raw JSON will be stored as is, but the API will add the _id and other special fields on the fly.
*/
package docstore // import "a4.io/blobstash/pkg/docstore"
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/evanphx/json-patch"
"github.com/gorilla/mux"
log "github.com/inconshreveable/log15"
logext "github.com/inconshreveable/log15/ext"
"github.com/vmihailenco/msgpack"
"github.com/yuin/gopher-lua"
"a4.io/blobstash/pkg/asof"
"a4.io/blobstash/pkg/auth"
"a4.io/blobstash/pkg/config"
"a4.io/blobstash/pkg/docstore/id"
"a4.io/blobstash/pkg/filetree"
"a4.io/blobstash/pkg/httputil"
"a4.io/blobstash/pkg/httputil/bewit"
"a4.io/blobstash/pkg/perms"
"a4.io/blobstash/pkg/stash/store"
"a4.io/blobstash/pkg/vkv"
)
// FIXME(tsileo): create a "meta" hook for handling indexing
// will need to solve few issues before:
// - do we need to check if the doc is already indexed?

// Key formats used to lay out documents (and indexes) inside the kv store.
var (
	prefixKey    = "docstore:"
	prefixKeyFmt = prefixKey + "%s"
	// keyFmt builds "docstore:<collection>:<hex _id>"
	keyFmt            = prefixKeyFmt + ":%s"
	PrefixIndexKeyFmt = "docstore-index:%s"
	IndexKeyFmt       = PrefixIndexKeyFmt + ":%s"
)

// ErrUnprocessableEntity is returned when a document is faulty
// (e.g. rejected by a collection hook).
var ErrUnprocessableEntity = errors.New("unprocessable entity")

// ErrDocNotFound is returned when the requested document does not exist
// or is flagged as deleted.
var ErrDocNotFound = errors.New("document not found")

// ErrPreconditionFailed is returned when an If-Match precondition does not
// match the current document version.
var ErrPreconditionFailed = errors.New("precondition failed")

// reservedKeys lists the special document fields that are computed on the
// fly by the docstore and must never be stored inside a document.
var reservedKeys = map[string]struct{}{
	"_id":      struct{}{},
	"_updated": struct{}{},
	"_created": struct{}{},
	"_version": struct{}{},
	"_hooks":   struct{}{},
}
// idFromKey extracts the document ID from a raw kv key of the form
// "docstore:<collection>:<hex id>" for the given collection.
func idFromKey(col, key string) (*id.ID, error) {
	// TrimPrefix is the precise operation here: the original used
	// strings.Replace(..., 1), which would also match the pattern anywhere
	// in the key, not just at the start.
	hexID := strings.TrimPrefix(key, fmt.Sprintf("docstore:%s:", col))
	_id, err := id.FromHex(hexID)
	if err != nil {
		return nil, err
	}
	return _id, nil
}
// Document flags, stored as the first byte of every vkv entry value.
const (
	flagNoop byte = iota // Default flag
	flagDeleted          // Marks a (soft-)deleted document
)

const (
	// Pointer prefixes recognized inside document string values.
	pointerBlobJSON = "@blobs/json:" // FIXME(tsileo): document the Pointer feature
	// PointerBlobRef = "@blobs/ref:" // FIXME(tsileo): implements this like a @filetree/ref
	pointerFiletreeRef = "@filetree/ref:"
	//PointerURLInfo = "@url/info:" // XXX(tsileo): fetch OG meta data or at least title, optionally screenshot???
	// TODO(tsileo): implements PointerKvRef
	// PointerKvRef = "@kv/ref:"
	// XXX(tsileo): allow custom Lua-defined pointer, this could be useful for implement cross-note linking in Blobs

	// Sharing TTL for the bewit link of Filetree references
	shareDuration = 30 * time.Minute
)
// executionStats holds the statistics for a single query execution
// (MongoDB "explain"-style).
type executionStats struct {
	NReturned         int    `json:"nReturned"`         // Number of documents returned
	TotalDocsExamined int    `json:"totalDocsExamined"` // Number of documents scanned
	ExecutionTimeNano int64  `json:"executionTimeNano"` // Total query duration
	LastID            string `json:"-"`                 // Last _id examined, used to build the next cursor
	Engine            string `json:"query_engine"`      // Matching engine used ("match_all" or "lua")
	Index             string `json:"index"`             // Name of the ID iterator/sort index used
	Cursor            string `json:"cursor"`
}
// DocStore holds the docstore manager
type DocStore struct {
	kvStore   store.KvStore   // Versioned kv store holding the documents
	blobStore store.BlobStore // Blob store backing @blobs/json pointers
	filetree  *filetree.FileTree
	conf      *config.Config
	// docIndex *index.HashIndexes

	hooks         *LuaHooks               // Per-collection Lua hooks (e.g. "post")
	storedQueries map[string]*storedQuery // Stored queries loaded from the config
	locker        *locker                 // Per-document locks for atomic PATCH/update

	indexes map[string]map[string]Indexer                 // collection -> index name -> sort index
	exts    map[string]map[string]map[string]interface{}  // collection -> ext name -> ext data
	schemas map[string][]*LuaSchemaField                  // schema name -> Lua-registered fields

	logger log.Logger
}

// storedQuery is a pre-loaded Lua query declared in the config.
type storedQuery struct {
	Name string // Query name (lookup key)
	Main string // Path to the query's main.lua
}
// New initializes the `DocStoreExt`: it loads the stored queries and the
// sort-index definitions from the config, and sets up the Lua hooks.
func New(logger log.Logger, conf *config.Config, kvStore store.KvStore, blobStore store.BlobStore, ft *filetree.FileTree) (*DocStore, error) {
	logger.Debug("init")
	sortIndexes := map[string]map[string]Indexer{}
	var err error
	// Load the docstore's stored queries from the config
	storedQueries := map[string]*storedQuery{}
	if conf.Docstore != nil && conf.Docstore.StoredQueries != nil {
		for _, squery := range conf.Docstore.StoredQueries {
			// First ensure the required main.lua is present
			if _, err := os.Stat(filepath.Join(squery.Path, "main.lua")); os.IsNotExist(err) {
				return nil, fmt.Errorf("missing `main.lua` for stored query %s", squery.Name)
			}
			storedQuery := &storedQuery{
				Name: squery.Name,
				Main: filepath.Join(squery.Path, "main.lua"),
			}
			storedQueries[squery.Name] = storedQuery
		}
		logger.Debug("sorted queries setup", "stored_queries", fmt.Sprintf("%+v", storedQueries))
	}
	// Load the sort indexes definitions if any
	if conf.Docstore != nil && conf.Docstore.SortIndexes != nil {
		for collection, indexes := range conf.Docstore.SortIndexes {
			sortIndexes[collection] = map[string]Indexer{}
			for sortIndexName, sortIndex := range indexes {
				// NOTE(review): only Fields[0] is used — looks like only
				// single-field sort indexes are supported; confirm before
				// configuring multi-field indexes
				sortIndexes[collection][sortIndexName], err = newSortIndex(sortIndexName, sortIndex.Fields[0])
				if err != nil {
					return nil, fmt.Errorf("failed to init index: %v", err)
				}
			}
		}
		logger.Debug("indexes setup", "indexes", fmt.Sprintf("%+v", sortIndexes))
	}
	// Setup the Lua hooks
	hooks, err := newLuaHooks(conf, ft, blobStore)
	if err != nil {
		return nil, err
	}
	return &DocStore{
		kvStore:       kvStore,
		blobStore:     blobStore,
		filetree:      ft,
		storedQueries: storedQueries,
		hooks:         hooks,
		conf:          conf,
		locker:        newLocker(),
		logger:        logger,
		indexes:       sortIndexes,
		schemas:       map[string][]*LuaSchemaField{},
		exts:          map[string]map[string]map[string]interface{}{},
	}, nil
}
// Close closes all the open DB files backing the sort indexes,
// stopping at (and returning) the first error encountered.
func (docstore *DocStore) Close() error {
	for _, colIndexes := range docstore.indexes {
		for _, idx := range colIndexes {
			if err := idx.Close(); err != nil {
				return err
			}
		}
	}
	return nil
}
// LuaSetupSortIndex creates (once) a sort index on `field` named `name`
// for the given collection; calling it again for an existing index is a no-op.
func (dc *DocStore) LuaSetupSortIndex(col, name, field string) error {
	colIndexes, ok := dc.indexes[col]
	if !ok {
		colIndexes = map[string]Indexer{}
		dc.indexes[col] = colIndexes
	}
	if _, alreadySetup := colIndexes[name]; alreadySetup {
		// Index already configured, nothing to do
		return nil
	}
	idx, err := newSortIndex(name, field)
	if err != nil {
		return fmt.Errorf("failed to init index: %v", err)
	}
	colIndexes[name] = idx
	return nil
}
// LuaSchemaField describes a single field of a Lua-registered schema.
type LuaSchemaField struct {
	Name string                 // Field name
	Type string                 // Field type identifier
	Data map[string]interface{} // Optional extra metadata attached to the field
}
// LuaRegisterSchema registers a document schema (a list of field
// definitions coming from Lua) under the given name.
//
// Each field is expected to be a table with "field_name" and "field_type"
// string entries and an optional "data" table. Malformed fields now return
// a descriptive error instead of panicking on a failed type assertion
// (the function already had an error return).
func (dc *DocStore) LuaRegisterSchema(name string, fields []interface{}) error {
	schema := make([]*LuaSchemaField, 0, len(fields))
	for _, field := range fields {
		fdata, ok := field.(map[string]interface{})
		if !ok {
			return fmt.Errorf("invalid schema field %+v: expected a table", field)
		}
		fname, ok := fdata["field_name"].(string)
		if !ok {
			return fmt.Errorf("invalid schema field %+v: missing or non-string field_name", field)
		}
		ftype, ok := fdata["field_type"].(string)
		if !ok {
			return fmt.Errorf("invalid schema field %+v: missing or non-string field_type", field)
		}
		var data map[string]interface{}
		if dat, ok := fdata["data"]; ok {
			if data, ok = dat.(map[string]interface{}); !ok {
				return fmt.Errorf("invalid schema field %+v: data must be a table", field)
			}
		}
		schema = append(schema, &LuaSchemaField{
			Name: fname,
			Type: ftype,
			Data: data,
		})
	}
	dc.schemas[name] = schema
	dc.logger.Debug("setup new schema", "name", name, "schema", fmt.Sprintf("%q", schema))
	return nil
}
// LuaGetExt returns the data registered (via SetupExt) for the given
// collection/ext pair, or an error when no such ext exists.
func (dc *DocStore) LuaGetExt(col, ext string) (map[string]interface{}, error) {
	colExts, ok := dc.exts[col]
	if ok {
		if data, found := colExts[ext]; found {
			return data, nil
		}
	}
	return nil, fmt.Errorf("no ext %s %s", col, ext)
}
// LuaGetSchema returns the schema registered under the given name,
// or an error when it does not exist.
func (dc *DocStore) LuaGetSchema(name string) ([]*LuaSchemaField, error) {
	schema, ok := dc.schemas[name]
	if !ok {
		return nil, fmt.Errorf("schema %q not found", name)
	}
	return schema, nil
}
// SetupExt attaches (or replaces) the data of an ext for the given collection.
func (dc *DocStore) SetupExt(col, ext string, data map[string]interface{}) {
	colExts, ok := dc.exts[col]
	if !ok {
		colExts = map[string]map[string]interface{}{}
		dc.exts[col] = colExts
	}
	colExts[ext] = data
	dc.logger.Debug("setup new ext", "col", col, "ext", ext, "data", fmt.Sprintf("%+v", data))
}
// Register registers all the HTTP handlers for the extension
func (docstore *DocStore) Register(r *mux.Router, basicAuth func(http.Handler) http.Handler) {
	// Collections listing and stored-queries introspection
	r.Handle("/", basicAuth(http.HandlerFunc(docstore.collectionsHandler())))
	r.Handle("/_stored_queries", basicAuth(http.HandlerFunc(docstore.storedQueriesHandler())))
	// Per-collection listing/insert/query + maintenance endpoints
	r.Handle("/{collection}", basicAuth(http.HandlerFunc(docstore.docsHandler())))
	r.Handle("/{collection}/_rebuild_indexes", basicAuth(http.HandlerFunc(docstore.reindexDocsHandler())))
	r.Handle("/{collection}/_map_reduce", basicAuth(http.HandlerFunc(docstore.mapReduceHandler())))
	// r.Handle("/{collection}/_indexes", middlewares.Auth(http.HandlerFunc(docstore.indexesHandler())))
	// Single-document CRUD and version history
	r.Handle("/{collection}/{_id}", basicAuth(http.HandlerFunc(docstore.docHandler())))
	r.Handle("/{collection}/{_id}/_versions", basicAuth(http.HandlerFunc(docstore.docVersionsHandler())))
}
// Expand a doc keys (fetch the blob as JSON, or a filesystem reference)
// e.g: {"ref": "@blobstash/json:<hash>"}
// => {"ref": {"blob": "json decoded"}}
// XXX(tsileo): expanded ref must also works for marking a blob during GC
// FIXME(tsileo): rename this to "pointers" and return {"data":{[...]}, "pointers": {}}

// fetchPointers recursively walks the doc values and resolves every pointer
// string (@blobs/json:<hash>, @filetree/ref:<hash>); the result maps the raw
// pointer string to its resolved value. Each pointer is fetched at most once.
func (docstore *DocStore) fetchPointers(doc map[string]interface{}) (map[string]interface{}, error) {
	pointers := map[string]interface{}{}
	// docstore.logger.Info("expandKeys")

	for _, v := range doc {
		switch vv := v.(type) {
		case map[string]interface{}:
			// Nested object: recurse and merge its pointers
			docPointers, err := docstore.fetchPointers(vv)
			if err != nil {
				return nil, err
			}
			for k, v := range docPointers {
				pointers[k] = v
			}
			continue
		case string:
			switch {
			case strings.HasPrefix(vv, pointerBlobJSON):
				if _, ok := pointers[vv]; ok {
					// The reference has already been fetched
					continue
				}
				// XXX(tsileo): here and at other place, add a util func in hashutil to detect invalid string length at least
				blob, err := docstore.blobStore.Get(context.TODO(), vv[len(pointerBlobJSON):])
				if err != nil {
					return nil, fmt.Errorf("failed to fetch JSON ref: \"%v => %v\": %v", pointerBlobJSON, v, err)
				}
				p := map[string]interface{}{}
				if err := json.Unmarshal(blob, &p); err != nil {
					return nil, fmt.Errorf("failed to unmarshal blob \"%v => %v\": %v", pointerBlobJSON, v, err)
				}
				pointers[vv] = p
			case strings.HasPrefix(vv, pointerFiletreeRef):
				if _, ok := pointers[vv]; ok {
					// The reference has already been fetched
					continue
				}
				// XXX(tsileo): here and at other place, add a util func in hashutil to detect invalid string length at least
				hash := vv[len(pointerFiletreeRef):]
				// TODO(tsileo): call filetree to get a node
				// blob, err := docstore.blobStore.Get(context.TODO(), hash)
				// if err != nil {
				// 	return nil, fmt.Errorf("failed to fetch JSON ref: \"%v => %v\": %v", pointerFiletreeRef, v, err)
				// }
				// // Reconstruct the Meta
				// var p map[string]interface{}
				// if err := json.Unmarshal(blob, &p); err != nil {
				// 	return nil, fmt.Errorf("failed to unmarshal meta \"%v => %v\": %v", pointerBlobJSON, v, err)
				// }
				node, err := docstore.filetree.Node(context.TODO(), hash)
				if err != nil {
					return nil, err
				}

				// Create a temporary authorization for the file (with a bewit)
				u := &url.URL{Path: fmt.Sprintf("/%s/%s", node.Type[0:1], hash)}
				if err := bewit.Bewit(docstore.filetree.SharingCred(), u, shareDuration); err != nil {
					return nil, fmt.Errorf("failed to generate bewit: %v", err)
				}
				node.URL = u.String()

				pointers[vv] = node
			}
		}
	}

	return pointers, nil
}
// nextKey returns the smallest key strictly greater than the given key in
// lexicographic (bytewise) order, i.e. key = nextKey(lastKey) resumes a scan.
// Trailing 0xff bytes carry over into the preceding byte.
func nextKey(key string) string {
	raw := []byte(key)
	for pos := len(raw) - 1; pos >= 0; pos-- {
		raw[pos]++
		if raw[pos] != 0 {
			// No overflow: done
			break
		}
		// Byte wrapped around to 0x00, carry into the previous byte
	}
	return string(raw)
}
// Collections returns the names of all the existing collections by scanning
// the whole "docstore:" key namespace (each key embeds its collection name).
func (docstore *DocStore) Collections() ([]string, error) {
	// Collect the names in a set since many keys share the same collection
	seen := map[string]struct{}{}
	ksearch := prefixKey
	for {
		res, cursor, err := docstore.kvStore.Keys(context.TODO(), ksearch, "docstore:\xff", 0)
		if err != nil {
			return nil, err
		}
		// Resume the scan from the returned cursor on the next iteration
		ksearch = cursor
		if len(res) == 0 {
			break
		}
		for _, kv := range res {
			// Key = <docstore:{collection}:{_id}>
			seen[strings.Split(kv.Key, ":")[1]] = struct{}{}
		}
	}
	collections := make([]string, 0, len(seen))
	for col := range seen {
		collections = append(collections, col)
	}
	return collections, nil
}
// HTTP handler to manage indexes for a collection
// func (docstore *DocStoreExt) indexesHandler() func(http.ResponseWriter, *http.Request) {
// return func(w http.ResponseWriter, r *http.Request) {
// vars := mux.Vars(r)
// collection := vars["collection"]
// if collection == "" {
// httputil.WriteJSONError(w, http.StatusInternalServerError, "Missing collection in the URL")
// return
// }
// // Ensure the client has the needed permissions
// permissions.CheckPerms(r, PermCollectionName, collection)
// switch r.Method {
// case "GET":
// // GET request, just list all the indexes
// srw := httputil.NewSnappyResponseWriter(w, r)
// indexes, err := docstore.Indexes(collection)
// if err != nil {
// panic(err)
// }
// httputil.WriteJSON(srw, indexes)
// srw.Close()
// case "POST":
// // POST request, create a new index from the body
// q := &index.Index{}
// if err := json.NewDecoder(r.Body).Decode(&q); err != nil {
// panic(err)
// }
// // Actually save the index
// if err := docstore.AddIndex(collection, q); err != nil {
// panic(err)
// }
// w.WriteHeader(http.StatusCreated)
// default:
// w.WriteHeader(http.StatusMethodNotAllowed)
// }
// }
// }
// storedQueriesHandler exposes the stored queries loaded from the config
// (GET only; anything else gets a 405).
func (docstore *DocStore) storedQueriesHandler() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "GET" {
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}
		httputil.WriteJSON(w, docstore.storedQueries)
	}
}
// collectionsHandler serves the list of existing collections
// (GET only; anything else gets a 405).
func (docstore *DocStore) collectionsHandler() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "GET" {
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}
		// Ensure the client has the needed permissions
		// permissions.CheckPerms(r, PermCollectionName)
		cols, err := docstore.Collections()
		if err != nil {
			panic(err)
		}
		httputil.MarshalAndWrite(r, w, map[string]interface{}{
			"collections": cols,
		})
	}
}
// isQueryAll reports whether q is the empty query, i.e. a query that
// matches every document.
func isQueryAll(q string) bool {
	// Return the condition directly instead of the if/return-true/return-false
	// anti-idiom
	return q == ""
}
// Indexes return the list of `Index` for the given collection
// func (docstore *DocStoreExt) Indexes(collection string) ([]*index.Index, error) {
// res, err := docstore.kvStore.ReversePrefixKeys(fmt.Sprintf(PrefixIndexKeyFmt, collection), "", "\xff", 50)
// indexes := []*index.Index{}
// if err != nil {
// panic(err)
// }
// for _, kv := range res {
// // FIXME(tsileo): this check shouldn't be here, it should be handled by ReversePrefixKeys!
// if !strings.HasPrefix(kv.Key, fmt.Sprintf(IndexKeyFmt, collection, "")) {
// break
// }
// index := &index.Index{ID: strings.Replace(kv.Key, fmt.Sprintf(IndexKeyFmt, collection, ""), "", 1)}
// if err := json.Unmarshal([]byte(kv.Value), index); err != nil {
// docstore.logger.Error("failed to unmarshal log entry", "err", err, "js", kv.Value)
// // return nil, err
// continue
// }
// indexes = append(indexes, index)
// }
// return indexes, nil
// }
// func (docstore *DocStoreExt) AddIndex(collection string, idx *index.Index) error {
// if len(idx.Fields) > 1 {
// return httputil.NewPublicErrorFmt("Only single field index are support for now")
// }
// var err error
// js, err := json.Marshal(idx)
// if err != nil {
// return err
// }
// // FIXME(tsileo): ensure we can't create duplicate index
// switch len(idx.Fields) {
// case 1:
// hashKey := fmt.Sprintf("single-field-%s", idx.Fields[0])
// _, err = docstore.kvStore.PutPrefix(fmt.Sprintf(PrefixIndexKeyFmt, collection), hashKey, string(js), -1, "")
// default:
// err = httputil.NewPublicErrorFmt("Bad index")
// }
// return err
// }
// IndexDoc indexes the given doc if needed, should never be called by the client,
// this method is exported to support re-indexing at the blob level and rebuild the index from it.
// func (docstore *DocStoreExt) IndexDoc(collection string, _id *id.ID, doc *map[string]interface{}) error {
// // Check if the document should be indexed by the full-text indexer (Bleve)
// if _id.Flag() == FlagFullTextIndexed {
// if err := docstore.index.Index(_id.String(), doc); err != nil {
// return err
// }
// }
// // Check if the document need to be indexed
// indexes, err := docstore.Indexes(collection)
// if err != nil {
// return fmt.Errorf("Failed to fetch index")
// }/)
// optz := optimizer.New(docstore.logger.New("module", "query optimizer"), indexes)
// shouldIndex, idx, idxKey := optz.ShouldIndex(*doc)
// if shouldIndex {
// docstore.logger.Debug("indexing document", "idx-key", idxKey, "_id", _id.String())
// // FIXME(tsileo): returns a special status code on `index.DuplicateIndexError`
// if err := docstore.docIndex.Index(collection, idx, idxKey, _id.String()); err != nil {
// return err
// }
// }
// return nil
// }
// Insert stores the given doc in the collection and returns its newly
// minted ID (flag/version already set).
//
// Reserved fields (_id, _created, _updated, _version, _hooks) are silently
// stripped, then the collection's "post" Lua hook (if any) may rewrite the
// doc or reject it (ErrUnprocessableEntity).
func (docstore *DocStore) Insert(collection string, doc map[string]interface{}) (*id.ID, error) {
	// Strip the reserved keys; this also covers any client-provided "_id"
	// (delete is a no-op when the key is absent, so no existence check needed)
	for k := range doc {
		if _, ok := reservedKeys[k]; ok {
			// XXX(tsileo): delete them or raises an exception?
			delete(doc, k)
		}
	}

	// TODO(tsileo): track the hook execution time and log it
	ok, newDoc, err := docstore.hooks.Execute(collection, "post", doc)
	if err != nil {
		return nil, err
	}

	// A hook that ran but returned no doc rejects the insert
	if ok && newDoc == nil {
		return nil, ErrUnprocessableEntity
	}

	if ok {
		doc = newDoc
	}

	data, err := msgpack.Marshal(doc)
	if err != nil {
		return nil, err
	}

	// Build the ID and add some meta data
	now := time.Now().UTC()
	_id, err := id.New(now.UnixNano())
	if err != nil {
		return nil, err
	}
	_id.SetFlag(flagNoop)

	// Create a pointer in the key-value store (value = flag byte + msgpack payload)
	kv, err := docstore.kvStore.Put(
		context.TODO(), fmt.Sprintf(keyFmt, collection, _id.String()), "", append([]byte{_id.Flag()}, data...), now.UnixNano(),
	)
	if err != nil {
		return nil, err
	}
	_id.SetVersion(kv.Version)

	// Index the doc if needed
	// FIXME(tsileo): move this to the hub via the kvstore
	if indexes, ok := docstore.indexes[collection]; ok {
		for _, index := range indexes {
			if err := index.Index(_id, doc); err != nil {
				panic(err)
			}
		}
	}

	return _id, nil
}
// query gathers all the supported ways of expressing a docstore query;
// at most one of the query fields is expected to be set.
type query struct {
	storedQuery     string      // Name of a pre-loaded stored query
	storedQueryArgs interface{} // Arguments forwarded to the stored query
	basicQuery      string      // Inline Lua boolean expression (wrapped by queryToScript)
	script          string      // Raw Lua script returning a match function
	lfunc           *lua.LFunction
	sortIndex       string // Sort index name ("" or "-_id" selects the default iterator)
}
// queryToScript turns a query into a Lua script ready to be executed by the
// query engine; an empty result means the query must be a stored query.
func queryToScript(q *query) string {
	switch {
	case q.basicQuery != "":
		// Wrap the inline boolean expression into a matching function
		return `return function(doc)
  if ` + q.basicQuery + ` then return true else return false end
end
`
	case q.script != "":
		// A raw script is executed as-is
		return q.script
	default:
		// Must be a stored query, return an empty string
		return ""
	}
}
// isMatchAll reports whether the query carries no filtering criteria at all
// (no Lua function, no script, no basic query, no stored query), in which
// case every document matches.
func (q *query) isMatchAll() bool {
	// Return the condition directly instead of the if/return-true/return-false
	// anti-idiom
	return q.lfunc == nil && q.script == "" && q.basicQuery == "" && q.storedQuery == "" && q.storedQueryArgs == nil
}
// addSpecialFields decorates a fetched document with the computed special
// fields: _id, _version, _created and — only when the doc has actually been
// updated since creation — _updated.
func addSpecialFields(doc map[string]interface{}, _id *id.ID) {
	doc["_id"] = _id
	doc["_version"] = _id.VersionString()

	doc["_created"] = time.Unix(0, _id.Ts()).UTC().Format(time.RFC3339)
	// BUG FIX: the original compared the raw version integer against the
	// RFC3339 _created string — a comparison between different types that
	// was always true, so _updated was set even for never-updated docs.
	// Compare the version timestamp against the creation timestamp instead.
	if updated := int64(_id.Version()); updated != _id.Ts() {
		doc["_updated"] = time.Unix(0, updated).UTC().Format(time.RFC3339)
	}
}
// Update replaces the whole document identified by sid with newDoc and
// returns the ID carrying the new version.
//
// ifMatch, when non-empty, must equal the current version string or
// ErrPreconditionFailed is returned. Missing or deleted documents yield
// ErrDocNotFound. The per-document lock makes the read-modify-write atomic.
func (docstore *DocStore) Update(collection, sid string, newDoc map[string]interface{}, ifMatch string) (*id.ID, error) {
	docstore.locker.Lock(sid)
	defer docstore.locker.Unlock(sid)
	ctx := context.Background()

	// Fetch the actual doc
	doc := map[string]interface{}{}
	_id, _, err := docstore.Fetch(collection, sid, &doc, false, -1)
	if err != nil {
		if err == vkv.ErrNotFound {
			return nil, ErrDocNotFound
		}
		// NOTE: Fetch returns a nil ID on error, so the deleted-flag check
		// must happen on the success path (the original dereferenced a nil
		// ID here on unexpected errors)
		return nil, err
	}
	// A deleted doc is reported as missing
	if _id.Flag() == flagDeleted {
		return nil, ErrDocNotFound
	}

	// Pre-condition (done via If-Match header/status precondition failed)
	if ifMatch != "" && ifMatch != _id.VersionString() {
		return nil, ErrPreconditionFailed
	}

	// Field/key starting with `_` are forbidden, remove them
	for k := range newDoc {
		if _, ok := reservedKeys[k]; ok {
			delete(newDoc, k)
		}
	}

	data, err := msgpack.Marshal(newDoc)
	if err != nil {
		panic(err)
	}

	docstore.logger.Debug("Update", "_id", sid, "new_doc", newDoc)

	kv, err := docstore.kvStore.Put(ctx, fmt.Sprintf(keyFmt, collection, _id.String()), "", append([]byte{_id.Flag()}, data...), -1)
	if err != nil {
		panic(err)
	}
	_id.SetVersion(kv.Version)

	// FIXME(tsileo): move this to the hub via the kvstore
	if indexes, ok := docstore.indexes[collection]; ok {
		for _, index := range indexes {
			if err := index.Index(_id, newDoc); err != nil {
				panic(err)
			}
		}
	}

	// BUG FIX: the original ended with `return nil, err` (both nil at this
	// point), so successful updates returned a nil ID and the POST handler
	// panicked dereferencing it
	return _id, nil
}
// Remove soft-deletes the document identified by sid: a new version holding
// only the flagDeleted byte is written, and the sort indexes are notified.
// Missing or already-deleted documents yield ErrDocNotFound.
func (docstore *DocStore) Remove(collection, sid string) (*id.ID, error) {
	docstore.locker.Lock(sid)
	defer docstore.locker.Unlock(sid)

	_id, _, err := docstore.Fetch(collection, sid, nil, false, -1)
	if err != nil {
		if err == vkv.ErrNotFound {
			return nil, ErrDocNotFound
		}
		// NOTE: Fetch returns a nil ID on error, so the deleted-flag check
		// must happen on the success path (the original dereferenced a nil
		// ID here on unexpected errors)
		return nil, err
	}
	// An already-deleted doc is reported as missing
	if _id.Flag() == flagDeleted {
		return nil, ErrDocNotFound
	}

	// Write a tombstone version: only the deleted flag byte, no payload
	kv, err := docstore.kvStore.Put(context.TODO(), fmt.Sprintf(keyFmt, collection, sid), "", []byte{flagDeleted}, -1)
	if err != nil {
		return nil, err
	}
	_id.SetVersion(kv.Version)
	_id.SetFlag(flagDeleted)

	// FIXME(tsileo): move this to the hub via the kvstore
	if indexes, ok := docstore.indexes[collection]; ok {
		for _, index := range indexes {
			if err := index.Index(_id, nil); err != nil {
				return nil, err
			}
		}
	}
	return _id, nil
}
// LuaQuery performs a query using an already-compiled Lua match function
// and returns the matching docs, their pointers, the next cursor and the
// execution stats.
func (docstore *DocStore) LuaQuery(L *lua.LState, lfunc *lua.LFunction, collection string, cursor string, sortIndex string, limit int) ([]map[string]interface{}, map[string]interface{}, string, *executionStats, error) {
	q := &query{
		lfunc:     lfunc,
		sortIndex: sortIndex,
	}
	docs, pointers, stats, err := docstore.query(L, collection, q, cursor, limit, true, 0)
	if err != nil {
		return nil, nil, "", nil, err
	}
	// The next cursor is derived from the last examined ID
	return docs, pointers, vkv.PrevKey(stats.LastID), stats, nil
}
// Query performs a query against the collection (pointers always fetched)
// and returns the matching docs, their pointers and the execution stats.
func (docstore *DocStore) Query(collection string, query *query, cursor string, limit int, asOf int64) ([]map[string]interface{}, map[string]interface{}, *executionStats, error) {
	matches, pointers, stats, err := docstore.query(nil, collection, query, cursor, limit, true, asOf)
	if err != nil {
		return nil, nil, nil, err
	}
	// TODO(tsileo): fix this
	return matches, pointers, stats, nil
}
// query returns a JSON list as []byte for the given query
// docs are unmarhsalled to JSON only when needed.
//
// It iterates the collection's IDs (via the default reverse-ID iterator or a
// configured sort index), fetches each candidate doc at the version selected
// by the iterator, and runs it through the query matcher until `limit` docs
// matched or the iterator is exhausted. asOf (nanoseconds) selects a
// point-in-time view; 0 means "now".
func (docstore *DocStore) query(L *lua.LState, collection string, query *query, cursor string, limit int, fetchPointers bool, asOf int64) ([]map[string]interface{}, map[string]interface{}, *executionStats, error) {
	// Init some stuff
	tstart := time.Now()
	stats := &executionStats{}
	var err error
	var docPointers map[string]interface{}
	pointers := map[string]interface{}{}
	docs := []map[string]interface{}{}

	// Tweak the internal query batch limit (over-fetch since some candidates
	// will be filtered out by the matcher)
	fetchLimit := int(float64(limit) * 1.3)

	// Select the ID iterator (XXX sort indexes are a WIP)
	var it IDIterator
	if query.sortIndex == "" || query.sortIndex == "-_id" {
		// Use the default ID iterator (iter IDs in reverse order
		it = newNoIndexIterator(docstore.kvStore)
	} else {
		if indexes, ok := docstore.indexes[collection]; ok {
			if idx, ok := indexes[query.sortIndex]; ok {
				it = idx
			}
		}
		if it == nil {
			return nil, nil, stats, fmt.Errorf("failed to select sort_index %q", query.sortIndex)
		}
	}
	stats.Index = it.Name()

	// Select the query matcher: match-all when the query is empty,
	// a Lua engine otherwise
	var qmatcher QueryMatcher
	switch {
	case query.isMatchAll():
		stats.Engine = "match_all"
		qmatcher = &MatchAllEngine{}
	default:
		qmatcher, err = docstore.newLuaQueryEngine(L, query)
		if err != nil {
			return nil, nil, stats, err
		}
		stats.Engine = "lua"
	}
	defer qmatcher.Close()

	start := cursor

	// Init the logger
	qLogger := docstore.logger.New("query", query, "query_engine", stats.Engine, "id", logext.RandId(8))
	qLogger.Info("new query")

QUERY:
	for {
		// Loop until we have the number of requested documents, or if we scanned everything
		qLogger.Debug("internal query", "limit", limit, "start", start, "cursor", cursor, "nreturned", stats.NReturned)
		// FIXME(tsileo): use `PrefixKeys` if ?sort=_id (-_id by default).

		// Fetch a batch from the iterator
		// (note: `:=` shadows the outer cursor on purpose; `start = cursor`
		// below refers to this batch's cursor)
		_ids, cursor, err := it.Iter(collection, start, fetchLimit, asOf)
		if err != nil {
			panic(err)
		}
		for _, _id := range _ids {
			if _id.Flag() == flagDeleted {
				qLogger.Debug("skipping deleted doc", "_id", _id, "as_of", asOf)
				continue
			}
			qLogger.Debug("fetch doc", "_id", _id, "as_of", asOf)
			stats.Cursor = _id.Cursor()
			doc := map[string]interface{}{}
			var err error
			// Fetch the version tied to the ID (the iterator is taking care of selecting an ID version)
			if _id, docPointers, err = docstore.Fetch(collection, _id.String(), &doc, fetchPointers, _id.Version()); err != nil {
				panic(err)
			}
			stats.TotalDocsExamined++
			// Check if the doc match the query
			ok, err := qmatcher.Match(doc)
			if err != nil {
				return nil, nil, stats, err
			}
			if ok {
				// The document matches the query
				addSpecialFields(doc, _id)
				if fetchPointers {
					for k, v := range docPointers {
						pointers[k] = v
					}
				}
				docs = append(docs, doc)
				stats.NReturned++
				stats.LastID = _id.String()
				if stats.NReturned == limit {
					break QUERY
				}
			}
		}
		if len(_ids) == 0 { // || len(_ids) < fetchLimit {
			// Iterator exhausted
			break
		}
		start = cursor
	}

	duration := time.Since(tstart)
	qLogger.Debug("scan done", "duration", duration, "nReturned", stats.NReturned, "scanned", stats.TotalDocsExamined, "cursor", stats.Cursor)
	stats.ExecutionTimeNano = duration.Nanoseconds()
	return docs, pointers, stats, nil
}
// RebuildIndexes rebuilds the collection's sort indexes from scratch by
// replaying every version of every document in chronological order.
func (docstore *DocStore) RebuildIndexes(collection string) error {
	// FIXME(tsileo): locking
	// Reset the existing indexes before replaying
	if indexes, ok := docstore.indexes[collection]; ok {
		for _, index := range indexes {
			// FIXME(tsileo): make an preprareRebuild interface optional
			if err := index.(*sortIndex).prepareRebuild(); err != nil {
				panic(err)
			}
		}
	}
	end := fmt.Sprintf(keyFmt, collection, "")
	start := fmt.Sprintf(keyFmt, collection, "\xff")

	// List keys from the kvstore
	res, _, err := docstore.kvStore.ReverseKeys(context.TODO(), end, start, -1)
	if err != nil {
		return err
	}
	for _, kv := range res {
		// Build the ID
		_id, err := idFromKey(collection, kv.Key)
		if err != nil {
			return err
		}

		// Add the extra metadata to the ID
		_id.SetFlag(kv.Data[0])
		_id.SetVersion(kv.Version)

		// Check if the document has a valid version for the given asOf
		kvv, _, err := docstore.kvStore.Versions(context.TODO(), fmt.Sprintf(keyFmt, collection, _id.String()), "0", -1)
		if err != nil {
			if err == vkv.ErrNotFound {
				continue
			}
			return err
		}

		// No anterior versions, skip it
		if len(kvv.Versions) == 0 {
			continue
		}

		// Reverse the versions in place (Versions returns newest-first;
		// the replay below needs oldest-first)
		for i := len(kvv.Versions)/2 - 1; i >= 0; i-- {
			opp := len(kvv.Versions) - 1 - i
			kvv.Versions[i], kvv.Versions[opp] = kvv.Versions[opp], kvv.Versions[i]
		}

		// Re-index each versions in chronological order
		for _, version := range kvv.Versions {
			_id.SetFlag(version.Data[0])
			_id.SetVersion(version.Version)
			var doc map[string]interface{}
			// Deleted versions are replayed with a nil doc
			if _id.Flag() != flagDeleted {
				doc = map[string]interface{}{}
				if err := msgpack.Unmarshal(version.Data[1:], &doc); err != nil {
					return err
				}
			}

			if indexes, ok := docstore.indexes[collection]; ok {
				for _, index := range indexes {
					if err := index.Index(_id, doc); err != nil {
						return err
					}
				}
			}
			// fmt.Printf("_id=%+v|%d|%+v\n", _id, version.Version, doc)

			// FIXME(tsileo): re-index the doc if needed
		}
	}

	return nil
}
// reindexDocsHandler returns the HTTP handler that triggers a full index
// rebuild for a collection. Only POST is accepted; success is reported with
// a 201 Created.
func (docstore *DocStore) reindexDocsHandler() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		collection := mux.Vars(r)["collection"]
		if collection == "" {
			httputil.WriteJSONError(w, http.StatusInternalServerError, "Missing collection in the URL")
			return
		}
		if r.Method != "POST" {
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}
		// TODO(tsileo): tweak the perms
		allowed := auth.Can(
			w,
			r,
			perms.Action(perms.List, perms.JSONDocument),
			perms.Resource(perms.DocStore, perms.JSONDocument),
		)
		if !allowed {
			auth.Forbidden(w)
			return
		}
		if err := docstore.RebuildIndexes(collection); err != nil {
			panic(err)
		}
		w.WriteHeader(http.StatusCreated)
	}
}
// HTTP handler for the collection (handle listing+query+insert).
// GET/HEAD runs a query (HEAD acts as a count query), POST inserts a new
// JSON document.
func (docstore *DocStore) docsHandler() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		q := httputil.NewQuery(r.URL.Query())
		vars := mux.Vars(r)
		collection := vars["collection"]
		if collection == "" {
			httputil.WriteJSONError(w, http.StatusInternalServerError, "Missing collection in the URL")
			return
		}
		switch r.Method {
		case "GET", "HEAD":
			if !auth.Can(
				w,
				r,
				perms.Action(perms.List, perms.JSONDocument),
				perms.Resource(perms.DocStore, perms.JSONDocument),
			) {
				auth.Forbidden(w)
				return
			}
			var asOf int64
			var err error
			// Parse the cursor
			cursor := q.Get("cursor")
			// Optional "time travel": the human-readable `as_of` wins over
			// the raw nanosecond timestamp `as_of_nano`.
			if v := q.Get("as_of"); v != "" {
				asOf, err = asof.ParseAsOf(v)
				if err != nil {
					// BUGFIX: this parse error used to be silently dropped,
					// turning an invalid as_of into a query at asOf == 0.
					httputil.Error(w, err)
					return
				}
			}
			if asOf == 0 {
				asOf, err = q.GetInt64Default("as_of_nano", 0)
				if err != nil {
					panic(err)
				}
			}
			// Parse the query (JSON-encoded)
			var queryArgs interface{}
			jsQuery := q.Get("stored_query_args")
			if jsQuery != "" {
				if err := json.Unmarshal([]byte(jsQuery), &queryArgs); err != nil {
					httputil.WriteJSONError(w, http.StatusInternalServerError, "Failed to decode JSON query")
					return
				}
			}
			limit, err := q.GetInt("limit", 50, 1000)
			if err != nil {
				httputil.Error(w, err)
				return
			}
			docs, pointers, stats, err := docstore.query(nil, collection, &query{
				storedQueryArgs: queryArgs,
				storedQuery:     q.Get("stored_query"),
				script:          q.Get("script"),
				basicQuery:      q.Get("query"),
				sortIndex:       q.Get("sort_index"),
			}, cursor, limit, true, asOf)
			if err != nil {
				docstore.logger.Error("query failed", "err", err)
				httputil.Error(w, err)
				return
			}
			// Set some meta headers to help the client build subsequent query
			// (iterator/cursor handling)
			var hasMore bool
			// If NReturned == limit there may be more results; the cursor is
			// the start of the next query.
			if stats.NReturned == limit {
				hasMore = true
			}
			w.Header().Set("BlobStash-DocStore-Iter-Has-More", strconv.FormatBool(hasMore))
			w.Header().Set("BlobStash-DocStore-Iter-Cursor", stats.Cursor)
			// Set headers for the query stats
			w.Header().Set("BlobStash-DocStore-Query-Index", stats.Index)
			w.Header().Set("BlobStash-DocStore-Query-Engine", stats.Engine)
			w.Header().Set("BlobStash-DocStore-Query-Returned", strconv.Itoa(stats.NReturned))
			w.Header().Set("BlobStash-DocStore-Query-Examined", strconv.Itoa(stats.TotalDocsExamined))
			w.Header().Set("BlobStash-DocStore-Query-Exec-Time-Nano", strconv.FormatInt(stats.ExecutionTimeNano, 10))
			w.Header().Set("BlobStash-DocStore-Results-Count", strconv.Itoa(stats.NReturned))
			// This way, HEAD request can acts as a count query
			if r.Method == "HEAD" {
				return
			}
			// Write the JSON response (encoded if requested)
			httputil.MarshalAndWrite(r, w, &map[string]interface{}{
				"pointers": pointers,
				"data":     docs,
				"pagination": map[string]interface{}{
					"cursor":   stats.Cursor,
					"has_more": hasMore,
					"count":    stats.NReturned,
					"per_page": limit,
				},
			})
		case "POST":
			// NOTE(review): unlike GET/HEAD above, this branch performs no
			// auth.Can check (only the commented-out legacy perms call
			// below) — confirm this is intentional.
			// permissions.CheckPerms(r, PermCollectionName, collection, PermWrite)
			// Read the whole body
			blob, err := ioutil.ReadAll(r.Body)
			if err != nil {
				panic(err)
			}
			// Ensure it's JSON encoded
			doc := map[string]interface{}{}
			if err := json.Unmarshal(blob, &doc); err != nil {
				docstore.logger.Error("Failed to parse JSON input", "collection", collection, "err", err)
				panic(httputil.NewPublicErrorFmt("Invalid JSON document"))
			}
			// Strip reserved keys so clients cannot forge them
			// (gofmt simplification: the map value was unused).
			for k := range doc {
				if _, ok := reservedKeys[k]; ok {
					// XXX(tsileo): delete them or raises an exception?
					delete(doc, k)
				}
			}
			// Actually insert the doc
			_id, err := docstore.Insert(collection, doc)
			if err == ErrUnprocessableEntity {
				// FIXME(tsileo): returns an object with field errors (set via the Lua API in the hook)
				w.WriteHeader(http.StatusUnprocessableEntity)
				return
			}
			if err != nil {
				panic(err)
			}
			// Output some headers
			w.Header().Set("BlobStash-DocStore-Doc-Id", _id.String())
			w.Header().Set("BlobStash-DocStore-Doc-Version", _id.VersionString())
			w.Header().Set("BlobStash-DocStore-Doc-CreatedAt", strconv.FormatInt(_id.Ts(), 10))
			created := time.Unix(0, _id.Ts()).UTC().Format(time.RFC3339)
			httputil.MarshalAndWrite(r, w, map[string]interface{}{
				"_id":      _id.String(),
				"_created": created,
				"_version": _id.VersionString(),
			},
				httputil.WithStatusCode(http.StatusCreated))
			return
		default:
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}
	}
}
// JSON input for the map reduce endpoint.
type mapReduceInput struct {
	// Source of the map step (passed to MapReduceEngine.SetupMap; presumably
	// Lua, per the engine's API — TODO confirm).
	Map string `json:"map"`
	// Extra values made available to the map script.
	MapScope map[string]interface{} `json:"map_scope"`
	// Source of the reduce step (passed to MapReduceEngine.SetupReduce).
	Reduce string `json:"reduce"`
	// Extra values made available to the reduce script.
	ReduceScope map[string]interface{} `json:"reduce_scope"`
}
// mapReduceHandler returns the HTTP handler running a map/reduce job over a
// whole collection (POST only). Documents are fetched in batches of 50;
// each batch is mapped concurrently (at most 6 workers in flight) and the
// finished batches are folded into a single root engine, which is finalized
// into the response.
func (docstore *DocStore) mapReduceHandler() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		q := httputil.NewQuery(r.URL.Query())
		vars := mux.Vars(r)
		collection := vars["collection"]
		if collection == "" {
			httputil.WriteJSONError(w, http.StatusInternalServerError, "Missing collection in the URL")
			return
		}
		switch r.Method {
		case "POST":
			input := &mapReduceInput{}
			if err := json.NewDecoder(r.Body).Decode(input); err != nil {
				panic(httputil.NewPublicErrorFmt("Invalid JSON input"))
			}
			var asOf int64
			var err error
			// Optional "time travel" argument, human-readable form first.
			// NOTE(review): docsHandler parses this with asof.ParseAsOf; the
			// hand-rolled time.Parse here accepts a narrower format — confirm
			// the divergence is intended.
			if v := q.Get("as_of"); v != "" {
				t, err := time.Parse("2006-1-2 15:4:5", v)
				if err != nil {
					panic(err)
				}
				asOf = t.UTC().UnixNano()
			}
			if asOf == 0 {
				asOf, err = q.GetInt64Default("as_of_nano", 0)
				if err != nil {
					panic(err)
				}
			}
			// Parse the query (JSON-encoded)
			var queryArgs interface{}
			jsQuery := q.Get("stored_query_args")
			if jsQuery != "" {
				if err := json.Unmarshal([]byte(jsQuery), &queryArgs); err != nil {
					httputil.WriteJSONError(w, http.StatusInternalServerError, "Failed to decode JSON query")
					return
				}
			}
			// The root engine owns the final reduce/finalize step.
			rootMre := NewMapReduceEngine()
			defer rootMre.Close()
			if err := rootMre.SetupReduce(input.Reduce); err != nil {
				panic(err)
			}
			if err := rootMre.SetupMap(input.Map); err != nil {
				panic(err)
			}
			batches := make(chan *MapReduceEngine)
			// Reduce the batches into a single one as they're done
			// TODO(tsileo): find a way to interrupt the pipeline on error
			inFlight := 6
			limiter := make(chan struct{}, inFlight)
			errc := make(chan error, inFlight)
			stop := make(chan struct{}, inFlight)
			// Single consumer goroutine: folds every finished batch into
			// rootMre. After the first error, remaining batches are drained
			// and discarded, and the error is propagated via errc/stop.
			go func() {
				var discard bool
				for batch := range batches {
					if discard {
						continue
					}
					if batch.err != nil {
						// propagate the error
						discard = true
						stop <- struct{}{}
						errc <- batch.err
					}
					if err := rootMre.Reduce(batch); err != nil {
						// propagate the error
						discard = true
						stop <- struct{}{}
						errc <- err
					}
				}
				// Signal a clean end of the reduce loop.
				errc <- nil
			}()
			hasMore := true
			var cursor string
			// Batch size
			limit := 50
			// NOTE: this shadows the httputil query `q` declared above; the
			// composite literal still reads the OUTER `q`, because a
			// short-variable declaration only comes into scope after the end
			// of the whole statement.
			q := &query{
				storedQueryArgs: queryArgs,
				storedQuery:     q.Get("stored_query"),
				script:          q.Get("script"),
				basicQuery:      q.Get("query"),
			}
			var wg sync.WaitGroup
		QUERY_LOOP:
			for {
				select {
				case <-stop:
					break QUERY_LOOP
				default:
					// Fetch a page
					if !hasMore {
						time.Sleep(50 * time.Millisecond)
						break
					}
					docs, _, stats, err := docstore.query(nil, collection, q, cursor, limit, true, asOf)
					if err != nil {
						docstore.logger.Error("query failed", "err", err)
						httputil.Error(w, err)
						return
					}
					// Process the batch in parallel; the limiter channel
					// bounds the number of in-flight workers to inFlight.
					wg.Add(1)
					limiter <- struct{}{}
					// NOTE(review): the `doc` parameter is never used — the
					// closure ranges over the captured `docs` (safe here
					// because `docs` is re-declared on each iteration).
					go func(doc []map[string]interface{}) {
						defer func() {
							wg.Done()
							<-limiter
						}()
						mre, err := rootMre.Duplicate()
						if err != nil {
							panic(err)
						}
						defer mre.Close()
						// XXX(tsileo): pass the pointers in the Lua map?
						// Call Map for each document
						for _, doc := range docs {
							if err := mre.Map(doc); err != nil {
								mre.err = err
								batches <- mre
								return
							}
						}
						if err := mre.Reduce(nil); err != nil {
							mre.err = err
						}
						batches <- mre
					}(docs)
					// Guess if they're are still results on client-side,
					// by checking if NReturned < limit, we can deduce there's no more results.
					// The cursor should be the start of the next query
					if stats.NReturned < limit {
						hasMore = false
						break QUERY_LOOP
					}
					cursor = vkv.PrevKey(stats.LastID)
				}
			}
			// Wait for every worker, then close the batches channel so the
			// consumer goroutine terminates and reports via errc.
			wg.Wait()
			close(batches)
			// Wait for the reduce step to be done
			if err := <-errc; err != nil {
				docstore.logger.Error("reduce failed", "err", err)
				httputil.Error(w, err)
				return
			}
			result, err := rootMre.Finalize()
			if err != nil {
				docstore.logger.Error("finalize failed", "err", err)
				httputil.Error(w, err)
				return
			}
			// Write the JSON response (encoded if requested)
			httputil.MarshalAndWrite(r, w, &map[string]interface{}{
				"data": result,
			})
		default:
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}
	}
}
// FetchVersions returns all versions/revisions for the given doc ID, newest
// first, starting at `start` (a version timestamp), along with the fetched
// pointers and the cursor for the next page.
func (docstore *DocStore) FetchVersions(collection, sid string, start int64, limit int, fetchPointers bool) ([]map[string]interface{}, map[string]interface{}, int64, error) {
	var cursor int64
	// TODO(tsileo): better output than a slice of `map[string]interface{}`
	if collection == "" {
		return nil, nil, cursor, errors.New("missing collection query arg")
	}
	// Fetch the KV versions entry for this _id
	// XXX(tsileo): use int64 for start/end
	kvv, _, err := docstore.kvStore.Versions(context.TODO(), fmt.Sprintf(keyFmt, collection, sid), strconv.FormatInt(start, 10), limit)
	// FIXME(tsileo): return the cursor from Versions
	if err != nil {
		return nil, nil, cursor, err
	}
	docs := []map[string]interface{}{}
	pointers := map[string]interface{}{}
	for _, kv := range kvv.Versions {
		var doc map[string]interface{}
		// Build the doc (the first data byte is the flag)
		if err := msgpack.Unmarshal(kv.Data[1:], &doc); err != nil {
			// BUGFIX: include the cause instead of dropping it
			return nil, nil, cursor, fmt.Errorf("failed to unmarshal blob: %v", err)
		}
		// Parse a fresh ID for this version (the version is attached below)
		_id, err := id.FromHex(sid)
		if err != nil {
			// BUGFIX: return the error like Fetch does instead of panicking
			return nil, nil, cursor, fmt.Errorf("invalid _id: %v", err)
		}
		_id.SetVersion(kv.Version)
		addSpecialFields(doc, _id)
		if fetchPointers {
			docPointers, err := docstore.fetchPointers(doc)
			if err != nil {
				return nil, nil, cursor, err
			}
			for k, v := range docPointers {
				pointers[k] = v
			}
		}
		docs = append(docs, doc)
		// The next page starts just before the last returned version
		cursor = kv.Version - 1
	}
	return docs, pointers, cursor, nil
}
// Fetch a single document into `res` and returns the `id.ID`.
// The type of `res` selects the output format: nil (ID/metadata only),
// *map[string]interface{} (decoded document), or *[]byte (the document
// re-encoded as JSON, appended to the provided buffer). `version` is
// forwarded to kvStore.Get (-1 presumably selects the latest version —
// TODO confirm against the vkv API).
func (docstore *DocStore) Fetch(collection, sid string, res interface{}, fetchPointers bool, version int64) (*id.ID, map[string]interface{}, error) {
	if collection == "" {
		return nil, nil, errors.New("missing collection query arg")
	}
	// Fetch the VKV entry for this _id
	kv, err := docstore.kvStore.Get(context.TODO(), fmt.Sprintf(keyFmt, collection, sid), version)
	if err != nil {
		return nil, nil, err
	}
	// Parse the ID
	_id, err := id.FromHex(sid)
	if err != nil {
		return nil, nil, fmt.Errorf("invalid _id: %v", err)
	}
	// Extract the hash (first byte is the Flag)
	// XXX(tsileo): add/handle a `Deleted` flag
	blob := kv.Data[1:]
	var pointers map[string]interface{}
	// FIXME(tsileo): handle deleted docs (also in the admin/query)
	if len(blob) > 0 {
		// Build the doc
		switch idoc := res.(type) {
		case nil:
			// Do nothing, the caller only wants the ID/metadata
		case *map[string]interface{}:
			if err := msgpack.Unmarshal(blob, idoc); err != nil {
				return nil, nil, fmt.Errorf("failed to unmarshal blob: %s", blob)
			}
			// TODO(tsileo): set the special fields _created/_updated/_hash
			if fetchPointers {
				pointers, err = docstore.fetchPointers(*idoc)
				if err != nil {
					return nil, nil, err
				}
			}
		case *[]byte:
			// Decode the doc and encode it to JSON
			out := map[string]interface{}{}
			if err := msgpack.Unmarshal(blob, &out); err != nil {
				return nil, nil, fmt.Errorf("failed to unmarshal blob: %s", blob)
			}
			// TODO(tsileo): set the special fields _created/_updated/_hash
			js, err := json.Marshal(out)
			if err != nil {
				return nil, nil, err
			}
			// Append the JSON copy to the caller-provided buffer
			*idoc = append(*idoc, js...)
		}
	}
	// Attach the flag (first data byte) and the version to the returned ID
	_id.SetFlag(kv.Data[0])
	_id.SetVersion(kv.Version)
	return _id, pointers, nil
}
// HTTP handler for serving/updating a single doc.
// GET/HEAD serves the document (with ETag/If-None-Match support), PATCH
// applies a JSON-Patch (RFC 6902), POST replaces the whole document, and
// DELETE removes it.
func (docstore *DocStore) docHandler() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		collection := vars["collection"]
		if collection == "" {
			httputil.WriteJSONError(w, http.StatusInternalServerError, "Missing collection in the URL")
			return
		}
		sid := vars["_id"]
		if sid == "" {
			httputil.WriteJSONError(w, http.StatusInternalServerError, "Missing _id in the URL")
			return
		}
		var _id *id.ID
		var err error
		switch r.Method {
		case "GET", "HEAD":
			// Serve the document JSON encoded
			// permissions.CheckPerms(r, PermCollectionName, collection, PermRead)
			var doc, pointers map[string]interface{}
			// FIXME(tsileo): support asOf?
			if _id, pointers, err = docstore.Fetch(collection, sid, &doc, true, -1); err != nil {
				if err == vkv.ErrNotFound {
					// Document doesn't exist, returns a status 404
					w.WriteHeader(http.StatusNotFound)
					return
				}
				// BUGFIX: Fetch returns a nil _id on error, so the old
				// `err == vkv.ErrNotFound || _id.Flag() == flagDeleted`
				// check dereferenced a nil pointer for any other error.
				panic(err)
			}
			// A deleted document is reported as missing; the flag check must
			// happen after a successful fetch, when _id is non-nil.
			if _id.Flag() == flagDeleted {
				w.WriteHeader(http.StatusNotFound)
				return
			}
			// FIXME(tsileo): fix-precondition, suport If-Match
			if etag := r.Header.Get("If-None-Match"); etag != "" {
				if etag == _id.VersionString() {
					w.WriteHeader(http.StatusNotModified)
					return
				}
			}
			w.Header().Set("ETag", _id.VersionString())
			addSpecialFields(doc, _id)
			// HEAD only returns the headers
			if r.Method == "GET" {
				httputil.MarshalAndWrite(r, w, map[string]interface{}{
					"data":     doc,
					"pointers": pointers,
				})
			}
			return
		case "PATCH":
			// Patch the document (JSON-Patch/RFC6902)
			// Lock the document before making any change to it, this way the PATCH operation is *truly* atomic/safe
			docstore.locker.Lock(sid)
			defer docstore.locker.Unlock(sid)
			ctx := context.Background()
			// Fetch the current doc as raw JSON
			js := []byte{}
			if _id, _, err = docstore.Fetch(collection, sid, &js, false, -1); err != nil {
				if err == vkv.ErrNotFound {
					// Document doesn't exist, returns a status 404
					w.WriteHeader(http.StatusNotFound)
					return
				}
				panic(err)
			}
			// FIXME(tsileo): make it required?
			if etag := r.Header.Get("If-Match"); etag != "" {
				if etag != _id.VersionString() {
					w.WriteHeader(http.StatusPreconditionFailed)
					return
				}
			}
			buf, err := ioutil.ReadAll(r.Body)
			if err != nil {
				panic(err)
			}
			patch, err := jsonpatch.DecodePatch(buf)
			if err != nil {
				panic(err)
			}
			docstore.logger.Debug("patch decoded", "patch", patch)
			pdata, err := patch.Apply(js)
			if err != nil {
				panic(err)
			}
			// Back to msgpack
			ndoc := map[string]interface{}{}
			if err := json.Unmarshal(pdata, &ndoc); err != nil {
				panic(err)
			}
			data, err := msgpack.Marshal(ndoc)
			if err != nil {
				panic(err)
			}
			// TODO(tsileo): also check for reserved keys here
			// Store the patched doc as a new version (keeping the flag byte)
			nkv, err := docstore.kvStore.Put(ctx, fmt.Sprintf(keyFmt, collection, _id.String()), "", append([]byte{_id.Flag()}, data...), -1)
			if err != nil {
				panic(err)
			}
			_id.SetVersion(nkv.Version)
			// FIXME(tsileo): move this to the hub via the kvstore
			if indexes, ok := docstore.indexes[collection]; ok {
				for _, index := range indexes {
					if err := index.Index(_id, ndoc); err != nil {
						panic(err)
					}
				}
			}
			w.Header().Set("ETag", _id.VersionString())
			created := time.Unix(0, _id.Ts()).UTC().Format(time.RFC3339)
			httputil.MarshalAndWrite(r, w, map[string]interface{}{
				"_id":      _id.String(),
				"_created": created,
				"_version": _id.VersionString(),
			})
			return
		case "POST":
			// Update the whole document
			// Parse the update query
			var newDoc map[string]interface{}
			if err := json.NewDecoder(r.Body).Decode(&newDoc); err != nil {
				panic(err)
			}
			// Perform the update
			_id, err := docstore.Update(collection, sid, newDoc, r.Header.Get("If-Match"))
			switch err {
			case nil:
			case ErrDocNotFound:
				w.WriteHeader(http.StatusNotFound)
				// BUGFIX: without this return, execution fell through to
				// `_id.VersionString()` below and dereferenced a nil _id.
				return
			case ErrPreconditionFailed:
				w.WriteHeader(http.StatusPreconditionFailed)
				return
			default:
				panic(err)
			}
			w.Header().Set("ETag", _id.VersionString())
			created := time.Unix(0, _id.Ts()).UTC().Format(time.RFC3339)
			httputil.MarshalAndWrite(r, w, map[string]interface{}{
				"_id":      _id.String(),
				"_created": created,
				"_version": _id.VersionString(),
			})
			return
		case "DELETE":
			_, err := docstore.Remove(collection, sid)
			switch err {
			case nil:
			case ErrDocNotFound:
				w.WriteHeader(http.StatusNotFound)
			default:
				panic(err)
			}
		}
	}
}
// HTTP handler listing all versions/revisions of a single doc (GET/HEAD),
// paginated by version timestamp via the `cursor` query parameter.
func (docstore *DocStore) docVersionsHandler() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		collection := vars["collection"]
		if collection == "" {
			httputil.WriteJSONError(w, http.StatusInternalServerError, "Missing collection in the URL")
			return
		}
		sid := vars["_id"]
		if sid == "" {
			httputil.WriteJSONError(w, http.StatusInternalServerError, "Missing _id in the URL")
			return
		}
		switch r.Method {
		case "GET", "HEAD":
			q := httputil.NewQuery(r.URL.Query())
			limit, err := q.GetIntDefault("limit", 50)
			if err != nil {
				httputil.Error(w, err)
				return
			}
			// Default cursor: now, i.e. start from the latest version
			cursor, err := q.GetInt64Default("cursor", time.Now().UTC().UnixNano())
			if err != nil {
				httputil.Error(w, err)
				return
			}
			fetchPointers, err := q.GetBoolDefault("fetch_pointers", true)
			if err != nil {
				httputil.Error(w, err)
				return
			}
			// Serve the document JSON encoded
			// permissions.CheckPerms(r, PermCollectionName, collection, PermRead)
			docs, pointers, cursor, err := docstore.FetchVersions(collection, sid, cursor, limit, fetchPointers)
			if err != nil {
				if err == vkv.ErrNotFound {
					// Document doesn't exist, returns a status 404
					w.WriteHeader(http.StatusNotFound)
					return
				}
				// BUGFIX: the old code also checked `_id.Flag() == flagDeleted`
				// here, but _id was declared and never assigned, so any
				// non-NotFound error dereferenced a nil pointer.
				panic(err)
			}
			// HEAD only returns the headers/status
			if r.Method == "GET" {
				httputil.MarshalAndWrite(r, w, map[string]interface{}{
					"pointers": pointers,
					"data":     docs,
					"pagination": map[string]interface{}{
						"cursor":   cursor,
						"has_more": len(docs) == limit,
						"count":    len(docs),
						"per_page": limit,
					},
				})
			}
		default:
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}
	}
}
|
package mandira
import (
"testing"
)
// M is a shorthand for an untyped template context map.
type M map[string]interface{}

// Test describes a single template-rendering case: the template source, the
// context it is rendered against, and the exact expected output.
type Test struct {
	template string
	context  interface{}
	expected string
}
// Run renders the case's template against its context and reports a test
// failure when the output differs from the expected string.
func (t *Test) Run(tt *testing.T) {
	if got := Render(t.template, t.context); got != t.expected {
		tt.Errorf("%v expected %v, got %v", t.template, t.expected, got)
	}
}
// Data is a fixture pairing a section flag with a value.
type Data struct {
	A bool
	B string
}

// User is a fixture exposing exported fields and methods to templates.
type User struct {
	Name string
	Id   int64
}

// settings is an unexported fixture returned by User methods.
type settings struct {
	Allow bool
}
// Func1 returns the user's name (value receiver).
func (u User) Func1() string {
	return u.Name
}

// Func2 is the pointer-receiver variant of Func1.
func (u *User) Func2() string {
	return u.Name
}

// Func3 returns a non-nil map and a nil error, usable as a section context.
func (u *User) Func3() (map[string]string, error) {
	return map[string]string{"name": u.Name}, nil
}

// Func4 returns a nil map, so a section over it renders nothing.
func (u *User) Func4() (map[string]string, error) {
	return nil, nil
}

// Func5 returns a struct pointer for context-chaining tests.
func (u *User) Func5() (*settings, error) {
	return &settings{true}, nil
}

// Func6 returns a single-element slice for list-section tests.
func (u *User) Func6() ([]interface{}, error) {
	var v []interface{}
	v = append(v, &settings{true})
	return v, nil
}

// Truefunc1 always reports true (value receiver).
func (u User) Truefunc1() bool {
	return true
}

// Truefunc2 always reports true (pointer receiver).
func (u *User) Truefunc2() bool {
	return true
}
// makeVector returns a slice holding n identical *User fixtures
// (nil when n <= 0, matching append-on-nil semantics).
func makeVector(n int) []interface{} {
	var users []interface{}
	for remaining := n; remaining > 0; remaining-- {
		users = append(users, &User{"Mike", 1})
	}
	return users
}
// Category is a fixture used to exercise method lookups in templates.
type Category struct {
	Tag         string
	Description string
}

// DisplayName combines the tag and the description into a single label.
func (c Category) DisplayName() string {
	label := c.Tag
	label += " - "
	label += c.Description
	return label
}
// TestMustacheEquivalentBasics covers the mustache-compatible subset:
// plain interpolation, escaping, comments, missing keys, sections over
// bools/slices/maps/structs, method dispatch, and context chaining.
func TestMustacheEquivalentBasics(t *testing.T) {
	tests := []Test{
		{"Hello, World", nil, "Hello, World"},
		{"Hello, {{name}}", M{"name": "World"}, "Hello, World"},
		{"{{var}}", M{"var": "5 > 2"}, "5 > 2"},
		{"{{{var}}}", M{"var": "5 > 2"}, "5 > 2"},
		{"{{a}}{{b}}{{c}}{{d}}", M{"a": "a", "b": "b", "c": "c", "d": "d"}, "abcd"},
		{"0{{a}}1{{b}}23{{c}}456{{d}}89", M{"a": "a", "b": "b", "c": "c", "d": "d"}, "0a1b23c456d89"},
		{"hello {{! comment }}world", M{}, "hello world"},
		//does not exist
		{`{{dne}}`, M{"name": "world"}, ""},
		{`{{dne}}`, User{"Mike", 1}, ""},
		{`{{dne}}`, &User{"Mike", 1}, ""},
		{`{{#has}}hi{{/has}}`, &User{"Mike", 1}, ""},
		//section tests
		{`{{#A}}{{B}}{{/A}}`, Data{true, "hello"}, "hello"},
		{`{{#A}}{{{B}}}{{/A}}`, Data{true, "5 > 2"}, "5 > 2"},
		{`{{#A}}{{B}}{{/A}}`, Data{true, "5 > 2"}, "5 > 2"},
		{`{{#A}}{{B}}{{/A}}`, Data{false, "hello"}, ""},
		{`{{a}}{{#b}}{{b}}{{/b}}{{c}}`, M{"a": "a", "b": "b", "c": "c"}, "abc"},
		{`{{#A}}{{B}}{{/A}}`, struct{ A []struct{ B string } }{
			[]struct{ B string }{{"a"}, {"b"}, {"c"}}},
			"abc",
		},
		{`{{#A}}{{b}}{{/A}}`, struct{ A []map[string]string }{
			[]map[string]string{{"b": "a"}, {"b": "b"}, {"b": "c"}}}, "abc"},
		{`{{#users}}{{Name}}{{/users}}`, M{"users": []User{{"Mike", 1}}}, "Mike"},
		// nil and empty slices render sections as absent
		{`{{#users}}gone{{Name}}{{/users}}`, M{"users": nil}, ""},
		{`{{#users}}gone{{Name}}{{/users}}`, M{"users": (*User)(nil)}, ""},
		{`{{#users}}gone{{Name}}{{/users}}`, M{"users": []User{}}, ""},
		{`{{#users}}{{Name}}{{/users}}`, M{"users": []*User{{"Mike", 1}}}, "Mike"},
		{`{{#users}}{{Name}}{{/users}}`, M{"users": []interface{}{&User{"Mike", 12}}}, "Mike"},
		{`{{#users}}{{Name}}{{/users}}`, M{"users": makeVector(1)}, "Mike"},
		{`{{Name}}`, User{"Mike", 1}, "Mike"},
		{`{{Name}}`, &User{"Mike", 1}, "Mike"},
		{"{{#users}}\n{{Name}}\n{{/users}}", M{"users": makeVector(2)}, "Mike\nMike\n"},
		{"{{#users}}\r\n{{Name}}\r\n{{/users}}", M{"users": makeVector(2)}, "Mike\r\nMike\r\n"},
		//function tests
		{`{{#users}}{{Func1}}{{/users}}`, M{"users": []User{{"Mike", 1}}}, "Mike"},
		{`{{#users}}{{Func1}}{{/users}}`, M{"users": []*User{{"Mike", 1}}}, "Mike"},
		{`{{#users}}{{Func2}}{{/users}}`, M{"users": []*User{{"Mike", 1}}}, "Mike"},
		{`{{#users}}{{#Func3}}{{name}}{{/Func3}}{{/users}}`, M{"users": []*User{{"Mike", 1}}}, "Mike"},
		{`{{#users}}{{#Func4}}{{name}}{{/Func4}}{{/users}}`, M{"users": []*User{{"Mike", 1}}}, ""},
		{`{{#Truefunc1}}abcd{{/Truefunc1}}`, User{"Mike", 1}, "abcd"},
		{`{{#Truefunc1}}abcd{{/Truefunc1}}`, &User{"Mike", 1}, "abcd"},
		{`{{#Truefunc2}}abcd{{/Truefunc2}}`, &User{"Mike", 1}, "abcd"},
		{`{{#Func5}}{{#Allow}}abcd{{/Allow}}{{/Func5}}`, &User{"Mike", 1}, "abcd"},
		{`{{#user}}{{#Func5}}{{#Allow}}abcd{{/Allow}}{{/Func5}}{{/user}}`, M{"user": &User{"Mike", 1}}, "abcd"},
		{`{{#user}}{{#Func6}}{{#Allow}}abcd{{/Allow}}{{/Func6}}{{/user}}`, M{"user": &User{"Mike", 1}}, "abcd"},
		//context chaining
		{`hello {{#section}}{{name}}{{/section}}`, M{"section": map[string]string{"name": "world"}}, "hello world"},
		{`hello {{#section}}{{name}}{{/section}}`, M{"name": "bob", "section": map[string]string{"name": "world"}}, "hello world"},
		{`hello {{#bool}}{{#section}}{{name}}{{/section}}{{/bool}}`, M{"bool": true, "section": map[string]string{"name": "world"}}, "hello world"},
		{`{{#users}}{{canvas}}{{/users}}`, M{"canvas": "hello", "users": []User{{"Mike", 1}}}, "hello"},
		{`{{#categories}}{{DisplayName}}{{/categories}}`, map[string][]*Category{
			"categories": {&Category{"a", "b"}},
		}, "a - b"},
	}
	for _, test := range tests {
		test.Run(t)
	}
}
// TestSample renders the canonical mustache demo template twice, once with
// the {{?if}} condition true and once false, checking the conditional block
// is included or dropped accordingly.
func TestSample(t *testing.T) {
	tests := []Test{
		{`Hello {{name}}
You have just won ${{value}}!
{{?if in_monaco}}
Well, ${{taxed_value}}, after taxes.
{{/if}}`, M{
			"name":        "Jason",
			"value":       10000,
			"taxed_value": 10000.0,
			"in_monaco":   true,
		},
			`Hello Jason
You have just won $10000!
Well, $10000.0, after taxes.`},
		{`Hello {{name}}
You have just won ${{value}}!
{{?if in_monaco}}
Well, ${{taxed_value}}, after taxes.
{{/if}}`, M{
			"name":        "Jason",
			"value":       10000,
			"taxed_value": 10000.0,
			"in_monaco":   false,
		},
			`Hello Jason
You have just won $10000!
`},
	}
	for _, test := range tests {
		test.Run(t)
	}
}
// TestFilters exercises the pipe-style filters (upper, len, index, title,
// format, join, divisibleby), including filter chaining and recovery from
// an out-of-range index.
func TestFilters(t *testing.T) {
	// TODO: test date filter, which must be written probably
	names := []string{"john", "bob", "fred"}
	tests := []Test{
		{"{{name}}", M{"name": "Jason"}, "Jason"},
		{"{{name|upper}}", M{"name": "jason"}, "JASON"},
		{"{{name|len}}", M{"name": "jason"}, "5"},
		{"{{name|index(3)}}", M{"name": "jason"}, "o"},
		{"{{name|index(0)}}", M{"name": []string{"john", "bob", "fred"}}, "john"},
		{"{{name|index(0)|upper}}", M{"name": names}, "JOHN"},
		{"{{name|index(1)|title}}", M{"name": names}, "Bob"},
		// index error returns empty string
		{"{{name|index(5)}}", M{"name": names}, ""},
		// index error doesn't blow up later on a filter chain
		{"{{name|index(5)|title}}", M{"name": names}, ""},
		{`{{name|format(">%s<")}}`, M{"name": "jason"}, ">jason<"},
		{`{{{name|format(">%s<")}}}`, M{"name": "jason"}, ">jason<"},
		{`{{names|join(", ")}}`, M{"names": names}, "john, bob, fred"},
		{`{{names|len|divisibleby(2)}}`, M{"names": names}, "false"},
		{`{{names|len|divisibleby(3)}}`, M{"names": names}, "true"},
	}
	for _, test := range tests {
		test.Run(t)
	}
}
// TestIfBlocks is an empty placeholder for dedicated {{?if}} coverage.
// TODO(review): add cases; conditionals are currently only exercised
// indirectly by TestSample.
func TestIfBlocks(t *testing.T) {
}
check in eval & filters code
package mandira
import (
"testing"
)
// M is a shorthand for an untyped template context map.
type M map[string]interface{}

// Test describes a single template-rendering case: the template source, the
// context it is rendered against, and the exact expected output.
type Test struct {
	template string
	context  interface{}
	expected string
}
// Run renders the case's template against its context and reports a test
// failure when the output differs from the expected string.
func (t *Test) Run(tt *testing.T) {
	if got := Render(t.template, t.context); got != t.expected {
		tt.Errorf("%v expected %v, got %v", t.template, t.expected, got)
	}
}
// Data is a fixture pairing a section flag with a value.
type Data struct {
	A bool
	B string
}

// User is a fixture exposing exported fields and methods to templates.
type User struct {
	Name string
	Id   int64
}

// settings is an unexported fixture returned by User methods.
type settings struct {
	Allow bool
}
// Func1 returns the user's name (value receiver).
func (u User) Func1() string {
	return u.Name
}

// Func2 is the pointer-receiver variant of Func1.
func (u *User) Func2() string {
	return u.Name
}

// Func3 returns a non-nil map and a nil error, usable as a section context.
func (u *User) Func3() (map[string]string, error) {
	return map[string]string{"name": u.Name}, nil
}

// Func4 returns a nil map, so a section over it renders nothing.
func (u *User) Func4() (map[string]string, error) {
	return nil, nil
}

// Func5 returns a struct pointer for context-chaining tests.
func (u *User) Func5() (*settings, error) {
	return &settings{true}, nil
}

// Func6 returns a single-element slice for list-section tests.
func (u *User) Func6() ([]interface{}, error) {
	var v []interface{}
	v = append(v, &settings{true})
	return v, nil
}

// Truefunc1 always reports true (value receiver).
func (u User) Truefunc1() bool {
	return true
}

// Truefunc2 always reports true (pointer receiver).
func (u *User) Truefunc2() bool {
	return true
}
// makeVector returns a slice holding n identical *User fixtures
// (nil when n <= 0, matching append-on-nil semantics).
func makeVector(n int) []interface{} {
	var users []interface{}
	for remaining := n; remaining > 0; remaining-- {
		users = append(users, &User{"Mike", 1})
	}
	return users
}
// Category is a fixture used to exercise method lookups in templates.
type Category struct {
	Tag         string
	Description string
}

// DisplayName combines the tag and the description into a single label.
func (c Category) DisplayName() string {
	label := c.Tag
	label += " - "
	label += c.Description
	return label
}
// TestMustacheEquivalentBasics covers the mustache-compatible subset:
// plain interpolation, escaping, comments, missing keys, sections over
// bools/slices/maps/structs, method dispatch, and context chaining.
func TestMustacheEquivalentBasics(t *testing.T) {
	tests := []Test{
		{"Hello, World", nil, "Hello, World"},
		{"Hello, {{name}}", M{"name": "World"}, "Hello, World"},
		{"{{var}}", M{"var": "5 > 2"}, "5 > 2"},
		{"{{{var}}}", M{"var": "5 > 2"}, "5 > 2"},
		{"{{a}}{{b}}{{c}}{{d}}", M{"a": "a", "b": "b", "c": "c", "d": "d"}, "abcd"},
		{"0{{a}}1{{b}}23{{c}}456{{d}}89", M{"a": "a", "b": "b", "c": "c", "d": "d"}, "0a1b23c456d89"},
		{"hello {{! comment }}world", M{}, "hello world"},
		//does not exist
		{`{{dne}}`, M{"name": "world"}, ""},
		{`{{dne}}`, User{"Mike", 1}, ""},
		{`{{dne}}`, &User{"Mike", 1}, ""},
		{`{{#has}}hi{{/has}}`, &User{"Mike", 1}, ""},
		//section tests
		{`{{#A}}{{B}}{{/A}}`, Data{true, "hello"}, "hello"},
		{`{{#A}}{{{B}}}{{/A}}`, Data{true, "5 > 2"}, "5 > 2"},
		{`{{#A}}{{B}}{{/A}}`, Data{true, "5 > 2"}, "5 > 2"},
		{`{{#A}}{{B}}{{/A}}`, Data{false, "hello"}, ""},
		{`{{a}}{{#b}}{{b}}{{/b}}{{c}}`, M{"a": "a", "b": "b", "c": "c"}, "abc"},
		{`{{#A}}{{B}}{{/A}}`, struct{ A []struct{ B string } }{
			[]struct{ B string }{{"a"}, {"b"}, {"c"}}},
			"abc",
		},
		{`{{#A}}{{b}}{{/A}}`, struct{ A []map[string]string }{
			[]map[string]string{{"b": "a"}, {"b": "b"}, {"b": "c"}}}, "abc"},
		{`{{#users}}{{Name}}{{/users}}`, M{"users": []User{{"Mike", 1}}}, "Mike"},
		// nil and empty slices render sections as absent
		{`{{#users}}gone{{Name}}{{/users}}`, M{"users": nil}, ""},
		{`{{#users}}gone{{Name}}{{/users}}`, M{"users": (*User)(nil)}, ""},
		{`{{#users}}gone{{Name}}{{/users}}`, M{"users": []User{}}, ""},
		{`{{#users}}{{Name}}{{/users}}`, M{"users": []*User{{"Mike", 1}}}, "Mike"},
		{`{{#users}}{{Name}}{{/users}}`, M{"users": []interface{}{&User{"Mike", 12}}}, "Mike"},
		{`{{#users}}{{Name}}{{/users}}`, M{"users": makeVector(1)}, "Mike"},
		{`{{Name}}`, User{"Mike", 1}, "Mike"},
		{`{{Name}}`, &User{"Mike", 1}, "Mike"},
		{"{{#users}}\n{{Name}}\n{{/users}}", M{"users": makeVector(2)}, "Mike\nMike\n"},
		{"{{#users}}\r\n{{Name}}\r\n{{/users}}", M{"users": makeVector(2)}, "Mike\r\nMike\r\n"},
		//function tests
		{`{{#users}}{{Func1}}{{/users}}`, M{"users": []User{{"Mike", 1}}}, "Mike"},
		{`{{#users}}{{Func1}}{{/users}}`, M{"users": []*User{{"Mike", 1}}}, "Mike"},
		{`{{#users}}{{Func2}}{{/users}}`, M{"users": []*User{{"Mike", 1}}}, "Mike"},
		{`{{#users}}{{#Func3}}{{name}}{{/Func3}}{{/users}}`, M{"users": []*User{{"Mike", 1}}}, "Mike"},
		{`{{#users}}{{#Func4}}{{name}}{{/Func4}}{{/users}}`, M{"users": []*User{{"Mike", 1}}}, ""},
		{`{{#Truefunc1}}abcd{{/Truefunc1}}`, User{"Mike", 1}, "abcd"},
		{`{{#Truefunc1}}abcd{{/Truefunc1}}`, &User{"Mike", 1}, "abcd"},
		{`{{#Truefunc2}}abcd{{/Truefunc2}}`, &User{"Mike", 1}, "abcd"},
		{`{{#Func5}}{{#Allow}}abcd{{/Allow}}{{/Func5}}`, &User{"Mike", 1}, "abcd"},
		{`{{#user}}{{#Func5}}{{#Allow}}abcd{{/Allow}}{{/Func5}}{{/user}}`, M{"user": &User{"Mike", 1}}, "abcd"},
		{`{{#user}}{{#Func6}}{{#Allow}}abcd{{/Allow}}{{/Func6}}{{/user}}`, M{"user": &User{"Mike", 1}}, "abcd"},
		//context chaining
		{`hello {{#section}}{{name}}{{/section}}`, M{"section": map[string]string{"name": "world"}}, "hello world"},
		{`hello {{#section}}{{name}}{{/section}}`, M{"name": "bob", "section": map[string]string{"name": "world"}}, "hello world"},
		{`hello {{#bool}}{{#section}}{{name}}{{/section}}{{/bool}}`, M{"bool": true, "section": map[string]string{"name": "world"}}, "hello world"},
		{`{{#users}}{{canvas}}{{/users}}`, M{"canvas": "hello", "users": []User{{"Mike", 1}}}, "hello"},
		{`{{#categories}}{{DisplayName}}{{/categories}}`, map[string][]*Category{
			"categories": {&Category{"a", "b"}},
		}, "a - b"},
	}
	for _, test := range tests {
		test.Run(t)
	}
}
// TestSample renders the canonical mustache demo template twice, once with
// the {{?if}} condition true and once false, checking the conditional block
// is included or dropped accordingly.
func TestSample(t *testing.T) {
	tests := []Test{
		{`Hello {{name}}
You have just won ${{value}}!
{{?if in_monaco}}
Well, ${{taxed_value}}, after taxes.
{{/if}}`, M{
			"name":        "Jason",
			"value":       10000,
			"taxed_value": 10000.0,
			"in_monaco":   true,
		},
			`Hello Jason
You have just won $10000!
Well, $10000.0, after taxes.`},
		{`Hello {{name}}
You have just won ${{value}}!
{{?if in_monaco}}
Well, ${{taxed_value}}, after taxes.
{{/if}}`, M{
			"name":        "Jason",
			"value":       10000,
			"taxed_value": 10000.0,
			"in_monaco":   false,
		},
			`Hello Jason
You have just won $10000!
`},
	}
	for _, test := range tests {
		test.Run(t)
	}
}
// TestFilters exercises the pipe-style filters (upper, len, index, title,
// format, join, divisibleby), including filter chaining, recovery from an
// out-of-range index, and a context-supplied filter argument.
func TestFilters(t *testing.T) {
	// TODO: test date filter, which must be written probably
	names := []string{"john", "bob", "fred"}
	tests := []Test{
		{"{{name}}", M{"name": "Jason"}, "Jason"},
		{"{{name|upper}}", M{"name": "jason"}, "JASON"},
		{"{{name|len}}", M{"name": "jason"}, "5"},
		{"{{name|index(3)}}", M{"name": "jason"}, "o"},
		{"{{name|index(0)}}", M{"name": []string{"john", "bob", "fred"}}, "john"},
		{"{{name|index(0)|upper}}", M{"name": names}, "JOHN"},
		{"{{name|index(1)|title}}", M{"name": names}, "Bob"},
		// index error returns empty string
		{"{{name|index(5)}}", M{"name": names}, ""},
		// index error doesn't blow up later on a filter chain
		{"{{name|index(5)|title}}", M{"name": names}, ""},
		{`{{name|format(">%s<")}}`, M{"name": "jason"}, ">jason<"},
		{`{{{name|format(">%s<")}}}`, M{"name": "jason"}, ">jason<"},
		{`{{names|join(", ")}}`, M{"names": names}, "john, bob, fred"},
		{`{{names|len|divisibleby(2)}}`, M{"names": names}, "false"},
		{`{{names|len|divisibleby(3)}}`, M{"names": names}, "true"},
		// filter argument resolved from the context instead of a literal
		{`{{names|join(joiner)}}`, M{"names": names, "joiner": ", "}, "john, bob, fred"},
	}
	for _, test := range tests {
		test.Run(t)
	}
}
// TestIfBlocks is a placeholder for dedicated {{?if}} block tests.
// NOTE(review): intentionally empty — some conditional coverage exists in
// TestSample; confirm whether this should be filled in or removed.
func TestIfBlocks(t *testing.T) {
}
|
// Copyright 2016-2018 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package endpoint
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"math/rand"
"net"
"os"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"unsafe"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/common/addressing"
"github.com/cilium/cilium/pkg/bpf"
"github.com/cilium/cilium/pkg/completion"
"github.com/cilium/cilium/pkg/controller"
identityPkg "github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/k8s"
"github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
cilium_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
cilium_client_v2 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2"
pkgLabels "github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/mac"
"github.com/cilium/cilium/pkg/maps/cidrmap"
"github.com/cilium/cilium/pkg/maps/ctmap"
"github.com/cilium/cilium/pkg/maps/lxcmap"
"github.com/cilium/cilium/pkg/maps/policymap"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/monitor/notifications"
"github.com/cilium/cilium/pkg/node"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy"
"github.com/cilium/cilium/pkg/proxy/accesslog"
"github.com/cilium/cilium/pkg/u8proto"
"github.com/cilium/cilium/pkg/versioncheck"
go_version "github.com/hashicorp/go-version"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/rest"
"github.com/sirupsen/logrus"
)
const (
	// maxLogs bounds the number of retained log entries.
	// NOTE(review): the consumer is not visible in this part of the file —
	// presumably the endpoint status log; confirm before relying on this.
	maxLogs = 256
)
var (
	// EndpointMutableOptionLibrary holds the endpoint options that may be
	// toggled at runtime, as provided by the option package.
	EndpointMutableOptionLibrary = option.GetEndpointMutableOptionLibrary()

	// ciliumEPControllerLimit is the range of k8s versions with which we are
	// willing to run the EndpointCRD controllers
	ciliumEPControllerLimit = versioncheck.MustCompile("> 1.6")

	// ciliumEndpointSyncControllerK8sClient is a k8s client shared by the
	// RunK8sCiliumEndpointSync and RunK8sCiliumEndpointSyncGC. They obtain the
	// controller via getCiliumClient and the sync.Once is used to avoid race.
	ciliumEndpointSyncControllerOnce      sync.Once
	ciliumEndpointSyncControllerK8sClient clientset.Interface

	// ciliumUpdateStatusVerConstr is the minimal version required to perform
	// a CRD UpdateStatus.
	ciliumUpdateStatusVerConstr = versioncheck.MustCompile(">= 1.11.0")

	// k8sServerVer caches the apiserver version retrieved in
	// RunK8sCiliumEndpointSync.
	k8sServerVer *go_version.Version
)
// getCiliumClient builds and returns a k8s auto-generated client for cilium
// objects. The underlying clientset is built at most once (guarded by
// ciliumEndpointSyncControllerOnce) and shared by all callers; any build
// error is reported to the caller that triggered the build, while later
// callers observe the nil shared client and get a sentinel error instead.
func getCiliumClient() (ciliumClient cilium_client_v2.CiliumV2Interface, err error) {
	// This allows us to reuse the k8s client
	ciliumEndpointSyncControllerOnce.Do(func() {
		var (
			restConfig *rest.Config
			k8sClient  *clientset.Clientset
		)
		// The assignments below write the named return value err; the bare
		// returns exit only this closure, leaving the shared client nil on
		// failure.
		restConfig, err = k8s.CreateConfig()
		if err != nil {
			return
		}
		k8sClient, err = clientset.NewForConfig(restConfig)
		if err != nil {
			return
		}
		ciliumEndpointSyncControllerK8sClient = k8sClient
	})

	if err != nil {
		return nil, err
	}

	// This guards against the situation where another invocation of this
	// function (in another thread or previous in time) might have returned an
	// error and not initialized ciliumEndpointSyncControllerK8sClient
	if ciliumEndpointSyncControllerK8sClient == nil {
		// Error strings are lowercase and unpunctuated per Go convention
		// (staticcheck ST1005); previously started with a capital letter.
		return nil, errors.New("no initialised k8s Cilium CRD client")
	}

	return ciliumEndpointSyncControllerK8sClient.CiliumV2(), nil
}
// RunK8sCiliumEndpointSyncGC starts the node-singleton sweeper for
// CiliumEndpoint objects where the managing node is no longer running. These
// objects are created by the sync-to-k8s-ciliumendpoint controller on each
// Endpoint.
// The general steps are:
//   - get list of nodes
//   - only run with probability 1/nodes
//   - get list of CEPs
//   - for each CEP
//       delete CEP if the corresponding pod does not exist
// CiliumEndpoint objects have the same name as the pod they represent
func RunK8sCiliumEndpointSyncGC() {
	var (
		controllerName = fmt.Sprintf("sync-to-k8s-ciliumendpoint-gc (%v)", node.GetName())
		scopedLog      = log.WithField("controller", controllerName)
		// random source to throttle how often this controller runs cluster-wide
		runThrottler = rand.New(rand.NewSource(time.Now().UnixNano()))
	)

	// this is a sanity check
	if !k8s.IsEnabled() {
		scopedLog.WithField("name", controllerName).Warn("Not running controller because k8s is disabled")
		return
	}
	sv, err := k8s.GetServerVersion()
	if err != nil {
		scopedLog.WithError(err).Error("unable to retrieve kubernetes serverversion")
		return
	}
	if !ciliumEPControllerLimit.Check(sv) {
		// BUG FIX: "expected" must carry the version constraint we require
		// and "found" the version the apiserver reported; they were swapped.
		scopedLog.WithFields(logrus.Fields{
			"expected": ciliumEPControllerLimit,
			"found":    sv,
		}).Warn("cannot run with this k8s version")
		return
	}
	ciliumClient, err := getCiliumClient()
	if err != nil {
		scopedLog.WithError(err).Error("Not starting controller because unable to get cilium k8s client")
		return
	}
	k8sClient := k8s.Client()

	// this dummy manager is needed only to add this controller to the global list
	controller.NewManager().UpdateController(controllerName,
		controller.ControllerParams{
			RunInterval: 1 * time.Minute,
			DoFunc: func() error {
				// Don't run if there are no other known nodes
				// Only run with a probability of 1/(number of nodes in cluster). This
				// is because this controller runs on every node on the same interval
				// but only one is needed to run.
				nodes := node.GetNodes()
				if len(nodes) <= 1 || runThrottler.Int63n(int64(len(nodes))) != 0 {
					return nil
				}

				// Build the set of "podName:namespace" keys for every pod in
				// the cluster; a CEP without a matching pod is orphaned.
				clusterPodSet := map[string]bool{}
				clusterPods, err := k8sClient.CoreV1().Pods("").List(meta_v1.ListOptions{})
				if err != nil {
					return err
				}
				for _, pod := range clusterPods.Items {
					podFullName := pod.Name + ":" + pod.Namespace
					clusterPodSet[podFullName] = true
				}

				// "" is all-namespaces
				ceps, err := ciliumClient.CiliumEndpoints(meta_v1.NamespaceAll).List(meta_v1.ListOptions{})
				if err != nil {
					scopedLog.WithError(err).Debug("Cannot list CEPs")
					return err
				}
				for _, cep := range ceps.Items {
					cepFullName := cep.Name + ":" + cep.Namespace
					if _, found := clusterPodSet[cepFullName]; !found {
						// BUG FIX: use a per-CEP logger. The previous code
						// re-assigned the captured scopedLog, permanently
						// accumulating endpoint fields into every later log
						// line of this controller.
						cepLog := scopedLog.WithFields(logrus.Fields{
							logfields.EndpointID: cep.Status.ID,
							logfields.K8sPodName: cepFullName,
						})
						cepLog.Debug("Orphaned CiliumEndpoint is being garbage collected")
						if err := ciliumClient.CiliumEndpoints(cep.Namespace).Delete(cep.Name, &meta_v1.DeleteOptions{}); err != nil {
							cepLog.WithError(err).Debug("Unable to delete CEP")
							return err
						}
					}
				}
				return nil
			},
		})
}
const (
	// StateCreating is used to set the endpoint is being created.
	StateCreating = string(models.EndpointStateCreating)

	// StateWaitingForIdentity is used to set if the endpoint is waiting
	// for an identity from the KVStore.
	StateWaitingForIdentity = string(models.EndpointStateWaitingForIdentity)

	// StateReady specifies if the endpoint is ready to be used.
	StateReady = string(models.EndpointStateReady)

	// StateWaitingToRegenerate specifies when the endpoint needs to be
	// regenerated, but regeneration has not started yet.
	StateWaitingToRegenerate = string(models.EndpointStateWaitingToRegenerate)

	// StateRegenerating specifies when the endpoint is being regenerated.
	StateRegenerating = string(models.EndpointStateRegenerating)

	// StateDisconnecting indicates that the endpoint is being disconnected
	StateDisconnecting = string(models.EndpointStateDisconnecting)

	// StateDisconnected is used to set the endpoint is disconnected.
	StateDisconnected = string(models.EndpointStateDisconnected)

	// StateRestoring is used to set the endpoint is being restored.
	StateRestoring = string(models.EndpointStateRestoring)

	// CallsMapName specifies the base prefix for EP specific call map.
	CallsMapName = "cilium_calls_"

	// PolicyGlobalMapName specifies the global tail call map for EP
	// handle_policy() lookup.
	PolicyGlobalMapName = "cilium_policy"

	// HealthCEPPrefix is the prefix used to name the cilium health endpoints' CEP
	HealthCEPPrefix = "cilium-health-"
)
// Compile-time check that *Endpoint satisfies notifications.RegenNotificationInfo.
var _ notifications.RegenNotificationInfo = &Endpoint{}

// PolicyMapState is a state of a policy map.
type PolicyMapState map[policymap.PolicyKey]PolicyMapStateEntry

// PolicyMapStateEntry is the configuration associated with a PolicyKey in a
// PolicyMapState. This is a minimized version of policymap.PolicyEntry.
type PolicyMapStateEntry struct {
	// ProxyPort is the proxy port, in host byte order.
	// If 0 (default), there is no proxy redirection for the corresponding
	// PolicyKey.
	ProxyPort uint16
}
// Endpoint represents a container or similar which can be individually
// addressed on L3 with its own IP addresses. This structure is managed by the
// endpoint manager in pkg/endpointmanager.
//
//
// WARNING - STABLE API
// This structure is written as JSON to StateDir/{ID}/lxc_config.h to allow to
// restore endpoints when the agent is being restarted. The restore operation
// will read the file and re-create all endpoints with all fields which are not
// marked as private to JSON marshal. Do NOT modify this structure in ways which
// is not JSON forward compatible.
//
type Endpoint struct {
	// ID of the endpoint, unique in the scope of the node
	ID uint16

	// mutex protects write operations to this endpoint structure except
	// for the logger field which has its own mutex
	mutex lock.RWMutex

	// ContainerName is the name given to the endpoint by the container runtime
	ContainerName string

	// ContainerID is the container ID that docker has assigned to the endpoint
	// Note: The JSON tag was kept for backward compatibility.
	ContainerID string `json:"dockerID,omitempty"`

	// DockerNetworkID is the network ID of the libnetwork network if the
	// endpoint is a docker managed container which uses libnetwork
	DockerNetworkID string

	// DockerEndpointID is the Docker network endpoint ID if managed by
	// libnetwork
	DockerEndpointID string

	// IfName is the name of the host facing interface (veth pair) which
	// connects into the endpoint
	IfName string

	// IfIndex is the interface index of the host face interface (veth pair)
	IfIndex int

	// OpLabels is the endpoint's label configuration
	//
	// FIXME: Rename this field to Labels
	OpLabels pkgLabels.OpLabels

	// identityRevision is incremented each time the identity label
	// information of the endpoint has changed
	identityRevision int

	// LXCMAC is the MAC address of the endpoint
	//
	// FIXME: Rename this field to MAC
	LXCMAC mac.MAC // Container MAC address.

	// IPv6 is the IPv6 address of the endpoint
	IPv6 addressing.CiliumIPv6

	// IPv4 is the IPv4 address of the endpoint
	IPv4 addressing.CiliumIPv4

	// NodeMAC is the MAC of the node (agent). The MAC is different for every endpoint.
	NodeMAC mac.MAC

	// SecurityIdentity is the security identity of this endpoint. This is computed from
	// the endpoint's labels.
	SecurityIdentity *identityPkg.Identity `json:"SecLabel"`

	// hasSidecarProxy indicates whether the endpoint has been injected by
	// Istio with a Cilium-compatible sidecar proxy. If true, the sidecar proxy
	// will be used to apply L7 policy rules. Otherwise, Cilium's node-wide
	// proxy will be used.
	// TODO: Currently this applies only to HTTP L7 rules. Kafka L7 rules are still enforced by Cilium's node-wide Kafka proxy.
	hasSidecarProxy bool

	// prevIdentityCache is the set of all security identities used in the
	// previous policy computation
	prevIdentityCache *identityPkg.IdentityCache

	// RealizedL4Policy is the L4Policy in effect for the endpoint.
	RealizedL4Policy *policy.L4Policy `json:"-"`

	// DesiredL4Policy is the desired L4Policy for the endpoint. It is populated
	// when the policy for this endpoint is generated.
	DesiredL4Policy *policy.L4Policy `json:"-"`

	// PolicyMap is the policy related state of the datapath including
	// reference to all policy related BPF
	PolicyMap *policymap.PolicyMap `json:"-"`

	// L3Policy is the CIDR based policy configuration of the endpoint.
	L3Policy *policy.CIDRPolicy `json:"-"`

	// Options determine the datapath configuration of the endpoint.
	Options *option.IntOptions

	// Status are the last n state transitions this endpoint went through
	Status *EndpointStatus

	// state is the state the endpoint is in. See SetStateLocked()
	state string

	// bpfHeaderfileHash is the hash of the last BPF headerfile that has been
	// compiled and installed.
	bpfHeaderfileHash string

	// k8sPodName is the Kubernetes pod name of the endpoint
	k8sPodName string

	// k8sNamespace is the Kubernetes namespace of the endpoint
	k8sNamespace string

	// policyRevision is the policy revision this endpoint is currently on
	// to modify this field please use endpoint.setPolicyRevision instead
	policyRevision uint64

	// policyRevisionSignals contains a map of PolicyRevision signals that
	// should be triggered once the policyRevision reaches the wanted revision.
	policyRevisionSignals map[policySignal]bool

	// proxyPolicyRevision is the policy revision that has been applied to
	// the proxy.
	proxyPolicyRevision uint64

	// proxyStatisticsMutex is the mutex that must be held to read or write
	// proxyStatistics.
	proxyStatisticsMutex lock.RWMutex

	// proxyStatistics contains statistics of proxy redirects.
	// The keys in this map are the ProxyStatistics with their
	// AllocatedProxyPort and Statistics fields set to 0 and nil.
	// You must hold Endpoint.proxyStatisticsMutex to read or write it.
	proxyStatistics map[models.ProxyStatistics]*models.ProxyStatistics

	// nextPolicyRevision is the policy revision that the endpoint has
	// updated to and that will become effective with the next regenerate
	nextPolicyRevision uint64

	// forcePolicyCompute full endpoint policy recomputation
	// Set when endpoint options have been changed. Cleared right before releasing the
	// endpoint mutex after policy recalculation.
	forcePolicyCompute bool

	// BuildMutex synchronizes builds of individual endpoints and locks out
	// deletion during builds
	//
	// FIXME: Mark private once endpoint deletion can be moved into
	// `pkg/endpoint`
	BuildMutex lock.Mutex `json:"-"`

	// logger is a logrus object with fields set to report an endpoints information.
	// You must hold Endpoint.Mutex to read or write it (but not to log with it).
	logger unsafe.Pointer

	// controllers is the list of async controllers syncing the endpoint to
	// other resources
	controllers controller.Manager

	// realizedRedirects maps the ID of each proxy redirect that has been
	// successfully added into a proxy for this endpoint, to the redirect's
	// proxy port number.
	// You must hold Endpoint.Mutex to read or write it.
	realizedRedirects map[string]uint16

	// realizedMapState maps each PolicyKey which is presently
	// inserted (realized) in the endpoint's BPF PolicyMap to a proxy port.
	// Proxy port 0 indicates no proxy redirection.
	// All fields within the PolicyKey and the proxy port must be in host byte-order.
	realizedMapState PolicyMapState

	// desiredMapState maps each PolicyKeys which should be synched
	// with, but may not yet be synched with, the endpoint's BPF PolicyMap, to
	// a proxy port.
	// This map is updated upon regeneration of policy for an endpoint.
	// Proxy port 0 indicates no proxy redirection.
	// All fields within the PolicyKey and the proxy port must be in host byte-order.
	desiredMapState PolicyMapState

	// ctCleaned indicates whether the conntrack table has already been
	// cleaned when this endpoint was first created
	ctCleaned bool

	// ingressPolicyEnabled specifies whether policy enforcement on ingress
	// is enabled for this endpoint.
	ingressPolicyEnabled bool

	// egressPolicyEnabled specifies whether policy enforcement on egress
	// is enabled for this endpoint.
	egressPolicyEnabled bool

	///////////////////////
	// DEPRECATED FIELDS //
	///////////////////////

	// DeprecatedOpts represents the mutable options for the endpoint, in
	// the format understood by Cilium 1.1 or earlier.
	//
	// Deprecated: Use Options instead.
	DeprecatedOpts deprecatedOptions `json:"Opts"`
}
// GetIngressPolicyEnabledLocked returns whether ingress policy enforcement is
// enabled for the endpoint or not. The endpoint's mutex must be held.
func (e *Endpoint) GetIngressPolicyEnabledLocked() bool {
	return e.ingressPolicyEnabled
}
// GetEgressPolicyEnabledLocked returns whether egress policy enforcement is
// enabled for the endpoint or not. The endpoint's mutex must be held.
func (e *Endpoint) GetEgressPolicyEnabledLocked() bool {
	return e.egressPolicyEnabled
}
// SetIngressPolicyEnabled sets Endpoint's ingress policy enforcement
// configuration to the specified value. The endpoint's mutex must not be held,
// as it is acquired (and released) here.
func (e *Endpoint) SetIngressPolicyEnabled(ingress bool) {
	e.UnconditionalLock()
	defer e.Unlock()
	e.ingressPolicyEnabled = ingress
}
// SetEgressPolicyEnabled sets Endpoint's egress policy enforcement
// configuration to the specified value. The endpoint's mutex must not be held,
// as it is acquired (and released) here.
func (e *Endpoint) SetEgressPolicyEnabled(egress bool) {
	e.UnconditionalLock()
	defer e.Unlock()
	e.egressPolicyEnabled = egress
}
// SetIngressPolicyEnabledLocked sets Endpoint's ingress policy enforcement
// configuration to the specified value. The endpoint's mutex must be held
// by the caller.
func (e *Endpoint) SetIngressPolicyEnabledLocked(ingress bool) {
	e.ingressPolicyEnabled = ingress
}
// SetEgressPolicyEnabledLocked sets Endpoint's egress policy enforcement
// configuration to the specified value. The endpoint's mutex must be held
// by the caller.
func (e *Endpoint) SetEgressPolicyEnabledLocked(egress bool) {
	e.egressPolicyEnabled = egress
}
// WaitForProxyCompletions blocks until all proxy changes have been completed.
// Called with BuildMutex held.
func (e *Endpoint) WaitForProxyCompletions(proxyWaitGroup *completion.WaitGroup) error {
	// Nothing to wait on.
	if proxyWaitGroup == nil {
		return nil
	}

	// Don't bother waiting if the completion context was already cancelled.
	if ctxErr := proxyWaitGroup.Context().Err(); ctxErr != nil {
		return fmt.Errorf("context cancelled before waiting for proxy updates: %s", ctxErr)
	}

	start := time.Now()
	e.Logger().Debug("Waiting for proxy updates to complete...")
	if waitErr := proxyWaitGroup.Wait(); waitErr != nil {
		return fmt.Errorf("proxy state changes failed: %s", waitErr)
	}
	e.Logger().Debug("Wait time for proxy updates: ", time.Since(start))

	return nil
}
// RunK8sCiliumEndpointSync starts a controller that synchronizes the endpoint
// to the corresponding k8s CiliumEndpoint CRD.
// CiliumEndpoint objects have the same name as the pod they represent.
func (e *Endpoint) RunK8sCiliumEndpointSync() {
	var (
		endpointID     = e.ID
		controllerName = fmt.Sprintf("sync-to-k8s-ciliumendpoint (%v)", endpointID)
		scopedLog      = e.Logger().WithField("controller", controllerName)
		err            error
	)

	if !k8s.IsEnabled() {
		scopedLog.Debug("Not starting controller because k8s is disabled")
		return
	}
	// NOTE(review): this writes the package-level k8sServerVer from every
	// endpoint's controller setup without synchronization — confirm these
	// calls are serialized, or guard/localize this variable.
	k8sServerVer, err = k8s.GetServerVersion()
	if err != nil {
		scopedLog.WithError(err).Error("unable to retrieve kubernetes serverversion")
		return
	}
	if !ciliumEPControllerLimit.Check(k8sServerVer) {
		// BUG FIX: "expected" must carry the version constraint we require
		// and "found" the version the apiserver reported; they were swapped.
		scopedLog.WithFields(logrus.Fields{
			"expected": ciliumEPControllerLimit,
			"found":    k8sServerVer,
		}).Warn("cannot run with this k8s version")
		return
	}
	ciliumClient, err := getCiliumClient()
	if err != nil {
		scopedLog.WithError(err).Error("Not starting controller because unable to get cilium k8s client")
		return
	}

	// The health endpoint doesn't really exist in k8s and updates to it caused
	// arbitrary errors. Disable the controller for these endpoints.
	if isHealthEP := e.HasLabels(pkgLabels.LabelHealth); isHealthEP {
		scopedLog.Debug("Not starting unnecessary CEP controller for cilium-health endpoint")
		return
	}

	var (
		// lastMdl is the last model successfully pushed via an Update; used to
		// skip no-op syncs.
		lastMdl  *models.Endpoint
		firstRun = true
	)

	// NOTE: The controller functions do NOT hold the endpoint locks
	e.controllers.UpdateController(controllerName,
		controller.ControllerParams{
			RunInterval: 10 * time.Second,
			DoFunc: func() (err error) {
				// Update logger as scopeLog might not have the podName when it
				// was created.
				scopedLog = e.Logger().WithField("controller", controllerName)

				podName := e.GetK8sPodName()
				if podName == "" {
					scopedLog.Debug("Skipping CiliumEndpoint update because it has no k8s pod name")
					return nil
				}
				namespace := e.GetK8sNamespace()
				if namespace == "" {
					scopedLog.Debug("Skipping CiliumEndpoint update because it has no k8s namespace")
					return nil
				}

				mdl := e.GetModel()
				// Skip the apiserver round-trip if nothing changed since the
				// last successful update.
				if reflect.DeepEqual(mdl, lastMdl) {
					scopedLog.Debug("Skipping CiliumEndpoint update because it has not changed")
					return nil
				}
				k8sMdl := (*cilium_v2.CiliumEndpointDetail)(mdl)

				cep, err := ciliumClient.CiliumEndpoints(namespace).Get(podName, meta_v1.GetOptions{})
				switch {
				// The CEP doesn't exist. We will fall through to the create code below
				case err != nil && k8serrors.IsNotFound(err):
					break

				// Delete the CEP on the first ever run. We will fall through to the create code below
				case firstRun:
					firstRun = false
					scopedLog.Debug("Deleting CEP on first run")
					err := ciliumClient.CiliumEndpoints(namespace).Delete(podName, &meta_v1.DeleteOptions{})
					if err != nil {
						scopedLog.WithError(err).Warn("Error deleting CEP")
						return err
					}

				// Delete an invalid CEP. We will fall through to the create code below
				case err != nil && k8serrors.IsInvalid(err):
					scopedLog.WithError(err).Warn("Invalid CEP during update")
					err := ciliumClient.CiliumEndpoints(namespace).Delete(podName, &meta_v1.DeleteOptions{})
					if err != nil {
						scopedLog.WithError(err).Warn("Error deleting invalid CEP during update")
						return err
					}

				// A real error
				case err != nil && !k8serrors.IsNotFound(err):
					scopedLog.WithError(err).Error("Cannot get CEP for update")
					return err

				// do an update
				case err == nil:
					// Update the copy of the cep
					k8sMdl.DeepCopyInto(&cep.Status)
					var err2 error
					switch {
					// UpdateStatus is only available on recent-enough apiservers.
					case ciliumUpdateStatusVerConstr.Check(k8sServerVer):
						_, err2 = ciliumClient.CiliumEndpoints(namespace).UpdateStatus(cep)
					default:
						_, err2 = ciliumClient.CiliumEndpoints(namespace).Update(cep)
					}
					if err2 != nil {
						scopedLog.WithError(err2).Error("Cannot update CEP")
						return err2
					}

					lastMdl = mdl
					return nil
				}

				// The CEP was not found, this is the first creation of the endpoint
				cep = &cilium_v2.CiliumEndpoint{
					ObjectMeta: meta_v1.ObjectMeta{
						Name: podName,
					},
					Status: *k8sMdl,
				}

				_, err = ciliumClient.CiliumEndpoints(namespace).Create(cep)
				if err != nil {
					scopedLog.WithError(err).Error("Cannot create CEP")
					return err
				}

				return nil
			},
			StopFunc: func() error {
				// On controller shutdown, remove the CEP that this controller
				// created/maintained.
				podName := e.GetK8sPodName()
				namespace := e.GetK8sNamespace()
				if err := ciliumClient.CiliumEndpoints(namespace).Delete(podName, &meta_v1.DeleteOptions{}); err != nil {
					scopedLog.WithError(err).Error("Unable to delete CEP")
					return err
				}
				return nil
			},
		})
}
// NewEndpointWithState creates a new endpoint prepopulated with the given ID
// and state, useful for testing purposes.
func NewEndpointWithState(ID uint16, state string) *Endpoint {
	ep := new(Endpoint)
	ep.ID = ID
	ep.Options = option.NewIntOptions(&EndpointMutableOptionLibrary)
	ep.Status = NewEndpointStatus()
	ep.state = state
	ep.UpdateLogger(nil)
	return ep
}
// NewEndpointFromChangeModel creates a new endpoint from an API change
// request. A nil request yields (nil, nil); invalid MAC or IP strings in the
// request are reported as errors.
func NewEndpointFromChangeModel(base *models.EndpointChangeRequest) (*Endpoint, error) {
	if base == nil {
		return nil, nil
	}

	ep := &Endpoint{
		ID:               uint16(base.ID),
		ContainerName:    base.ContainerName,
		ContainerID:      base.ContainerID,
		DockerNetworkID:  base.DockerNetworkID,
		DockerEndpointID: base.DockerEndpointID,
		IfName:           base.InterfaceName,
		IfIndex:          int(base.InterfaceIndex),
		OpLabels: pkgLabels.OpLabels{
			Custom:                pkgLabels.Labels{},
			Disabled:              pkgLabels.Labels{},
			OrchestrationIdentity: pkgLabels.Labels{},
			OrchestrationInfo:     pkgLabels.Labels{},
		},
		state:  "",
		Status: NewEndpointStatus(),
	}
	ep.UpdateLogger(nil)
	ep.SetStateLocked(string(base.State), "Endpoint creation")

	// Parse optional MAC addresses; empty strings mean "not provided".
	if base.Mac != "" {
		lxcMAC, err := mac.ParseMAC(base.Mac)
		if err != nil {
			return nil, err
		}
		ep.LXCMAC = lxcMAC
	}
	if base.HostMac != "" {
		nodeMAC, err := mac.ParseMAC(base.HostMac)
		if err != nil {
			return nil, err
		}
		ep.NodeMAC = nodeMAC
	}

	// Parse optional addressing information.
	if addr := base.Addressing; addr != nil {
		if addr.IPV6 != "" {
			v6, err := addressing.NewCiliumIPv6(addr.IPV6)
			if err != nil {
				return nil, err
			}
			ep.IPv6 = v6
		}
		if addr.IPV4 != "" {
			v4, err := addressing.NewCiliumIPv4(addr.IPV4)
			if err != nil {
				return nil, err
			}
			ep.IPv4 = v4
		}
	}

	return ep, nil
}
// GetModelRLocked returns the API model of endpoint e.
// e.mutex must be RLocked.
func (e *Endpoint) GetModelRLocked() *models.Endpoint {
	if e == nil {
		return nil
	}

	// Report "not-ready" when the endpoint claims to be ready but its most
	// recent status entry is not OK.
	currentState := models.EndpointState(e.state)
	if currentState == models.EndpointStateReady && e.Status.CurrentStatus() != OK {
		currentState = models.EndpointStateNotReady
	}

	// This returns the most recent log entry for this endpoint. It is backwards
	// compatible with the json from before we added `cilium endpoint log` but it
	// only returns 1 entry.
	statusLog := e.Status.GetModel()
	if len(statusLog) > 0 {
		statusLog = statusLog[:1]
	}

	lblSpec := &models.LabelConfigurationSpec{
		User: e.OpLabels.Custom.GetModel(),
	}
	lblMdl := &models.LabelConfigurationStatus{
		Realized:         lblSpec,
		SecurityRelevant: e.OpLabels.OrchestrationIdentity.GetModel(),
		Derived:          e.OpLabels.OrchestrationInfo.GetModel(),
		Disabled:         e.OpLabels.Disabled.GetModel(),
	}
	// Sort these slices since they come out in random orders. This allows
	// reflect.DeepEqual to succeed.
	sort.StringSlice(lblSpec.User).Sort()
	sort.StringSlice(lblMdl.Disabled).Sort()
	sort.StringSlice(lblMdl.SecurityRelevant).Sort()
	sort.StringSlice(lblMdl.Derived).Sort()

	// Controllers are also sorted (by name) for deterministic model output.
	controllerMdl := e.controllers.GetStatusModel()
	sort.Slice(controllerMdl, func(i, j int) bool { return controllerMdl[i].Name < controllerMdl[j].Name })

	spec := &models.EndpointConfigurationSpec{
		LabelConfiguration: lblSpec,
		Options:            *e.Options.GetMutableModel(),
	}

	mdl := &models.Endpoint{
		ID:   int64(e.ID),
		Spec: spec,
		Status: &models.EndpointStatus{
			// FIXME GH-3280 When we begin implementing revision numbers this will
			// diverge from models.Endpoint.Spec to reflect the in-datapath config
			Realized: spec,
			Identity: e.SecurityIdentity.GetModel(),
			Labels:   lblMdl,
			Networking: &models.EndpointNetworking{
				Addressing: []*models.AddressPair{{
					IPV4: e.IPv4.String(),
					IPV6: e.IPv6.String(),
				}},
				InterfaceIndex: int64(e.IfIndex),
				InterfaceName:  e.IfName,
				Mac:            e.LXCMAC.String(),
				HostMac:        e.NodeMAC.String(),
			},
			ExternalIdentifiers: &models.EndpointIdentifiers{
				ContainerID:      e.ContainerID,
				ContainerName:    e.ContainerName,
				DockerEndpointID: e.DockerEndpointID,
				DockerNetworkID:  e.DockerNetworkID,
				PodName:          e.GetK8sNamespaceAndPodNameLocked(),
			},
			// FIXME GH-3280 When we begin returning endpoint revisions this should
			// change to return the configured and in-datapath policies.
			Policy:      e.GetPolicyModel(),
			Log:         statusLog,
			Controllers: controllerMdl,
			State:       currentState, // TODO: Validate
			Health:      e.getHealthModel(),
		},
	}

	return mdl
}
// getHealthModel returns the endpoint's health object derived from its
// current state.
//
// Must be called with e.Mutex locked.
func (e *Endpoint) getHealthModel() *models.EndpointHealth {
	// Duplicated from GetModelRLocked: a "ready" endpoint whose latest status
	// entry is not OK is reported as not-ready.
	currentState := models.EndpointState(e.state)
	if currentState == models.EndpointStateReady && e.Status.CurrentStatus() != OK {
		currentState = models.EndpointStateNotReady
	}

	switch currentState {
	case models.EndpointStateRegenerating, models.EndpointStateWaitingToRegenerate, models.EndpointStateDisconnecting:
		return &models.EndpointHealth{
			Bpf:           models.EndpointHealthStatusPending,
			Policy:        models.EndpointHealthStatusPending,
			Connected:     true,
			OverallHealth: models.EndpointHealthStatusPending,
		}
	case models.EndpointStateCreating:
		return &models.EndpointHealth{
			Bpf:           models.EndpointHealthStatusBootstrap,
			Policy:        models.EndpointHealthStatusDisabled,
			Connected:     true,
			OverallHealth: models.EndpointHealthStatusDisabled,
		}
	case models.EndpointStateWaitingForIdentity:
		return &models.EndpointHealth{
			Bpf:           models.EndpointHealthStatusDisabled,
			Policy:        models.EndpointHealthStatusBootstrap,
			Connected:     true,
			OverallHealth: models.EndpointHealthStatusDisabled,
		}
	case models.EndpointStateNotReady:
		return &models.EndpointHealth{
			Bpf:           models.EndpointHealthStatusWarning,
			Policy:        models.EndpointHealthStatusWarning,
			Connected:     true,
			OverallHealth: models.EndpointHealthStatusWarning,
		}
	case models.EndpointStateDisconnected:
		return &models.EndpointHealth{
			Bpf:           models.EndpointHealthStatusDisabled,
			Policy:        models.EndpointHealthStatusDisabled,
			Connected:     false,
			OverallHealth: models.EndpointHealthStatusDisabled,
		}
	case models.EndpointStateReady:
		return &models.EndpointHealth{
			Bpf:           models.EndpointHealthStatusOK,
			Policy:        models.EndpointHealthStatusOK,
			Connected:     true,
			OverallHealth: models.EndpointHealthStatusOK,
		}
	}

	// Any other state (e.g. restoring) reports the disconnected, disabled
	// baseline — identical to the original's default.
	return &models.EndpointHealth{
		Bpf:           models.EndpointHealthStatusDisabled,
		Policy:        models.EndpointHealthStatusDisabled,
		Connected:     false,
		OverallHealth: models.EndpointHealthStatusDisabled,
	}
}
// GetHealthModel returns the endpoint's health object.
// It acquires the endpoint's read lock for the caller.
func (e *Endpoint) GetHealthModel() *models.EndpointHealth {
	// NOTE: Using rlock on mutex directly because getHealthModel handles removed endpoint properly
	e.mutex.RLock()
	defer e.mutex.RUnlock()
	return e.getHealthModel()
}
// GetModel returns the API model of endpoint e. Returns nil for a nil
// receiver; otherwise acquires the endpoint's read lock for the caller.
func (e *Endpoint) GetModel() *models.Endpoint {
	if e == nil {
		return nil
	}
	// NOTE: Using rlock on mutex directly because GetModelRLocked handles removed endpoint properly
	e.mutex.RLock()
	defer e.mutex.RUnlock()
	return e.GetModelRLocked()
}
// GetPolicyModel returns the endpoint's policy as an API model.
//
// Must be called with e.Mutex locked.
func (e *Endpoint) GetPolicyModel() *models.EndpointPolicyStatus {
if e == nil {
return nil
}
if e.SecurityIdentity == nil {
return nil
}
realizedIngressIdentities := make([]int64, 0)
realizedEgressIdentities := make([]int64, 0)
for policyMapKey := range e.realizedMapState {
if policyMapKey.DestPort != 0 {
// If the port is non-zero, then the PolicyKey no longer only applies
// at L3. AllowedIngressIdentities and AllowedEgressIdentities
// contain sets of which identities (i.e., label-based L3 only)
// are allowed, so anything which contains L4-related policy should
// not be added to these sets.
continue
}
switch policymap.TrafficDirection(policyMapKey.TrafficDirection) {
case policymap.Ingress:
realizedIngressIdentities = append(realizedIngressIdentities, int64(policyMapKey.Identity))
case policymap.Egress:
realizedEgressIdentities = append(realizedEgressIdentities, int64(policyMapKey.Identity))
default:
log.WithField(logfields.TrafficDirection, policymap.TrafficDirection(policyMapKey.TrafficDirection)).Error("Unexpected traffic direction present in realized PolicyMap state for endpoint")
}
}
desiredIngressIdentities := make([]int64, 0)
desiredEgressIdentities := make([]int64, 0)
for policyMapKey := range e.desiredMapState {
if policyMapKey.DestPort != 0 {
// If the port is non-zero, then the PolicyKey no longer only applies
// at L3. AllowedIngressIdentities and AllowedEgressIdentities
// contain sets of which identities (i.e., label-based L3 only)
// are allowed, so anything which contains L4-related policy should
// not be added to these sets.
continue
}
switch policymap.TrafficDirection(policyMapKey.TrafficDirection) {
case policymap.Ingress:
desiredIngressIdentities = append(desiredIngressIdentities, int64(policyMapKey.Identity))
case policymap.Egress:
desiredEgressIdentities = append(desiredEgressIdentities, int64(policyMapKey.Identity))
default:
log.WithField(logfields.TrafficDirection, policymap.TrafficDirection(policyMapKey.TrafficDirection)).Error("Unexpected traffic direction present in desired PolicyMap state for endpoint")
}
}
policyEnabled := e.policyStatus()
// Make a shallow copy of the stats.
e.proxyStatisticsMutex.RLock()
proxyStats := make([]*models.ProxyStatistics, 0, len(e.proxyStatistics))
for _, stats := range e.proxyStatistics {
statsCopy := *stats
proxyStats = append(proxyStats, &statsCopy)
}
e.proxyStatisticsMutex.RUnlock()
sortProxyStats(proxyStats)
mdl := &models.EndpointPolicy{
ID: int64(e.SecurityIdentity.ID),
// This field should be removed.
Build: int64(e.policyRevision),
PolicyRevision: int64(e.policyRevision),
AllowedIngressIdentities: realizedIngressIdentities,
AllowedEgressIdentities: realizedEgressIdentities,
CidrPolicy: e.L3Policy.GetModel(),
L4: e.RealizedL4Policy.GetModel(),
PolicyEnabled: policyEnabled,
}
desiredMdl := &models.EndpointPolicy{
ID: int64(e.SecurityIdentity.ID),
// This field should be removed.
Build: int64(e.nextPolicyRevision),
PolicyRevision: int64(e.nextPolicyRevision),
AllowedIngressIdentities: desiredIngressIdentities,
AllowedEgressIdentities: desiredEgressIdentities,
CidrPolicy: e.L3Policy.GetModel(),
L4: e.DesiredL4Policy.GetModel(),
PolicyEnabled: policyEnabled,
}
// FIXME GH-3280 Once we start returning revisions Realized should be the
// policy implemented in the data path
return &models.EndpointPolicyStatus{
Spec: desiredMdl,
Realized: mdl,
ProxyPolicyRevision: int64(e.proxyPolicyRevision),
ProxyStatistics: proxyStats,
}
}
// policyStatus reports which traffic directions currently have policy
// enforcement enabled on this endpoint, derived from the
// ingressPolicyEnabled / egressPolicyEnabled flags.
//
// Must be called with e.Mutex locked.
func (e *Endpoint) policyStatus() models.EndpointPolicyEnabled {
	switch {
	case e.ingressPolicyEnabled && e.egressPolicyEnabled:
		return models.EndpointPolicyEnabledBoth
	case e.ingressPolicyEnabled:
		return models.EndpointPolicyEnabledIngress
	case e.egressPolicyEnabled:
		return models.EndpointPolicyEnabledEgress
	}
	return models.EndpointPolicyEnabledNone
}
// GetID returns the endpoint's ID widened to uint64.
func (e *Endpoint) GetID() uint64 {
	return uint64(e.ID)
}
// GetLabels returns the security-identity labels as a string slice.
// An endpoint without a security identity yields an empty (non-nil) slice.
func (e *Endpoint) GetLabels() []string {
	if e.SecurityIdentity != nil {
		return e.SecurityIdentity.Labels.GetModel()
	}
	return []string{}
}
// GetK8sPodLabels returns all labels that exist in the endpoint and were
// derived from k8s pod. Labels whose keys carry Cilium-derived prefixes
// (pod namespace meta labels, service-account label, namespace label) are
// filtered out, so only labels set on the pod itself remain.
func (e *Endpoint) GetK8sPodLabels() pkgLabels.Labels {
	e.UnconditionalRLock()
	defer e.RUnlock()
	allLabels := e.OpLabels.AllLabels()
	if allLabels == nil {
		return nil
	}
	// Restrict to labels whose source is k8s.
	allLabelsFromK8s := allLabels.GetFromSource(pkgLabels.LabelSourceK8s)
	k8sEPPodLabels := pkgLabels.Labels{}
	for k, v := range allLabelsFromK8s {
		// Drop labels Cilium derives itself rather than ones taken
		// verbatim from the pod.
		if !strings.HasPrefix(v.Key, ciliumio.PodNamespaceMetaLabels) &&
			!strings.HasPrefix(v.Key, ciliumio.PolicyLabelServiceAccount) &&
			!strings.HasPrefix(v.Key, ciliumio.PodNamespaceLabel) {
			k8sEPPodLabels[k] = v
		}
	}
	return k8sEPPodLabels
}
// GetLabelsSHA returns the SHA-256 of the endpoint's identity labels, or
// the empty string when no security identity is assigned.
func (e *Endpoint) GetLabelsSHA() string {
	if e.SecurityIdentity != nil {
		return e.SecurityIdentity.GetLabelsSHA256()
	}
	return ""
}
// GetOpLabels returns the endpoint's identity-relevant operational labels
// as a string slice.
func (e *Endpoint) GetOpLabels() []string {
	e.UnconditionalRLock()
	defer e.RUnlock()
	return e.OpLabels.IdentityLabels().GetModel()
}
// GetIPv4Address returns the IPv4 address of the endpoint rendered as a string.
func (e *Endpoint) GetIPv4Address() string {
	return e.IPv4.String()
}
// GetIPv6Address returns the IPv6 address of the endpoint rendered as a string.
func (e *Endpoint) GetIPv6Address() string {
	return e.IPv6.String()
}
// HasSidecarProxy reports whether the endpoint runs with a sidecar proxy.
func (e *Endpoint) HasSidecarProxy() bool {
	return e.hasSidecarProxy
}
// statusLogMsg represents a single log message: a status value plus the
// time at which it was recorded.
type statusLogMsg struct {
	Status    Status    `json:"status"`
	Timestamp time.Time `json:"timestamp"`
}
// statusLog represents a slice of statusLogMsg.
type statusLog []*statusLogMsg
// componentStatus represents a map of a single statusLogMsg by StatusType.
type componentStatus map[StatusType]*statusLogMsg
// contains checks if the given `s` statusLogMsg is present in the
// priorityStatus. Note this is a pointer-identity comparison, not a value
// comparison.
func (ps componentStatus) contains(s *statusLogMsg) bool {
	return ps[s.Status.Type] == s
}
// statusTypeSlice represents a slice of StatusType, is used for sorting
// purposes.
type statusTypeSlice []StatusType
// Len returns the length of the slice.
func (p statusTypeSlice) Len() int { return len(p) }
// Less returns true if the element `j` is less than element `i`.
// *It's reversed* so that we can sort the slice from highest to lowest
// priority.
func (p statusTypeSlice) Less(i, j int) bool { return p[i] > p[j] }
// Swap swaps element in `i` with element in `j`.
func (p statusTypeSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// sortByPriority returns the current per-type status messages as a
// statusLog ordered from highest priority to lowest (statusTypeSlice sorts
// in reverse).
func (ps componentStatus) sortByPriority() statusLog {
	keys := make(statusTypeSlice, 0, len(ps))
	for typ := range ps {
		keys = append(keys, typ)
	}
	sort.Sort(keys)
	ordered := make(statusLog, 0, len(keys))
	for _, typ := range keys {
		ordered = append(ordered, ps[typ])
	}
	return ordered
}
// EndpointStatus represents the endpoint status, kept as a fixed-size ring
// buffer of log messages plus the most recent message per status type.
type EndpointStatus struct {
	// CurrentStatuses is the last status of a given priority.
	CurrentStatuses componentStatus `json:"current-status,omitempty"`
	// Contains the last maxLogs messages for this endpoint.
	Log statusLog `json:"log,omitempty"`
	// Index is the index in the statusLog, is used to keep track the next
	// available position to write a new log message.
	Index int `json:"index"`
	// indexMU is the Mutex for the CurrentStatus and Log RW operations.
	indexMU lock.RWMutex
}
// NewEndpointStatus returns an EndpointStatus with the status map and log
// initialized to empty, non-nil values.
func NewEndpointStatus() *EndpointStatus {
	return &EndpointStatus{
		CurrentStatuses: componentStatus{},
		Log:             statusLog{},
	}
}
// lastIndex returns the ring-buffer slot holding the most recently written
// log message, wrapping to the end of the buffer when Index is at slot 0.
func (e *EndpointStatus) lastIndex() int {
	if idx := e.Index - 1; idx >= 0 {
		return idx
	}
	return maxLogs - 1
}
// getAndIncIdx returns current free slot index and increments the index to the
// next index that can be overwritten.
func (e *EndpointStatus) getAndIncIdx() int {
	idx := e.Index
	e.Index++
	// The log is a fixed-size ring buffer; wrap around at maxLogs.
	if e.Index >= maxLogs {
		e.Index = 0
	}
	// Lets skip the CurrentStatus message from the log to prevent removing
	// non-OK status!
	if e.Index < len(e.Log) &&
		e.CurrentStatuses.contains(e.Log[e.Index]) &&
		e.Log[e.Index].Status.Code != OK {
		e.Index++
		// The skip may itself run past the end of the ring; wrap again.
		if e.Index >= maxLogs {
			e.Index = 0
		}
	}
	return idx
}
// addStatusLog adds statusLogMsg to endpoint log.
// example of e.Log's contents where maxLogs = 3 and Index = 0
// [index] - Priority - Code
// [0] - BPF - OK
// [1] - Policy - Failure
// [2] - BPF - OK
// With this log, the CurrentStatus will keep [1] for Policy priority and [2]
// for BPF priority.
//
// Whenever a new statusLogMsg is received, that log will be kept in the
// CurrentStatus map for the statusLogMsg's priority.
// The CurrentStatus map ensures none of the failure messages are deleted for
// higher priority messages and vice versa.
func (e *EndpointStatus) addStatusLog(s *statusLogMsg) {
	e.CurrentStatuses[s.Status.Type] = s
	idx := e.getAndIncIdx()
	// Grow the log until it reaches capacity; afterwards overwrite the
	// slot chosen by getAndIncIdx (ring-buffer behavior).
	if len(e.Log) < maxLogs {
		e.Log = append(e.Log, s)
	} else {
		e.Log[idx] = s
	}
}
// GetModel returns the status log as a list of API status-change models,
// walking the ring buffer backwards so the newest entry comes first.
func (e *EndpointStatus) GetModel() []*models.EndpointStatusChange {
	e.indexMU.RLock()
	defer e.indexMU.RUnlock()
	list := []*models.EndpointStatusChange{}
	// Start at the most recent slot and step backwards through the ring.
	for i := e.lastIndex(); ; i-- {
		// Wrap to the top of the ring when stepping past slot 0.
		if i < 0 {
			i = maxLogs - 1
		}
		// Slots beyond the filled portion of the log (or holding nil) are
		// skipped.
		if i < len(e.Log) && e.Log[i] != nil {
			list = append(list, &models.EndpointStatusChange{
				Timestamp: e.Log[i].Timestamp.Format(time.RFC3339),
				Code:      e.Log[i].Status.Code.String(),
				Message:   e.Log[i].Status.Msg,
				State:     models.EndpointState(e.Log[i].Status.State),
			})
		}
		// Stop once we reach the next write position, i.e. the oldest
		// entry has been visited.
		if i == e.Index {
			break
		}
	}
	return list
}
// CurrentStatus returns the highest-priority non-OK status code currently
// recorded, or OK when every component is healthy.
func (e *EndpointStatus) CurrentStatus() StatusCode {
	e.indexMU.RLock()
	defer e.indexMU.RUnlock()
	for _, msg := range e.CurrentStatuses.sortByPriority() {
		if msg.Status.Code != OK {
			return msg.Status.Code
		}
	}
	return OK
}
// String returns the human-readable form of the current status code.
func (e *EndpointStatus) String() string {
	return e.CurrentStatus().String()
}
// StringID returns the endpoint's numeric ID rendered as a decimal string.
func (e *Endpoint) StringID() string {
	return strconv.Itoa(int(e.ID))
}
// GetIdentity returns the endpoint's security identity ID, or
// identityPkg.InvalidIdentity when no identity has been assigned yet.
func (e *Endpoint) GetIdentity() identityPkg.NumericIdentity {
	if e.SecurityIdentity == nil {
		return identityPkg.InvalidIdentity
	}
	return e.SecurityIdentity.ID
}
// Allows reports whether the desired policy map state contains an
// L3-only ingress entry for the given numeric identity.
func (e *Endpoint) Allows(id identityPkg.NumericIdentity) bool {
	e.UnconditionalRLock()
	defer e.RUnlock()
	key := policymap.PolicyKey{
		Identity:         uint32(id),
		TrafficDirection: policymap.Ingress.Uint8(),
	}
	_, present := e.desiredMapState[key]
	return present
}
// String returns the endpoint serialized as indented JSON; on a marshalling
// failure the error text is returned instead.
func (e *Endpoint) String() string {
	e.UnconditionalRLock()
	defer e.RUnlock()
	b, err := json.MarshalIndent(e, "", " ")
	if err != nil {
		return err.Error()
	}
	return string(b)
}
// optionChanged is a callback used with pkg/option to apply the options to an
// endpoint. Not used for anything at the moment.
func optionChanged(key string, value option.OptionSetting, data interface{}) {
}
// applyOptsLocked applies the given option overrides to the endpoint and
// reports whether any option value actually changed.
func (e *Endpoint) applyOptsLocked(opts option.OptionMap) bool {
	changed := e.Options.ApplyValidated(opts, optionChanged, e) > 0
	// When the debug option was among the applied changes, refresh the
	// endpoint logger so its verbosity follows the new setting.
	if _, touchedDebug := opts[option.Debug]; touchedDebug && changed {
		e.UpdateLogger(nil)
	}
	return changed
}
// ForcePolicyCompute marks the endpoint for forced bpf regeneration.
func (e *Endpoint) ForcePolicyCompute() {
	e.forcePolicyCompute = true
}
// SetDefaultOpts makes sure the endpoint's option set and option library
// are initialized, copies over any provided mutable option values, and
// refreshes the endpoint logger.
func (e *Endpoint) SetDefaultOpts(opts *option.IntOptions) {
	if e.Options == nil {
		e.Options = option.NewIntOptions(&EndpointMutableOptionLibrary)
	}
	if e.Options.Library == nil {
		e.Options.Library = &EndpointMutableOptionLibrary
	}
	if opts != nil {
		// Copy every mutable option value from the supplied set.
		for name := range option.GetEndpointMutableOptionLibrary() {
			e.Options.SetValidated(name, opts.GetValue(name))
		}
	}
	e.UpdateLogger(nil)
}
// ConntrackLocal determines whether this endpoint is currently using a local
// table to handle connection tracking (true), or the global table (false).
func (e *Endpoint) ConntrackLocal() bool {
	e.UnconditionalRLock()
	defer e.RUnlock()
	return e.ConntrackLocalLocked()
}
// ConntrackLocalLocked is the same as ConntrackLocal, but assumes that the
// endpoint is already locked for reading. Local conntrack requires both a
// security identity and the ConntrackLocal option to be enabled.
func (e *Endpoint) ConntrackLocalLocked() bool {
	return e.SecurityIdentity != nil && e.Options != nil &&
		e.Options.IsEnabled(option.ConntrackLocal)
}
// orderEndpoint is a comparison function over two endpoint models.
type orderEndpoint func(e1, e2 *models.Endpoint) bool
// OrderEndpointAsc orders the slice of Endpoint in ascending ID order.
func OrderEndpointAsc(eps []*models.Endpoint) {
	ascPriority := func(e1, e2 *models.Endpoint) bool {
		return e1.ID < e2.ID
	}
	orderEndpoint(ascPriority).sort(eps)
}
// sort sorts eps in place using the receiver as the "less" function.
func (by orderEndpoint) sort(eps []*models.Endpoint) {
	dS := &epSorter{
		eps: eps,
		by: by,
	}
	sort.Sort(dS)
}
// epSorter adapts an endpoint-model slice plus a comparison function to
// the sort.Interface contract.
type epSorter struct {
	eps []*models.Endpoint
	by func(e1, e2 *models.Endpoint) bool
}
// Len returns the number of endpoints to sort.
func (epS *epSorter) Len() int {
	return len(epS.eps)
}
// Swap exchanges the endpoints at indexes i and j.
func (epS *epSorter) Swap(i, j int) {
	epS.eps[i], epS.eps[j] = epS.eps[j], epS.eps[i]
}
// Less reports whether the endpoint at index i sorts before the one at j.
func (epS *epSorter) Less(i, j int) bool {
	return epS.by(epS.eps[i], epS.eps[j])
}
// base64 serializes the endpoint to JSON and returns it base64-encoded.
// The endpoint is first transformed for downgrade compatibility.
func (e *Endpoint) base64() (string, error) {
	transformEndpointForDowngrade(e)
	jsonBytes, err := json.Marshal(e)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(jsonBytes), nil
}
// parseBase64ToEndpoint decodes the base64 string produced by
// Endpoint.base64 and unmarshals the contained JSON into ep.
func parseBase64ToEndpoint(str string, ep *Endpoint) error {
	raw, err := base64.StdEncoding.DecodeString(str)
	if err != nil {
		return err
	}
	return json.Unmarshal(raw, ep)
}
// FilterEPDir returns a list of directories' names that possible belong to an endpoint.
func FilterEPDir(dirFiles []os.FileInfo) []string {
eptsID := []string{}
for _, file := range dirFiles {
if file.IsDir() {
_, err := strconv.ParseUint(file.Name(), 10, 16)
if err == nil || strings.HasSuffix(file.Name(), "_next_fail") {
eptsID = append(eptsID, file.Name())
}
}
}
return eptsID
}
// ParseEndpoint parses the given strEp which is in the form of:
// common.CiliumCHeaderPrefix + common.Version + ":" + endpointBase64
// The restored endpoint is returned in the "restoring" state.
func ParseEndpoint(strEp string) (*Endpoint, error) {
	// TODO: Provide a better mechanism to update from old version once we bump
	// TODO: cilium version.
	strEpSlice := strings.Split(strEp, ":")
	if len(strEpSlice) != 2 {
		return nil, fmt.Errorf("invalid format %q. Should contain a single ':'", strEp)
	}
	var ep Endpoint
	if err := parseBase64ToEndpoint(strEpSlice[1], &ep); err != nil {
		return nil, fmt.Errorf("failed to parse base64toendpoint: %s", err)
	}
	// We need to check for nil in Status, CurrentStatuses and Log, since in
	// some use cases, status will be not nil and Cilium will eventually
	// error/panic if CurrentStatus or Log are not initialized correctly.
	// Reference issue GH-2477
	if ep.Status == nil || ep.Status.CurrentStatuses == nil || ep.Status.Log == nil {
		ep.Status = NewEndpointStatus()
	}
	ep.UpdateLogger(nil)
	ep.SetStateLocked(StateRestoring, "Endpoint restoring")
	return &ep, nil
}
// RemoveFromGlobalPolicyMap removes this endpoint's ingress and egress
// entries from the global policy map. Only the error from opening the map
// is returned; individual Delete/Close errors are ignored — presumably
// deliberate best-effort cleanup, TODO confirm.
func (e *Endpoint) RemoveFromGlobalPolicyMap() error {
	gpm, err := policymap.OpenGlobalMap(e.PolicyGlobalMapPathLocked())
	if err == nil {
		// We need to remove ourselves from global map, so that
		// resources (prog/map reference counts) can be released.
		gpm.Delete(uint32(e.ID), policymap.AllPorts, u8proto.All, policymap.Ingress)
		gpm.Delete(uint32(e.ID), policymap.AllPorts, u8proto.All, policymap.Egress)
		gpm.Close()
	}
	return err
}
// GetBPFKeys returns all keys which should represent this endpoint in the
// BPF endpoints map. The IPv6 key is always present; an IPv4 key is added
// only when the endpoint has an IPv4 address.
func (e *Endpoint) GetBPFKeys() []*lxcmap.EndpointKey {
	keys := []*lxcmap.EndpointKey{lxcmap.NewEndpointKey(e.IPv6.IP())}
	if e.IPv4 != nil {
		keys = append(keys, lxcmap.NewEndpointKey(e.IPv4.IP()))
	}
	return keys
}
// GetBPFValue returns the value which should represent this endpoint in the
// BPF endpoints map. It fails when either of the endpoint's MAC addresses
// cannot be converted to its 64-bit representation.
func (e *Endpoint) GetBPFValue() (*lxcmap.EndpointInfo, error) {
	mac, err := e.LXCMAC.Uint64()
	if err != nil {
		return nil, fmt.Errorf("invalid LXC MAC: %v", err)
	}
	nodeMAC, err := e.NodeMAC.Uint64()
	if err != nil {
		return nil, fmt.Errorf("invalid node MAC: %v", err)
	}
	// Store security identity in network byte order so it can be
	// written into the packet without an additional byte order
	// conversion.
	return &lxcmap.EndpointInfo{
		IfIndex: uint32(e.IfIndex),
		LxcID:   e.ID,
		MAC:     lxcmap.MAC(mac),
		NodeMAC: lxcmap.MAC(nodeMAC),
	}, nil
}
// mapPath returns the path to a map for endpoint ID, formed by appending
// the decimal ID to the map name prefix.
func mapPath(mapname string, id int) string {
	return bpf.MapPath(mapname + strconv.Itoa(id))
}
// PolicyMapPathLocked returns the path to the policy map of endpoint.
func (e *Endpoint) PolicyMapPathLocked() string {
	return mapPath(policymap.MapName, int(e.ID))
}
// IPv6IngressMapPathLocked returns the path to the IPv6 ingress CIDR map of endpoint.
func (e *Endpoint) IPv6IngressMapPathLocked() string {
	return mapPath(cidrmap.MapName+"ingress6_", int(e.ID))
}
// IPv6EgressMapPathLocked returns the path to the IPv6 egress CIDR map of endpoint.
func (e *Endpoint) IPv6EgressMapPathLocked() string {
	return mapPath(cidrmap.MapName+"egress6_", int(e.ID))
}
// IPv4IngressMapPathLocked returns the path to the IPv4 ingress CIDR map of endpoint.
func (e *Endpoint) IPv4IngressMapPathLocked() string {
	return mapPath(cidrmap.MapName+"ingress4_", int(e.ID))
}
// IPv4EgressMapPathLocked returns the path to the IPv4 egress CIDR map of endpoint.
func (e *Endpoint) IPv4EgressMapPathLocked() string {
	return mapPath(cidrmap.MapName+"egress4_", int(e.ID))
}
// PolicyGlobalMapPathLocked returns the path to the global policy map.
func (e *Endpoint) PolicyGlobalMapPathLocked() string {
	return bpf.MapPath(PolicyGlobalMapName)
}
// CallsMapPath returns the path to the tail-calls map for the given endpoint ID.
func CallsMapPath(id int) string {
	return bpf.MapPath(CallsMapName + strconv.Itoa(id))
}
// CallsMapPathLocked returns the path to cilium tail calls map of an endpoint.
func (e *Endpoint) CallsMapPathLocked() string {
	return CallsMapPath(int(e.ID))
}
// LogStatus locks the endpoint and records a status log message of the
// given type and code.
func (e *Endpoint) LogStatus(typ StatusType, code StatusCode, msg string) {
	e.UnconditionalLock()
	defer e.Unlock()
	// FIXME GH2323 instead of a mutex we could use a channel to send the status
	// log message to a single writer?
	e.logStatusLocked(typ, code, msg)
}
// LogStatusOK records an OK status message of the given type, taking the
// endpoint lock itself.
func (e *Endpoint) LogStatusOK(typ StatusType, msg string) {
	e.LogStatus(typ, OK, msg)
}
// LogStatusOKLocked will log an OK message of the given status type with the
// given msg string.
// Must be called with endpoint.Mutex held.
func (e *Endpoint) LogStatusOKLocked(typ StatusType, msg string) {
	e.logStatusLocked(typ, OK, msg)
}
// logStatusLocked appends a status message to the endpoint's status log
// (recording the endpoint's current state and a UTC timestamp) and also
// emits it at debug level on the endpoint logger.
// must be called with endpoint.Mutex held
func (e *Endpoint) logStatusLocked(typ StatusType, code StatusCode, msg string) {
	e.Status.indexMU.Lock()
	defer e.Status.indexMU.Unlock()
	sts := &statusLogMsg{
		Status: Status{
			Code:  code,
			Msg:   msg,
			Type:  typ,
			State: e.state,
		},
		Timestamp: time.Now().UTC(),
	}
	e.Status.addStatusLog(sts)
	e.Logger().WithFields(logrus.Fields{
		"code":                  sts.Status.Code,
		"type":                  sts.Status.Type,
		logfields.EndpointState: sts.Status.State,
		logfields.PolicyRevision: e.policyRevision,
	}).Debug(msg)
}
// UpdateValidationError indicates that the configuration options supplied
// to an endpoint update failed validation.
// Implements the error interface.
type UpdateValidationError struct {
	msg string
}

// Error returns the validation failure message.
func (err UpdateValidationError) Error() string { return err.msg }

// UpdateCompilationError is an error related to compilation during an
// endpoint update.
// Implements the error interface.
type UpdateCompilationError struct {
	msg string
}

// Error returns the compilation failure message.
func (err UpdateCompilationError) Error() string { return err.msg }

// UpdateStateChangeError is an error that indicates that updating the state
// of an endpoint was unsuccessful.
// Implements the error interface.
type UpdateStateChangeError struct {
	msg string
}

// Error returns the state-change failure message.
func (err UpdateStateChangeError) Error() string { return err.msg }
// Update modifies the endpoint options and *always* tries to regenerate the
// endpoint's program. Returns an error if the provided options are not valid,
// if there was an issue triggering policy updates for the given endpoint,
// or if endpoint regeneration was unable to be triggered. Note that the
// LabelConfiguration in the EndpointConfigurationSpec is *not* consumed here.
func (e *Endpoint) Update(owner Owner, cfg *models.EndpointConfigurationSpec) error {
	om, err := EndpointMutableOptionLibrary.ValidateConfigurationMap(cfg.Options)
	if err != nil {
		return UpdateValidationError{err.Error()}
	}
	if err := e.LockAlive(); err != nil {
		return err
	}
	e.Logger().WithField("configuration-options", cfg).Debug("updating endpoint configuration options")
	// CurrentStatus will be not OK when we have an uncleared error in BPF,
	// policy or Other. We should keep trying to regenerate in the hopes of
	// succeeding.
	// Note: This "retry" behaviour is better suited to a controller, and can be
	// moved there once we have an endpoint regeneration controller.
	needToRegenerateBPF := e.updateAndOverrideEndpointOptions(om) || (e.Status.CurrentStatus() != OK)
	reason := "endpoint was updated via API"
	// If configuration options are provided, we only regenerate if necessary.
	// Otherwise always regenerate.
	if cfg.Options == nil {
		needToRegenerateBPF = true
		reason = "endpoint was manually regenerated via API"
	}
	if needToRegenerateBPF {
		e.Logger().Debug("need to regenerate endpoint; checking state before" +
			" attempting to regenerate")
		// TODO / FIXME: GH-3281: need ways to queue up regenerations per-endpoint.
		// Default timeout for PATCH /endpoint/{id}/config is 60 seconds, so put
		// timeout in this function a bit below that timeout. If the timeout
		// for clients in API is below this value, they will get a message containing
		// "context deadline exceeded".
		timeout := time.After(EndpointGenerationTimeout)
		// Check for endpoint state every second.
		ticker := time.NewTicker(1 * time.Second)
		defer ticker.Stop()
		e.Unlock()
		for {
			select {
			case <-ticker.C:
				if err := e.LockAlive(); err != nil {
					return err
				}
				// Check endpoint state before attempting configuration update because
				// configuration updates can only be applied when the endpoint is in
				// specific states. See GH-3058.
				stateTransitionSucceeded := e.SetStateLocked(StateWaitingToRegenerate, reason)
				if stateTransitionSucceeded {
					e.Unlock()
					e.Regenerate(owner, NewRegenerationContext(reason))
					return nil
				}
				e.Unlock()
			case <-timeout:
				// Use Warning rather than Warningf: the message is a
				// constant with no format arguments, and a Printf-style
				// call would misinterpret any future '%' in it.
				e.Logger().Warning("timed out waiting for endpoint state to change")
				return UpdateStateChangeError{fmt.Sprintf("unable to regenerate endpoint program because state transition to %s was unsuccessful; check `cilium endpoint log %d` for more information", StateWaitingToRegenerate, e.ID)}
			}
		}
	}
	e.Unlock()
	return nil
}
// HasLabels returns whether endpoint e contains all labels l. Will return
// 'false' if any label in l is not in the endpoint's labels.
func (e *Endpoint) HasLabels(l pkgLabels.Labels) bool {
	e.UnconditionalRLock()
	defer e.RUnlock()
	return e.hasLabelsRLocked(l)
}
// hasLabelsRLocked returns whether endpoint e contains all labels l. Will
// return 'false' if any label in l is not in the endpoint's labels.
// e.Mutex must be RLocked.
func (e *Endpoint) hasLabelsRLocked(l pkgLabels.Labels) bool {
	allEpLabels := e.OpLabels.AllLabels()
	for _, wanted := range l {
		matched := false
		for _, have := range allEpLabels {
			if have.Equals(wanted) {
				matched = true
				break
			}
		}
		if !matched {
			return false
		}
	}
	return true
}
// replaceInformationLabels replaces the information labels of the endpoint.
// Passing a nil set of labels will not perform any action.
// Must be called with e.Mutex.Lock().
func (e *Endpoint) replaceInformationLabels(l pkgLabels.Labels) {
	if l == nil {
		return
	}
	// Mark-and-sweep: every existing label is marked for deletion, labels
	// re-asserted by l are upserted (clearing the mark), and whatever
	// remains marked is deleted at the end.
	e.OpLabels.OrchestrationInfo.MarkAllForDeletion()
	scopedLog := e.Logger()
	for _, v := range l {
		if e.OpLabels.OrchestrationInfo.UpsertLabel(v) {
			scopedLog.WithField(logfields.Labels, logfields.Repr(v)).Debug("Assigning information label")
		}
	}
	e.OpLabels.OrchestrationInfo.DeleteMarked()
}
// replaceIdentityLabels replaces the identity labels of the endpoint. If a net
// change occurred, the identityRevision is bumped and returned, otherwise 0 is
// returned.
// Passing a nil set of labels will not perform any action and will return the
// current endpoint's identityRevision.
// Must be called with e.Mutex.Lock().
func (e *Endpoint) replaceIdentityLabels(l pkgLabels.Labels) int {
	if l == nil {
		return e.identityRevision
	}
	changed := false
	// Mark-and-sweep: labels not re-asserted below are removed afterwards.
	e.OpLabels.OrchestrationIdentity.MarkAllForDeletion()
	e.OpLabels.Disabled.MarkAllForDeletion()
	scopedLog := e.Logger()
	for k, v := range l {
		// A disabled identity label stays disabled without value updates
		if e.OpLabels.Disabled[k] != nil {
			e.OpLabels.Disabled[k].ClearDeletionMark()
		} else if e.OpLabels.OrchestrationIdentity.UpsertLabel(v) {
			scopedLog.WithField(logfields.Labels, logfields.Repr(v)).Debug("Assigning security relevant label")
			changed = true
		}
	}
	// Deleting stale labels also counts as a net change.
	if e.OpLabels.OrchestrationIdentity.DeleteMarked() || e.OpLabels.Disabled.DeleteMarked() {
		changed = true
	}
	rev := 0
	if changed {
		e.identityRevision++
		rev = e.identityRevision
	}
	return rev
}
// LeaveLocked removes the endpoint's directory from the system. Must be called
// with Endpoint's mutex AND BuildMutex locked.
// Errors encountered while tearing down the endpoint are collected and
// returned; teardown continues past individual failures.
func (e *Endpoint) LeaveLocked(owner Owner, proxyWaitGroup *completion.WaitGroup) []error {
	errors := []error{}
	owner.RemoveFromEndpointQueue(uint64(e.ID))
	if e.SecurityIdentity != nil && e.RealizedL4Policy != nil {
		// Passing a new map of nil will purge all redirects
		e.removeOldRedirects(owner, nil, proxyWaitGroup)
	}
	if e.PolicyMap != nil {
		if err := e.PolicyMap.Close(); err != nil {
			errors = append(errors, fmt.Errorf("unable to close policymap %s: %s", e.PolicyGlobalMapPathLocked(), err))
		}
	}
	if e.SecurityIdentity != nil {
		// Release the identity reference held by this endpoint.
		err := e.SecurityIdentity.Release()
		if err != nil {
			errors = append(errors, fmt.Errorf("unable to release identity: %s", err))
		}
		// TODO: Check if network policy was created even without SecurityIdentity
		owner.RemoveNetworkPolicy(e)
		e.SecurityIdentity = nil
	}
	// Clean up on-disk state, controllers, pending policy signals and
	// conntrack entries before marking the endpoint disconnected.
	e.removeDirectory()
	e.removeFailedDirectory()
	e.controllers.RemoveAll()
	e.cleanPolicySignals()
	e.scrubIPsInConntrackTableLocked()
	e.SetStateLocked(StateDisconnected, "Endpoint removed")
	endpointPolicyStatus.Remove(e.ID)
	e.Logger().Info("Removed endpoint")
	return errors
}
// removeDirectory deletes the endpoint's state directory.
func (e *Endpoint) removeDirectory() {
	os.RemoveAll(e.DirectoryPath())
}
// removeFailedDirectory deletes the endpoint's failed-build directory.
func (e *Endpoint) removeFailedDirectory() {
	os.RemoveAll(e.FailedDirectoryPath())
}
// RemoveDirectory deletes the endpoint's state directory while holding the
// endpoint lock.
func (e *Endpoint) RemoveDirectory() {
	e.UnconditionalLock()
	defer e.Unlock()
	e.removeDirectory()
}
// CreateDirectory creates the endpoint's state directory, including any
// missing parent directories.
func (e *Endpoint) CreateDirectory() error {
	if err := e.LockAlive(); err != nil {
		return err
	}
	defer e.Unlock()
	if err := os.MkdirAll(e.DirectoryPath(), 0777); err != nil {
		return fmt.Errorf("unable to create endpoint directory: %s", err)
	}
	return nil
}
// RegenerateWait triggers a regeneration and blocks until it has completed.
// It should only be called when the endpoint's state has successfully been
// changed to "waiting-to-regenerate".
func (e *Endpoint) RegenerateWait(owner Owner, reason string) error {
	buildOK := <-e.Regenerate(owner, NewRegenerationContext(reason))
	if !buildOK {
		return fmt.Errorf("error while regenerating endpoint."+
			" For more info run: 'cilium endpoint get %d'", e.ID)
	}
	return nil
}
// SetContainerName modifies the endpoint's container name
func (e *Endpoint) SetContainerName(name string) {
	e.UnconditionalLock()
	e.ContainerName = name
	e.Unlock()
}
// GetK8sNamespace returns the Kubernetes namespace of the pod the endpoint
// represents.
func (e *Endpoint) GetK8sNamespace() string {
	e.UnconditionalRLock()
	ns := e.k8sNamespace
	e.RUnlock()
	return ns
}
// SetK8sNamespace modifies the endpoint's Kubernetes namespace and
// refreshes the endpoint logger with the updated namespace/pod name.
func (e *Endpoint) SetK8sNamespace(name string) {
	e.UnconditionalLock()
	e.k8sNamespace = name
	e.UpdateLogger(map[string]interface{}{
		logfields.K8sPodName: e.GetK8sNamespaceAndPodNameLocked(),
	})
	e.Unlock()
}
// GetK8sPodName returns the name of the pod if the endpoint represents a
// Kubernetes pod
func (e *Endpoint) GetK8sPodName() string {
	e.UnconditionalRLock()
	k8sPodName := e.k8sPodName
	e.RUnlock()
	return k8sPodName
}
// GetK8sNamespaceAndPodNameLocked returns "<namespace>/<pod name>". This
// function requires e.Mutex to be held.
func (e *Endpoint) GetK8sNamespaceAndPodNameLocked() string {
	return e.k8sNamespace + "/" + e.k8sPodName
}
// SetK8sPodName modifies the endpoint's pod name and refreshes the endpoint
// logger with the updated namespace/pod name.
func (e *Endpoint) SetK8sPodName(name string) {
	e.UnconditionalLock()
	e.k8sPodName = name
	e.UpdateLogger(map[string]interface{}{
		logfields.K8sPodName: e.GetK8sNamespaceAndPodNameLocked(),
	})
	e.Unlock()
}
// SetContainerID modifies the endpoint's container ID and refreshes the
// endpoint logger with the shortened form.
func (e *Endpoint) SetContainerID(id string) {
	e.UnconditionalLock()
	e.ContainerID = id
	e.UpdateLogger(map[string]interface{}{
		logfields.ContainerID: e.getShortContainerID(),
	})
	e.Unlock()
}
// GetContainerID returns the endpoint's container ID.
func (e *Endpoint) GetContainerID() string {
	e.UnconditionalRLock()
	defer e.RUnlock()
	return e.ContainerID
}
// GetShortContainerID returns the endpoint's shortened container ID.
func (e *Endpoint) GetShortContainerID() string {
	e.UnconditionalRLock()
	defer e.RUnlock()
	return e.getShortContainerID()
}
// getShortContainerID returns the first 10 characters of the container ID,
// or the full ID when it is already that short. Safe on a nil endpoint.
func (e *Endpoint) getShortContainerID() string {
	if e == nil {
		return ""
	}
	const caplen = 10
	if len(e.ContainerID) <= caplen {
		return e.ContainerID
	}
	return e.ContainerID[:caplen]
}
// SetDockerEndpointID modifies the endpoint's Docker Endpoint ID
func (e *Endpoint) SetDockerEndpointID(id string) {
	e.UnconditionalLock()
	e.DockerEndpointID = id
	e.Unlock()
}
// SetDockerNetworkID modifies the endpoint's Docker network ID
func (e *Endpoint) SetDockerNetworkID(id string) {
	e.UnconditionalLock()
	e.DockerNetworkID = id
	e.Unlock()
}
// GetDockerNetworkID returns the endpoint's Docker network ID
func (e *Endpoint) GetDockerNetworkID() string {
	e.UnconditionalRLock()
	defer e.RUnlock()
	return e.DockerNetworkID
}
// GetStateLocked returns the endpoint's state
// endpoint.Mutex may only be .RLockAlive()ed
func (e *Endpoint) GetStateLocked() string {
	return e.state
}
// GetState returns the endpoint's state
// endpoint.Mutex may only be .RLockAlive()ed
func (e *Endpoint) GetState() string {
	e.UnconditionalRLock()
	defer e.RUnlock()
	return e.GetStateLocked()
}
// SetStateLocked modifies the endpoint's state
// endpoint.Mutex must be held
// Returns true only if endpoints state was changed as requested
//
// Invalid transitions are logged (both to the endpoint logger and the
// status log) and rejected; valid transitions jump to OKState where the
// state is applied and per-state metrics are updated.
func (e *Endpoint) SetStateLocked(toState, reason string) bool {
	// Validate the state transition.
	fromState := e.state
	switch fromState { // From state
	case "": // Special case for capturing initial state transitions like
		// nil --> StateWaitingForIdentity, StateRestoring
		switch toState {
		case StateWaitingForIdentity, StateRestoring:
			goto OKState
		}
	case StateCreating:
		switch toState {
		case StateDisconnecting, StateWaitingForIdentity, StateRestoring:
			goto OKState
		}
	case StateWaitingForIdentity:
		switch toState {
		case StateReady, StateDisconnecting:
			goto OKState
		}
	case StateReady:
		switch toState {
		case StateWaitingForIdentity, StateDisconnecting, StateWaitingToRegenerate, StateRestoring:
			goto OKState
		}
	case StateDisconnecting:
		switch toState {
		case StateDisconnected:
			goto OKState
		}
	case StateDisconnected:
		// No valid transitions, as disconnected is a terminal state for the endpoint.
	case StateWaitingToRegenerate:
		switch toState {
		// Note that transitions to waiting-to-regenerate state
		case StateWaitingForIdentity, StateDisconnecting, StateRestoring:
			goto OKState
		}
	case StateRegenerating:
		switch toState {
		// Even while the endpoint is regenerating it is
		// possible that further changes require a new
		// build. In this case the endpoint is transitioned
		// from the regenerating state to
		// waiting-for-identity or waiting-to-regenerate state.
		case StateWaitingForIdentity, StateDisconnecting, StateWaitingToRegenerate, StateRestoring:
			goto OKState
		}
	case StateRestoring:
		switch toState {
		case StateDisconnecting, StateWaitingToRegenerate, StateRestoring:
			goto OKState
		}
	}
	// Invalid transition: log it with the caller's location for debugging,
	// unless it is a no-op (toState == fromState).
	if toState != fromState {
		_, fileName, fileLine, _ := runtime.Caller(1)
		e.Logger().WithFields(logrus.Fields{
			logfields.EndpointState + ".from": fromState,
			logfields.EndpointState + ".to":   toState,
			"file":                            fileName,
			"line":                            fileLine,
		}).Info("Invalid state transition skipped")
	}
	e.logStatusLocked(Other, Warning, fmt.Sprintf("Skipped invalid state transition to %s due to: %s", toState, reason))
	return false
OKState:
	e.state = toState
	e.logStatusLocked(Other, OK, reason)
	// Initial state transitions i.e nil --> waiting-for-identity
	// need to be handled correctly while updating metrics.
	// Note that if we are transitioning from some state to restoring
	// state, we cannot decrement the old state counters as they will not
	// be accounted for in the metrics.
	if fromState != "" && toState != StateRestoring {
		metrics.EndpointStateCount.
			WithLabelValues(fromState).Dec()
	}
	// Since StateDisconnected is the final state, after which the
	// endpoint is gone, we should not increment metrics for this state.
	if toState != "" && toState != StateDisconnected {
		metrics.EndpointStateCount.
			WithLabelValues(toState).Inc()
	}
	return true
}
// BuilderSetStateLocked modifies the endpoint's state
// endpoint.Mutex must be held
// endpoint BuildMutex must be held!
//
// Unlike SetStateLocked, this variant only permits the transitions the
// build process itself performs: waiting-to-regenerate --> regenerating
// and regenerating --> ready.
func (e *Endpoint) BuilderSetStateLocked(toState, reason string) bool {
	// Validate the state transition.
	fromState := e.state
	switch fromState { // From state
	case StateCreating, StateWaitingForIdentity, StateReady, StateDisconnecting, StateDisconnected:
		// No valid transitions for the builder
	case StateWaitingToRegenerate:
		switch toState {
		// Builder transitions the endpoint from
		// waiting-to-regenerate state to regenerating state
		// right after acquiring the endpoint lock, and while
		// endpoint's build mutex is held. All changes to
		// cilium and endpoint configuration, policy as well
		// as the existing set of security identities will be
		// reconsidered after this point, i.e., even if some
		// of them are changed regeneration need not be queued
		// if the endpoint is already in waiting-to-regenerate
		// state.
		case StateRegenerating:
			goto OKState
		}
	case StateRegenerating:
		switch toState {
		// While still holding the build mutex, the builder
		// tries to transition the endpoint to ready
		// state. But since the endpoint mutex was released
		// for the duration of the bpf generation, it is
		// possible that another build request has been
		// queued. In this case the endpoint has been
		// transitioned to waiting-to-regenerate state
		// already, and the transition to ready state is
		// skipped.
		case StateReady:
			goto OKState
		}
	}
	e.logStatusLocked(Other, Warning, fmt.Sprintf("Skipped invalid state transition to %s due to: %s", toState, reason))
	return false
OKState:
	e.state = toState
	e.logStatusLocked(Other, OK, reason)
	// Restoring transitions are excluded from the decrement for the same
	// reason as in SetStateLocked: they are not accounted in the metrics.
	if fromState != "" && toState != StateRestoring {
		metrics.EndpointStateCount.
			WithLabelValues(fromState).Dec()
	}
	// Since StateDisconnected is the final state, after which the
	// endpoint is gone, we should not increment metrics for this state.
	if toState != "" && toState != StateDisconnected {
		metrics.EndpointStateCount.
			WithLabelValues(toState).Inc()
	}
	return true
}
// bumpPolicyRevisionLocked marks the endpoint to be running the next scheduled
// policy revision as setup by e.regenerate().
// endpoint.Mutex should be held.
func (e *Endpoint) bumpPolicyRevisionLocked(revision uint64) {
	// Revisions only ever move forward; ignore stale values.
	if revision <= e.policyRevision {
		return
	}
	e.setPolicyRevision(revision)
}
// OnProxyPolicyUpdate is a callback used to update the Endpoint's
// proxyPolicyRevision when the specified revision has been applied in the
// proxy.
func (e *Endpoint) OnProxyPolicyUpdate(revision uint64) {
	// NOTE: UnconditionalLock is used here because this callback has no way of reporting an error
	e.UnconditionalLock()
	defer e.Unlock()
	// Only move the revision forward; out-of-order updates are ignored.
	if revision <= e.proxyPolicyRevision {
		return
	}
	e.proxyPolicyRevision = revision
}
// getProxyStatisticsLocked gets the ProxyStatistics for the flows with the
// given characteristics, or adds a new one and returns it.
// Must be called with e.proxyStatisticsMutex held.
func (e *Endpoint) getProxyStatisticsLocked(l7Protocol string, port uint16, ingress bool) *models.ProxyStatistics {
	location := models.ProxyStatisticsLocationEgress
	if ingress {
		location = models.ProxyStatisticsLocationIngress
	}

	// The key identifies the flow class: direction, port and L7 protocol.
	key := models.ProxyStatistics{
		Location: location,
		Port:     int64(port),
		Protocol: l7Protocol,
	}

	// Lazily initialize the statistics map.
	if e.proxyStatistics == nil {
		e.proxyStatistics = make(map[models.ProxyStatistics]*models.ProxyStatistics)
	}

	if stats, ok := e.proxyStatistics[key]; ok {
		return stats
	}

	// Not tracked yet: store a copy of the key with freshly allocated
	// request/response counters.
	entry := key
	entry.Statistics = &models.RequestResponseStatistics{
		Requests:  &models.MessageForwardingStatistics{},
		Responses: &models.MessageForwardingStatistics{},
	}
	e.proxyStatistics[key] = &entry
	return &entry
}
// UpdateProxyStatistics updates the Endpoint's proxy statistics to account
// for a new observed flow with the given characteristics.
func (e *Endpoint) UpdateProxyStatistics(l7Protocol string, port uint16, ingress, request bool, verdict accesslog.FlowVerdict) {
	e.proxyStatisticsMutex.Lock()
	defer e.proxyStatisticsMutex.Unlock()

	proxyStats := e.getProxyStatisticsLocked(l7Protocol, port, ingress)

	// Pick the direction-specific counter set.
	stats := proxyStats.Statistics.Responses
	if request {
		stats = proxyStats.Statistics.Requests
	}

	// Every observed message counts as received; the verdict decides
	// which outcome counter (and matching Prometheus metric) to bump.
	stats.Received++
	metrics.ProxyReceived.Inc()

	switch verdict {
	case accesslog.VerdictForwarded:
		stats.Forwarded++
		metrics.ProxyForwarded.Inc()
	case accesslog.VerdictDenied:
		stats.Denied++
		metrics.ProxyDenied.Inc()
	case accesslog.VerdictError:
		stats.Error++
		metrics.ProxyParseErrors.Inc()
	}
}
// APICanModify determines whether API requests from a user are allowed to
// modify this endpoint.
func APICanModify(e *Endpoint) error {
	// Endpoints still carrying the special init identity may always be
	// modified.
	if e.IsInit() {
		return nil
	}
	// Endpoints with a reserved orchestration identity are off-limits.
	if e.OpLabels.OrchestrationIdentity.FindReserved() != nil {
		return fmt.Errorf("Endpoint cannot be modified by API call")
	}
	return nil
}
// getIDandLabels returns a human-readable "ID (labels)" string for logging.
func (e *Endpoint) getIDandLabels() string {
	e.UnconditionalRLock()
	defer e.RUnlock()

	var labels string
	if id := e.SecurityIdentity; id != nil {
		labels = id.Labels.String()
	}

	return fmt.Sprintf("%d (%s)", e.ID, labels)
}
// ModifyIdentityLabels changes the custom and orchestration identity labels of an endpoint.
// Labels can be added or deleted. If a label change is performed, the
// endpoint will receive a new identity and will be regenerated. Both of these
// operations will happen in the background.
func (e *Endpoint) ModifyIdentityLabels(owner Owner, addLabels, delLabels pkgLabels.Labels) error {
	if err := e.LockAlive(); err != nil {
		return err
	}

	switch e.GetStateLocked() {
	case StateDisconnected, StateDisconnecting:
		// Nothing to do for endpoints which are going away. Release the
		// endpoint mutex before returning; the previous code returned
		// here while still holding the lock acquired by LockAlive().
		e.Unlock()
		return nil
	}

	newLabels := e.OpLabels.DeepCopy()

	for k := range delLabels {
		// The change request is accepted if the label is on
		// any of the lists. If the label is already disabled,
		// we will simply ignore that change.
		if newLabels.Custom[k] == nil && newLabels.OrchestrationIdentity[k] == nil && newLabels.Disabled[k] == nil {
			e.Unlock()
			return fmt.Errorf("label %s not found", k)
		}

		// Deleting an orchestration label moves it to the disabled list
		// so that a later add can restore it with its original value.
		if v := newLabels.OrchestrationIdentity[k]; v != nil {
			delete(newLabels.OrchestrationIdentity, k)
			newLabels.Disabled[k] = v
		}

		if newLabels.Custom[k] != nil {
			delete(newLabels.Custom, k)
		}
	}

	for k, v := range addLabels {
		if newLabels.Disabled[k] != nil { // Restore label.
			delete(newLabels.Disabled, k)
			newLabels.OrchestrationIdentity[k] = v
		} else if newLabels.OrchestrationIdentity[k] != nil { // Replace label's source and value.
			newLabels.OrchestrationIdentity[k] = v
		} else {
			newLabels.Custom[k] = v
		}
	}

	e.OpLabels = *newLabels

	// Mark with StateWaitingForIdentity, it will be set to
	// StateWaitingToRegenerate after the identity resolution has been
	// completed
	e.SetStateLocked(StateWaitingForIdentity, "Triggering identity resolution due to updated identity labels")

	e.identityRevision++
	rev := e.identityRevision

	e.Unlock()

	// Resolve the new identity in the background; must be called without
	// the endpoint mutex held.
	e.runLabelsResolver(owner, rev)

	return nil
}
// IsInit returns true if the endpoint still hasn't received identity labels,
// i.e. has the special identity with label reserved:init.
func (e *Endpoint) IsInit() bool {
	lbl := e.OpLabels.GetIdentityLabel(pkgLabels.IDNameInit)
	if lbl == nil {
		return false
	}
	return lbl.Source == pkgLabels.LabelSourceReserved
}
// UpdateLabels is called to update the labels of an endpoint. Calls to this
// function do not necessarily mean that the labels actually changed. The
// container runtime layer will periodically synchronize labels.
//
// If a net label change was performed, the endpoint will receive a new
// identity and will be regenerated. Both of these operations will happen in
// the background.
func (e *Endpoint) UpdateLabels(owner Owner, identityLabels, infoLabels pkgLabels.Labels) {
	log.WithFields(logrus.Fields{
		logfields.ContainerID:    e.GetShortContainerID(),
		logfields.EndpointID:     e.StringID(),
		logfields.IdentityLabels: identityLabels.String(),
		logfields.InfoLabels:     infoLabels.String(),
	}).Debug("Refreshing labels of endpoint")

	if err := e.LockAlive(); err != nil {
		// Message previously misspelled "endpint".
		e.LogDisconnectedMutexAction(err, "when trying to refresh endpoint labels")
		return
	}

	e.replaceInformationLabels(infoLabels)
	// replace identity labels and update the identity if labels have changed
	rev := e.replaceIdentityLabels(identityLabels)
	e.Unlock()
	// A non-zero revision means the identity labels changed; kick off
	// identity resolution in the background.
	if rev != 0 {
		e.runLabelsResolver(owner, rev)
	}
}
// identityResolutionIsObsolete reports whether an in-flight identity
// resolution started at revision myChangeRev should be abandoned: either the
// endpoint has disconnected, or a newer identity revision has superseded it
// (in which case a fresh resolution routine will have been started).
func (e *Endpoint) identityResolutionIsObsolete(myChangeRev int) bool {
	return e.state == StateDisconnected || myChangeRev != e.identityRevision
}
// runLabelsResolver starts the identity resolution controller for the
// endpoint's current identity labels.
// Must be called with e.Mutex NOT held.
func (e *Endpoint) runLabelsResolver(owner Owner, myChangeRev int) {
	if err := e.RLockAlive(); err != nil {
		// If a labels update and an endpoint delete API request arrive
		// in quick succession, this could occur; in that case, there's
		// no point updating the controller.
		e.Logger().WithError(err).Info("Cannot run labels resolver")
		return
	}
	idLabels := e.OpLabels.IdentityLabels()
	e.RUnlock()

	scopedLog := e.Logger().WithField(logfields.IdentityLabels, idLabels)

	// If we are certain we can resolve the identity without accessing the KV
	// store, do it first synchronously right now. This can reduce the number
	// of regenerations for the endpoint during its initialization.
	if identityPkg.IdentityAllocationIsLocal(idLabels) {
		scopedLog.Debug("Endpoint has reserved identity, changing synchronously")
		if err := e.identityLabelsChanged(owner, myChangeRev); err != nil {
			scopedLog.WithError(err).Warn("Error changing endpoint identity")
		}
	}

	// The controller retries the (possibly kvstore-backed) resolution
	// periodically until it succeeds or becomes obsolete.
	e.controllers.UpdateController(fmt.Sprintf("resolve-identity-%d", e.ID),
		controller.ControllerParams{
			DoFunc: func() error {
				return e.identityLabelsChanged(owner, myChangeRev)
			},
			RunInterval: 5 * time.Minute,
		},
	)
}
// identityLabelsChanged allocates the security identity matching the
// endpoint's current identity labels and assigns it to the endpoint,
// triggering a regeneration if the transition to waiting-to-regenerate
// succeeds. myChangeRev is the identity revision this resolution was started
// for; if the endpoint has moved on to a newer revision (or disconnected),
// the routine aborts and releases any identity it allocated.
//
// Must be called with e.Mutex NOT held; the endpoint lock is acquired and
// released internally, including around the possibly slow kvstore-backed
// identity allocation.
func (e *Endpoint) identityLabelsChanged(owner Owner, myChangeRev int) error {
	if err := e.RLockAlive(); err != nil {
		return err
	}
	newLabels := e.OpLabels.IdentityLabels()
	elog := e.Logger().WithFields(logrus.Fields{
		logfields.EndpointID:     e.ID,
		logfields.IdentityLabels: newLabels,
	})
	// Since we unlocked the endpoint and re-locked, the label update may already be obsolete
	if e.identityResolutionIsObsolete(myChangeRev) {
		e.RUnlock()
		elog.Debug("Endpoint identity has changed, aborting resolution routine in favour of new one")
		return nil
	}
	// Fast path: labels did not actually change, nothing to allocate.
	if e.SecurityIdentity != nil && e.SecurityIdentity.Labels.Equals(newLabels) {
		// Sets endpoint state to ready if was waiting for identity
		if e.GetStateLocked() == StateWaitingForIdentity {
			e.SetStateLocked(StateReady, "Set identity for this endpoint")
		}
		e.RUnlock()
		elog.Debug("Endpoint labels unchanged, skipping resolution of identity")
		return nil
	}
	// Unlock the endpoint mutex for the possibly long lasting kvstore operation
	e.RUnlock()
	elog.Debug("Resolving identity for labels")
	identity, _, err := identityPkg.AllocateIdentity(newLabels)
	if err != nil {
		err = fmt.Errorf("unable to resolve identity: %s", err)
		e.LogStatus(Other, Warning, fmt.Sprintf("%s (will retry)", err.Error()))
		return err
	}
	if err := e.LockAlive(); err != nil {
		return err
	}
	// Since we unlocked the endpoint and re-locked, the label update may already be obsolete
	if e.identityResolutionIsObsolete(myChangeRev) {
		e.Unlock()
		// The identity allocated above is no longer needed; release it.
		err := identity.Release()
		if err != nil {
			// non fatal error as keys will expire after lease expires but log it
			elog.WithFields(logrus.Fields{logfields.Identity: identity.ID}).
				WithError(err).Warn("Unable to release newly allocated identity again")
		}
		return nil
	}
	// If endpoint has an old identity, defer release of it to the end of
	// the function after the endpoint structured has been unlocked again
	if e.SecurityIdentity != nil {
		oldIdentity := e.SecurityIdentity
		defer func() {
			err := oldIdentity.Release()
			if err != nil {
				elog.WithFields(logrus.Fields{logfields.Identity: oldIdentity.ID}).
					WithError(err).Warn("BUG: Unable to release old endpoint identity")
			}
		}()
	}
	elog.WithFields(logrus.Fields{logfields.Identity: identity.StringID()}).
		Debug("Assigned new identity to endpoint")
	e.SetIdentity(identity)
	readyToRegenerate := e.SetStateLocked(StateWaitingToRegenerate, "Triggering regeneration due to new identity")
	// Unconditionally force policy recomputation after a new identity has been
	// assigned.
	e.ForcePolicyCompute()
	e.Unlock()
	// Regenerate must be called without the endpoint mutex held.
	if readyToRegenerate {
		e.Regenerate(owner, NewRegenerationContext("updated security labels"))
	}
	return nil
}
// setPolicyRevision sets the policy revision to rev and wakes up any waiter
// whose context has been cancelled or whose wanted revision is now reached.
func (e *Endpoint) setPolicyRevision(rev uint64) {
	e.policyRevision = rev
	e.UpdateLogger(map[string]interface{}{
		logfields.DatapathPolicyRevision: e.policyRevision,
	})

	for ps := range e.policyRevisionSignals {
		// Non-blocking check: a cancelled context always fires the
		// signal; otherwise fire only once the wanted revision is met.
		fire := false
		select {
		case <-ps.ctx.Done():
			fire = true
		default:
			fire = rev >= ps.wantedRev
		}
		if fire {
			close(ps.ch)
			delete(e.policyRevisionSignals, ps)
		}
	}
}
// cleanPolicySignals closes and removes all policy revision signals.
func (e *Endpoint) cleanPolicySignals() {
	for s := range e.policyRevisionSignals {
		close(s.ch)
	}
	e.policyRevisionSignals = make(map[policySignal]bool)
}
// policySignal is used to mark when a wanted policy revision is reached.
type policySignal struct {
	// wantedRev specifies which policy revision the signal wants.
	wantedRev uint64
	// ch is the channel that is closed once the wanted policy revision is
	// reached (or the request's context is cancelled).
	ch chan struct{}
	// ctx is the context for the policy signal request.
	ctx context.Context
}
// WaitForPolicyRevision returns a channel that is closed when one or more of
// the following conditions have been met:
//  - the endpoint is in disconnected state
//  - the endpoint's policy revision reaches the wanted revision
func (e *Endpoint) WaitForPolicyRevision(ctx context.Context, rev uint64) <-chan struct{} {
	// NOTE: UnconditionalLock is used here because this method handles endpoint in disconnected state on its own
	e.UnconditionalLock()
	defer e.Unlock()

	ch := make(chan struct{})

	// Already satisfied, or the endpoint is gone: signal immediately.
	if e.policyRevision >= rev || e.state == StateDisconnected {
		close(ch)
		return ch
	}

	// Otherwise register a signal; setPolicyRevision closes the channel
	// once the wanted revision is reached.
	if e.policyRevisionSignals == nil {
		e.policyRevisionSignals = map[policySignal]bool{}
	}
	e.policyRevisionSignals[policySignal{
		wantedRev: rev,
		ctx:       ctx,
		ch:        ch,
	}] = true
	return ch
}
// IPs returns the slice of valid IPs for this endpoint.
func (e *Endpoint) IPs() []net.IP {
	addrs := []net.IP{}
	if e.IPv4 != nil {
		addrs = append(addrs, e.IPv4.IP())
	}
	if e.IPv6 != nil {
		addrs = append(addrs, e.IPv6.IP())
	}
	return addrs
}
// InsertEvent is called when the endpoint is inserted into the endpoint
// manager.
func (e *Endpoint) InsertEvent() {
	scopedLog := e.Logger()
	scopedLog.Info("New endpoint")
}
// syncPolicyMap attempts to synchronize the PolicyMap for this endpoint to
// contain the set of PolicyKeys represented by the endpoint's desiredMapState.
// It checks the current contents of the endpoint's PolicyMap and deletes any
// PolicyKeys that are not present in the endpoint's desiredMapState. It then
// adds any keys that are not present in the map. When a key from desiredMapState
// is inserted successfully to the endpoint's BPF PolicyMap, it is added to the
// endpoint's realizedMapState field. Returns an error if the endpoint's BPF
// PolicyMap is unable to be dumped, or any update operation to the map fails.
// Must be called with e.Mutex locked.
func (e *Endpoint) syncPolicyMap() error {
	// Lazily initialize the realized/desired state maps so lookups below
	// are safe on a freshly created endpoint.
	if e.realizedMapState == nil {
		e.realizedMapState = make(PolicyMapState)
	}
	if e.desiredMapState == nil {
		e.desiredMapState = make(PolicyMapState)
	}
	if e.PolicyMap == nil {
		return fmt.Errorf("not syncing PolicyMap state for endpoint because PolicyMap is nil")
	}
	currentMapContents, err := e.PolicyMap.DumpToSlice()
	// If map is unable to be dumped, attempt to close map and open it again.
	// See GH-4229.
	if err != nil {
		e.Logger().WithError(err).Error("unable to dump PolicyMap when trying to sync desired and realized PolicyMap state")
		// Close to avoid leaking of file descriptors, but still continue in case
		// Close() does not succeed, because otherwise the map will never be
		// opened again unless the agent is restarted.
		// Note: this ':=' deliberately shadows the outer err so the
		// recovery path below tracks its own error chain.
		err := e.PolicyMap.Close()
		if err != nil {
			e.Logger().WithError(err).Error("unable to close PolicyMap which was not able to be dumped")
		}
		e.PolicyMap, _, err = policymap.OpenMap(e.PolicyMapPathLocked())
		if err != nil {
			return fmt.Errorf("unable to open PolicyMap for endpoint: %s", err)
		}
		// Try to dump again, fail if error occurs.
		currentMapContents, err = e.PolicyMap.DumpToSlice()
		if err != nil {
			return err
		}
	}
	// Collect individual key add/delete failures; sync continues past
	// failures so a single bad key does not block the rest.
	errors := []error{}
	for _, entry := range currentMapContents {
		// Convert key to host-byte order for lookup in the desiredMapState.
		keyHostOrder := entry.Key.ToHost()
		// If key that is in policy map is not in desired state, just remove it.
		if _, ok := e.desiredMapState[keyHostOrder]; !ok {
			// Can pass key with host byte-order fields, as it will get
			// converted to network byte-order.
			err := e.PolicyMap.DeleteKey(keyHostOrder)
			if err != nil {
				e.Logger().WithError(err).Errorf("Failed to delete PolicyMap key %s", entry.Key.String())
				errors = append(errors, err)
			} else {
				// Operation was successful, remove from realized state.
				delete(e.realizedMapState, keyHostOrder)
			}
		}
	}
	for keyToAdd, entry := range e.desiredMapState {
		// Insert keys that are missing from (or stale in) the realized state.
		if oldEntry, ok := e.realizedMapState[keyToAdd]; !ok || oldEntry != entry {
			err := e.PolicyMap.AllowKey(keyToAdd, entry.ProxyPort)
			if err != nil {
				e.Logger().WithError(err).Errorf("Failed to add PolicyMap key %s %d", keyToAdd.String(), entry.ProxyPort)
				errors = append(errors, err)
			} else {
				// Operation was successful, add to realized state.
				e.realizedMapState[keyToAdd] = entry
			}
		}
	}
	if len(errors) > 0 {
		return fmt.Errorf("synchronizing desired PolicyMap state failed: %s", errors)
	}
	return nil
}
// syncPolicyMapController installs the periodic controller which keeps the
// endpoint's BPF PolicyMap in sync with its desired policy map state.
func (e *Endpoint) syncPolicyMapController() {
	e.controllers.UpdateController(
		fmt.Sprintf("sync-policymap-%d", e.ID),
		controller.ControllerParams{
			RunInterval: 1 * time.Minute,
			DoFunc: func() (reterr error) {
				// Failure to lock is not an error, it means
				// that the endpoint was disconnected and we
				// should exit gracefully.
				if err := e.LockAlive(); err != nil {
					return nil
				}
				defer e.Unlock()
				return e.syncPolicyMap()
			},
		},
	)
}
// IsDisconnecting returns true if the endpoint is being disconnected or
// already disconnected
//
// This function must be called after re-aquiring the endpoint mutex to verify
// that the endpoint has not been removed in the meantime.
//
// endpoint.mutex must be held in read mode at least
func (e *Endpoint) IsDisconnecting() bool {
	switch e.state {
	case StateDisconnected, StateDisconnecting:
		return true
	}
	return false
}
// garbageCollectConntrack will run the ctmap.GC() on either the endpoint's
// local conntrack table or the global conntrack table.
//
// The endpoint lock must be held
func (e *Endpoint) garbageCollectConntrack(filter *ctmap.GCFilter) {
	var maps []*ctmap.Map
	ipv4 := !option.Config.IPv4Disabled

	if e.ConntrackLocalLocked() {
		maps = ctmap.LocalMaps(e, ipv4, true)
	} else {
		maps = ctmap.GlobalMaps(ipv4, true)
	}

	for _, m := range maps {
		if err := m.Open(); err != nil {
			// Log the path alongside the open failure when it can
			// be determined.
			filepath, err2 := m.Path()
			if err2 != nil {
				log.WithError(err2).Warn("Unable to get CT map path")
			}
			log.WithError(err).WithField(logfields.Path, filepath).Warn("Unable to open map")
			continue
		}
		ctmap.GC(m, filter)
		// Close each map as soon as it has been garbage collected.
		// The previous defer-in-loop kept every map's file descriptor
		// open until the function returned.
		m.Close()
	}
}
// scrubIPsInConntrackTableLocked removes conntrack entries matching the
// endpoint's IPv4 and IPv6 addresses. The endpoint lock must be held.
func (e *Endpoint) scrubIPsInConntrackTableLocked() {
	ips := map[string]struct{}{
		e.IPv4.String(): {},
		e.IPv6.String(): {},
	}
	e.garbageCollectConntrack(&ctmap.GCFilter{MatchIPs: ips})
}
// scrubIPsInConntrackTable is the locking wrapper around
// scrubIPsInConntrackTableLocked.
func (e *Endpoint) scrubIPsInConntrackTable() {
	e.UnconditionalLock()
	defer e.Unlock()
	e.scrubIPsInConntrackTableLocked()
}
endpoint: Don't scrub local ct map on leave
The local ct map will be deleted soon after, so don't bother spending
the CPU cycles to dump and delete entries from the table.
Signed-off-by: Joe Stringer <16a9a54ddf4259952e3c118c763138e83693d7fd@covalent.io>
// Copyright 2016-2018 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package endpoint
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"math/rand"
"net"
"os"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"unsafe"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/common/addressing"
"github.com/cilium/cilium/pkg/bpf"
"github.com/cilium/cilium/pkg/completion"
"github.com/cilium/cilium/pkg/controller"
identityPkg "github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/k8s"
"github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
cilium_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
cilium_client_v2 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2"
pkgLabels "github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/mac"
"github.com/cilium/cilium/pkg/maps/cidrmap"
"github.com/cilium/cilium/pkg/maps/ctmap"
"github.com/cilium/cilium/pkg/maps/lxcmap"
"github.com/cilium/cilium/pkg/maps/policymap"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/monitor/notifications"
"github.com/cilium/cilium/pkg/node"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy"
"github.com/cilium/cilium/pkg/proxy/accesslog"
"github.com/cilium/cilium/pkg/u8proto"
"github.com/cilium/cilium/pkg/versioncheck"
go_version "github.com/hashicorp/go-version"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/rest"
"github.com/sirupsen/logrus"
)
const (
	// maxLogs bounds the number of log/status entries retained.
	// NOTE(review): the consumer of this constant is outside this chunk —
	// confirm against the endpoint status log implementation.
	maxLogs = 256
)
var (
	// EndpointMutableOptionLibrary is the set of mutable endpoint options,
	// obtained from the option package.
	EndpointMutableOptionLibrary = option.GetEndpointMutableOptionLibrary()

	// ciliumEPControllerLimit is the range of k8s versions with which we are
	// willing to run the EndpointCRD controllers
	ciliumEPControllerLimit = versioncheck.MustCompile("> 1.6")

	// ciliumEndpointSyncControllerK8sClient is a k8s client shared by the
	// RunK8sCiliumEndpointSync and RunK8sCiliumEndpointSyncGC. They obtain the
	// controller via getCiliumClient and the sync.Once is used to avoid race.
	ciliumEndpointSyncControllerOnce sync.Once
	ciliumEndpointSyncControllerK8sClient clientset.Interface

	// ciliumUpdateStatusVerConstr is the minimal version supported to
	// perform a CRD UpdateStatus.
	ciliumUpdateStatusVerConstr = versioncheck.MustCompile(">= 1.11.0")

	// k8sServerVer holds a k8s server version.
	// NOTE(review): not referenced within this chunk — confirm where it is
	// assigned and read.
	k8sServerVer *go_version.Version
)
// getCiliumClient builds and returns a k8s auto-generated client for cilium
// objects
func getCiliumClient() (ciliumClient cilium_client_v2.CiliumV2Interface, err error) {
	// This allows us to reuse the k8s client
	// Note: the named return value err is only assigned inside the Do
	// closure; on any call after the first, Do is a no-op and err stays
	// nil, which is why the nil-client check below is still required.
	ciliumEndpointSyncControllerOnce.Do(func() {
		var (
			restConfig *rest.Config
			k8sClient  *clientset.Clientset
		)

		restConfig, err = k8s.CreateConfig()
		if err != nil {
			return
		}

		k8sClient, err = clientset.NewForConfig(restConfig)
		if err != nil {
			return
		}

		ciliumEndpointSyncControllerK8sClient = k8sClient
	})

	if err != nil {
		return nil, err
	}

	// This guards against the situation where another invocation of this
	// function (in another thread or previous in time) might have returned an
	// error and not initialized ciliumEndpointSyncControllerK8sClient
	if ciliumEndpointSyncControllerK8sClient == nil {
		return nil, errors.New("No initialised k8s Cilium CRD client")
	}

	return ciliumEndpointSyncControllerK8sClient.CiliumV2(), nil
}
// RunK8sCiliumEndpointSyncGC starts the node-singleton sweeper for
// CiliumEndpoint objects where the managing node is no longer running. These
// objects are created by the sync-to-k8s-ciliumendpoint controller on each
// Endpoint.
// The general steps are:
//   - get list of nodes
//   - only run with probability 1/nodes
//   - get list of CEPs
//   - for each CEP
//       delete CEP if the corresponding pod does not exist
// CiliumEndpoint objects have the same name as the pod they represent
func RunK8sCiliumEndpointSyncGC() {
	var (
		controllerName = fmt.Sprintf("sync-to-k8s-ciliumendpoint-gc (%v)", node.GetName())
		scopedLog      = log.WithField("controller", controllerName)

		// random source to throttle how often this controller runs cluster-wide
		runThrottler = rand.New(rand.NewSource(time.Now().UnixNano()))
	)

	// this is a sanity check
	if !k8s.IsEnabled() {
		scopedLog.WithField("name", controllerName).Warn("Not running controller because k8s is disabled")
		return
	}
	sv, err := k8s.GetServerVersion()
	if err != nil {
		scopedLog.WithError(err).Error("unable to retrieve kubernetes server version")
		return
	}
	if !ciliumEPControllerLimit.Check(sv) {
		// Fix: "expected" is the version constraint and "found" is the
		// detected server version; the two values were previously swapped.
		scopedLog.WithFields(logrus.Fields{
			"expected": ciliumEPControllerLimit,
			"found":    sv,
		}).Warn("cannot run with this k8s version")
		return
	}

	ciliumClient, err := getCiliumClient()
	if err != nil {
		scopedLog.WithError(err).Error("Not starting controller because unable to get cilium k8s client")
		return
	}
	k8sClient := k8s.Client()

	// this dummy manager is needed only to add this controller to the global list
	controller.NewManager().UpdateController(controllerName,
		controller.ControllerParams{
			RunInterval: 1 * time.Minute,
			DoFunc: func() error {
				// Don't run if there are no other known nodes
				// Only run with a probability of 1/(number of nodes in cluster). This
				// is because this controller runs on every node on the same interval
				// but only one is needed to run.
				nodes := node.GetNodes()
				if len(nodes) <= 1 || runThrottler.Int63n(int64(len(nodes))) != 0 {
					return nil
				}

				// Build the set of "name:namespace" keys of every pod in
				// the cluster; a CEP without a matching pod is orphaned.
				clusterPodSet := map[string]bool{}
				clusterPods, err := k8sClient.CoreV1().Pods("").List(meta_v1.ListOptions{})
				if err != nil {
					return err
				}
				for _, pod := range clusterPods.Items {
					podFullName := pod.Name + ":" + pod.Namespace
					clusterPodSet[podFullName] = true
				}

				// "" is all-namespaces
				ceps, err := ciliumClient.CiliumEndpoints(meta_v1.NamespaceAll).List(meta_v1.ListOptions{})
				if err != nil {
					scopedLog.WithError(err).Debug("Cannot list CEPs")
					return err
				}
				for _, cep := range ceps.Items {
					cepFullName := cep.Name + ":" + cep.Namespace
					if _, found := clusterPodSet[cepFullName]; !found {
						// delete
						scopedLog = scopedLog.WithFields(logrus.Fields{
							logfields.EndpointID: cep.Status.ID,
							logfields.K8sPodName: cepFullName,
						})
						scopedLog.Debug("Orphaned CiliumEndpoint is being garbage collected")
						if err := ciliumClient.CiliumEndpoints(cep.Namespace).Delete(cep.Name, &meta_v1.DeleteOptions{}); err != nil {
							scopedLog.WithError(err).Debug("Unable to delete CEP")
							return err
						}
					}
				}
				return nil
			},
		})
}
const (
	// StateCreating marks that the endpoint is being created.
	StateCreating = string(models.EndpointStateCreating)

	// StateWaitingForIdentity marks that the endpoint is waiting
	// for an identity from the KVStore.
	StateWaitingForIdentity = string(models.EndpointStateWaitingForIdentity)

	// StateReady specifies that the endpoint is ready to be used.
	StateReady = string(models.EndpointStateReady)

	// StateWaitingToRegenerate specifies when the endpoint needs to be regenerated, but regeneration has not started yet.
	StateWaitingToRegenerate = string(models.EndpointStateWaitingToRegenerate)

	// StateRegenerating specifies when the endpoint is being regenerated.
	StateRegenerating = string(models.EndpointStateRegenerating)

	// StateDisconnecting indicates that the endpoint is being disconnected.
	StateDisconnecting = string(models.EndpointStateDisconnecting)

	// StateDisconnected marks that the endpoint has been disconnected; this
	// is the final state, after which the endpoint is gone.
	StateDisconnected = string(models.EndpointStateDisconnected)

	// StateRestoring marks that the endpoint is being restored.
	StateRestoring = string(models.EndpointStateRestoring)

	// CallsMapName specifies the base prefix for EP specific call map.
	CallsMapName = "cilium_calls_"

	// PolicyGlobalMapName specifies the global tail call map for EP handle_policy() lookup.
	PolicyGlobalMapName = "cilium_policy"

	// HealthCEPPrefix is the prefix used to name the cilium health endpoints' CEP.
	HealthCEPPrefix = "cilium-health-"
)
// Compile-time check that *Endpoint implements
// notifications.RegenNotificationInfo.
var _ notifications.RegenNotificationInfo = &Endpoint{}

// PolicyMapState is a state of a policy map.
type PolicyMapState map[policymap.PolicyKey]PolicyMapStateEntry

// PolicyMapStateEntry is the configuration associated with a PolicyKey in a
// PolicyMapState. This is a minimized version of policymap.PolicyEntry.
type PolicyMapStateEntry struct {
	// ProxyPort is the proxy port, in host byte order.
	// If 0 (default), there is no proxy redirection for the corresponding
	// PolicyKey.
	ProxyPort uint16
}
// Endpoint represents a container or similar which can be individually
// addresses on L3 with its own IP addresses. This structured is managed by the
// endpoint manager in pkg/endpointmanager.
//
//
// WARNING - STABLE API
// This structure is written as JSON to StateDir/{ID}/lxc_config.h to allow to
// restore endpoints when the agent is being restarted. The restore operation
// will read the file and re-create all endpoints with all fields which are not
// marked as private to JSON marshal. Do NOT modify this structure in ways which
// is not JSON forward compatible.
//
type Endpoint struct {
// ID of the endpoint, unique in the scope of the node
ID uint16
// mutex protects write operations to this endpoint structure except
// for the logger field which has its own mutex
mutex lock.RWMutex
// ContainerName is the name given to the endpoint by the container runtime
ContainerName string
// ContainerID is the container ID that docker has assigned to the endpoint
// Note: The JSON tag was kept for backward compatibility.
ContainerID string `json:"dockerID,omitempty"`
// DockerNetworkID is the network ID of the libnetwork network if the
// endpoint is a docker managed container which uses libnetwork
DockerNetworkID string
// DockerEndpointID is the Docker network endpoint ID if managed by
// libnetwork
DockerEndpointID string
// IfName is the name of the host facing interface (veth pair) which
// connects into the endpoint
IfName string
// IfIndex is the interface index of the host face interface (veth pair)
IfIndex int
// OpLabels is the endpoint's label configuration
//
// FIXME: Rename this field to Labels
OpLabels pkgLabels.OpLabels
// identityRevision is incremented each time the identity label
// information of the endpoint has changed
identityRevision int
// LXCMAC is the MAC address of the endpoint
//
// FIXME: Rename this field to MAC
LXCMAC mac.MAC // Container MAC address.
// IPv6 is the IPv6 address of the endpoint
IPv6 addressing.CiliumIPv6
// IPv4 is the IPv4 address of the endpoint
IPv4 addressing.CiliumIPv4
// NodeMAC is the MAC of the node (agent). The MAC is different for every endpoint.
NodeMAC mac.MAC
// SecurityIdentity is the security identity of this endpoint. This is computed from
// the endpoint's labels.
SecurityIdentity *identityPkg.Identity `json:"SecLabel"`
// hasSidecarProxy indicates whether the endpoint has been injected by
// Istio with a Cilium-compatible sidecar proxy. If true, the sidecar proxy
// will be used to apply L7 policy rules. Otherwise, Cilium's node-wide
// proxy will be used.
// TODO: Currently this applies only to HTTP L7 rules. Kafka L7 rules are still enforced by Cilium's node-wide Kafka proxy.
hasSidecarProxy bool
// prevIdentityCache is the set of all security identities used in the
// previous policy computation
prevIdentityCache *identityPkg.IdentityCache
// RealizedL4Policy is the L4Policy in effect for the endpoint.
RealizedL4Policy *policy.L4Policy `json:"-"`
// DesiredL4Policy is the desired L4Policy for the endpoint. It is populated
// when the policy for this endpoint is generated.
DesiredL4Policy *policy.L4Policy `json:"-"`
// PolicyMap is the policy related state of the datapath including
// reference to all policy related BPF
PolicyMap *policymap.PolicyMap `json:"-"`
// CIDRPolicy is the CIDR based policy configuration of the endpoint.
L3Policy *policy.CIDRPolicy `json:"-"`
// Options determine the datapath configuration of the endpoint.
Options *option.IntOptions
// Status are the last n state transitions this endpoint went through
Status *EndpointStatus
// state is the state the endpoint is in. See SetStateLocked()
state string
// bpfHeaderfileHash is the hash of the last BPF headerfile that has been
// compiled and installed.
bpfHeaderfileHash string
k8sPodName string
k8sNamespace string
// policyRevision is the policy revision this endpoint is currently on
// to modify this field please use endpoint.setPolicyRevision instead
policyRevision uint64
// policyRevisionSignals contains a map of PolicyRevision signals that
// should be triggered once the policyRevision reaches the wanted wantedRev.
policyRevisionSignals map[policySignal]bool
// proxyPolicyRevision is the policy revision that has been applied to
// the proxy.
proxyPolicyRevision uint64
// proxyStatisticsMutex is the mutex that must be held to read or write
// proxyStatistics.
proxyStatisticsMutex lock.RWMutex
// proxyStatistics contains statistics of proxy redirects.
// They keys in this map are the ProxyStatistics with their
// AllocatedProxyPort and Statistics fields set to 0 and nil.
// You must hold Endpoint.proxyStatisticsMutex to read or write it.
proxyStatistics map[models.ProxyStatistics]*models.ProxyStatistics
// nextPolicyRevision is the policy revision that the endpoint has
// updated to and that will become effective with the next regenerate
nextPolicyRevision uint64
// forcePolicyCompute full endpoint policy recomputation
// Set when endpoint options have been changed. Cleared right before releasing the
// endpoint mutex after policy recalculation.
forcePolicyCompute bool
// BuildMutex synchronizes builds of individual endpoints and locks out
// deletion during builds
//
// FIXME: Mark private once endpoint deletion can be moved into
// `pkg/endpoint`
BuildMutex lock.Mutex `json:"-"`
// logger is a logrus object with fields set to report an endpoints information.
// You must hold Endpoint.Mutex to read or write it (but not to log with it).
logger unsafe.Pointer
// controllers is the list of async controllers syncing the endpoint to
// other resources
controllers controller.Manager
// realizedRedirects maps the ID of each proxy redirect that has been
// successfully added into a proxy for this endpoint, to the redirect's
// proxy port number.
// You must hold Endpoint.Mutex to read or write it.
realizedRedirects map[string]uint16
// realizedMapState maps each PolicyKey which is presently
// inserted (realized) in the endpoint's BPF PolicyMap to a proxy port.
// Proxy port 0 indicates no proxy redirection.
// All fields within the PolicyKey and the proxy port must be in host byte-order.
realizedMapState PolicyMapState
// desiredMapState maps each PolicyKeys which should be synched
// with, but may not yet be synched with, the endpoint's BPF PolicyMap, to
// a proxy port.
// This map is updated upon regeneration of policy for an endpoint.
// Proxy port 0 indicates no proxy redirection.
// All fields within the PolicyKey and the proxy port must be in host byte-order.
desiredMapState PolicyMapState
// ctCleaned indicates whether the conntrack table has already been
// cleaned when this endpoint was first created
ctCleaned bool
// ingressPolicyEnabled specifies whether policy enforcement on ingress
// is enabled for this endpoint.
ingressPolicyEnabled bool
// egressPolicyEnabled specifies whether policy enforcement on egress
// is enabled for this endpoint.
egressPolicyEnabled bool
///////////////////////
// DEPRECATED FIELDS //
///////////////////////
// DeprecatedOpts represents the mutable options for the endpoint, in
// the format understood by Cilium 1.1 or earlier.
//
// Deprecated: Use Options instead.
DeprecatedOpts deprecatedOptions `json:"Opts"`
}
// GetIngressPolicyEnabledLocked returns whether ingress policy enforcement is
// enabled for endpoint or not. The endpoint's mutex must be held.
func (e *Endpoint) GetIngressPolicyEnabledLocked() bool {
	return e.ingressPolicyEnabled
}

// GetEgressPolicyEnabledLocked returns whether egress policy enforcement is
// enabled for endpoint or not. The endpoint's mutex must be held.
func (e *Endpoint) GetEgressPolicyEnabledLocked() bool {
	return e.egressPolicyEnabled
}

// SetIngressPolicyEnabled sets Endpoint's ingress policy enforcement
// configuration to the specified value. The endpoint's mutex must not be held.
func (e *Endpoint) SetIngressPolicyEnabled(ingress bool) {
	e.UnconditionalLock()
	defer e.Unlock()
	e.ingressPolicyEnabled = ingress
}

// SetEgressPolicyEnabled sets Endpoint's egress policy enforcement
// configuration to the specified value. The endpoint's mutex must not be held.
func (e *Endpoint) SetEgressPolicyEnabled(egress bool) {
	e.UnconditionalLock()
	defer e.Unlock()
	e.egressPolicyEnabled = egress
}

// SetIngressPolicyEnabledLocked sets Endpoint's ingress policy enforcement
// configuration to the specified value. The endpoint's mutex must be held.
func (e *Endpoint) SetIngressPolicyEnabledLocked(ingress bool) {
	e.ingressPolicyEnabled = ingress
}

// SetEgressPolicyEnabledLocked sets Endpoint's egress policy enforcement
// configuration to the specified value. The endpoint's mutex must be held.
func (e *Endpoint) SetEgressPolicyEnabledLocked(egress bool) {
	e.egressPolicyEnabled = egress
}
// WaitForProxyCompletions blocks until all proxy changes have been completed.
// Called with BuildMutex held.
func (e *Endpoint) WaitForProxyCompletions(proxyWaitGroup *completion.WaitGroup) error {
	// Nothing to wait for.
	if proxyWaitGroup == nil {
		return nil
	}

	// Bail out early if the context was already cancelled.
	if err := proxyWaitGroup.Context().Err(); err != nil {
		return fmt.Errorf("context cancelled before waiting for proxy updates: %s", err)
	}

	start := time.Now()
	e.Logger().Debug("Waiting for proxy updates to complete...")
	if err := proxyWaitGroup.Wait(); err != nil {
		return fmt.Errorf("proxy state changes failed: %s", err)
	}
	e.Logger().Debug("Wait time for proxy updates: ", time.Since(start))

	return nil
}
// RunK8sCiliumEndpointSync starts a controller that syncronizes the endpoint
// to the corresponding k8s CiliumEndpoint CRD
// CiliumEndpoint objects have the same name as the pod they represent
func (e *Endpoint) RunK8sCiliumEndpointSync() {
	var (
		endpointID     = e.ID
		controllerName = fmt.Sprintf("sync-to-k8s-ciliumendpoint (%v)", endpointID)
		scopedLog      = e.Logger().WithField("controller", controllerName)
		err            error
	)

	if !k8s.IsEnabled() {
		scopedLog.Debug("Not starting controller because k8s is disabled")
		return
	}

	k8sServerVer, err = k8s.GetServerVersion()
	if err != nil {
		scopedLog.WithError(err).Error("unable to retrieve kubernetes serverversion")
		return
	}

	if !ciliumEPControllerLimit.Check(k8sServerVer) {
		// "expected" is the version constraint this controller requires,
		// "found" is the version reported by the apiserver. (These two
		// fields were previously logged the wrong way around.)
		scopedLog.WithFields(logrus.Fields{
			"expected": ciliumEPControllerLimit,
			"found":    k8sServerVer,
		}).Warn("cannot run with this k8s version")
		return
	}

	ciliumClient, err := getCiliumClient()
	if err != nil {
		scopedLog.WithError(err).Error("Not starting controller because unable to get cilium k8s client")
		return
	}

	// The health endpoint doesn't really exist in k8s and updates to it caused
	// arbitrary errors. Disable the controller for these endpoints.
	if isHealthEP := e.HasLabels(pkgLabels.LabelHealth); isHealthEP {
		scopedLog.Debug("Not starting unnecessary CEP controller for cilium-health endpoint")
		return
	}

	var (
		// lastMdl caches the last model successfully written to k8s so
		// unchanged endpoints skip the API round-trip.
		lastMdl  *models.Endpoint
		firstRun = true
	)

	// NOTE: The controller functions do NOT hold the endpoint locks
	e.controllers.UpdateController(controllerName,
		controller.ControllerParams{
			RunInterval: 10 * time.Second,
			DoFunc: func() (err error) {
				// Update logger as scopeLog might not have the podName when it
				// was created.
				scopedLog = e.Logger().WithField("controller", controllerName)

				podName := e.GetK8sPodName()
				if podName == "" {
					scopedLog.Debug("Skipping CiliumEndpoint update because it has no k8s pod name")
					return nil
				}

				namespace := e.GetK8sNamespace()
				if namespace == "" {
					scopedLog.Debug("Skipping CiliumEndpoint update because it has no k8s namespace")
					return nil
				}

				mdl := e.GetModel()
				if reflect.DeepEqual(mdl, lastMdl) {
					scopedLog.Debug("Skipping CiliumEndpoint update because it has not changed")
					return nil
				}

				k8sMdl := (*cilium_v2.CiliumEndpointDetail)(mdl)

				cep, err := ciliumClient.CiliumEndpoints(namespace).Get(podName, meta_v1.GetOptions{})
				// Any case below that neither returns nor updates falls
				// through to the create call after the switch.
				switch {
				// The CEP doesn't exist. We will fall through to the create code below
				case err != nil && k8serrors.IsNotFound(err):
					break

				// Delete the CEP on the first ever run. We will fall through to the create code below
				case firstRun:
					firstRun = false
					scopedLog.Debug("Deleting CEP on first run")
					err := ciliumClient.CiliumEndpoints(namespace).Delete(podName, &meta_v1.DeleteOptions{})
					if err != nil {
						scopedLog.WithError(err).Warn("Error deleting CEP")
						return err
					}

				// Delete an invalid CEP. We will fall through to the create code below
				case err != nil && k8serrors.IsInvalid(err):
					scopedLog.WithError(err).Warn("Invalid CEP during update")
					err := ciliumClient.CiliumEndpoints(namespace).Delete(podName, &meta_v1.DeleteOptions{})
					if err != nil {
						scopedLog.WithError(err).Warn("Error deleting invalid CEP during update")
						return err
					}

				// A real error
				case err != nil && !k8serrors.IsNotFound(err):
					scopedLog.WithError(err).Error("Cannot get CEP for update")
					return err

				// do an update
				case err == nil:
					// Update the copy of the cep
					k8sMdl.DeepCopyInto(&cep.Status)
					var err2 error
					switch {
					case ciliumUpdateStatusVerConstr.Check(k8sServerVer):
						_, err2 = ciliumClient.CiliumEndpoints(namespace).UpdateStatus(cep)
					default:
						_, err2 = ciliumClient.CiliumEndpoints(namespace).Update(cep)
					}
					if err2 != nil {
						scopedLog.WithError(err2).Error("Cannot update CEP")
						return err2
					}

					lastMdl = mdl
					return nil
				}

				// The CEP was not found, this is the first creation of the endpoint
				cep = &cilium_v2.CiliumEndpoint{
					ObjectMeta: meta_v1.ObjectMeta{
						Name: podName,
					},
					Status: *k8sMdl,
				}

				_, err = ciliumClient.CiliumEndpoints(namespace).Create(cep)
				if err != nil {
					scopedLog.WithError(err).Error("Cannot create CEP")
					return err
				}

				return nil
			},
			StopFunc: func() error {
				// Best effort: remove the CEP when the endpoint goes away.
				podName := e.GetK8sPodName()
				namespace := e.GetK8sNamespace()
				if err := ciliumClient.CiliumEndpoints(namespace).Delete(podName, &meta_v1.DeleteOptions{}); err != nil {
					scopedLog.WithError(err).Error("Unable to delete CEP")
					return err
				}
				return nil
			},
		})
}
// NewEndpointWithState creates a new endpoint useful for testing purposes
func NewEndpointWithState(ID uint16, state string) *Endpoint {
	ep := &Endpoint{
		ID:     ID,
		state:  state,
		Status: NewEndpointStatus(),
	}
	ep.Options = option.NewIntOptions(&EndpointMutableOptionLibrary)
	ep.UpdateLogger(nil)
	return ep
}
// NewEndpointFromChangeModel creates a new endpoint from a request
func NewEndpointFromChangeModel(base *models.EndpointChangeRequest) (*Endpoint, error) {
	if base == nil {
		return nil, nil
	}

	// Populate the identifying fields straight from the request; label
	// categories start out empty and the state is applied further below.
	ep := &Endpoint{
		ID:               uint16(base.ID),
		ContainerName:    base.ContainerName,
		ContainerID:      base.ContainerID,
		DockerNetworkID:  base.DockerNetworkID,
		DockerEndpointID: base.DockerEndpointID,
		IfName:           base.InterfaceName,
		IfIndex:          int(base.InterfaceIndex),
		OpLabels: pkgLabels.OpLabels{
			Custom:                pkgLabels.Labels{},
			Disabled:              pkgLabels.Labels{},
			OrchestrationIdentity: pkgLabels.Labels{},
			OrchestrationInfo:     pkgLabels.Labels{},
		},
		state:  "",
		Status: NewEndpointStatus(),
	}
	ep.UpdateLogger(nil)
	ep.SetStateLocked(string(base.State), "Endpoint creation")

	// MAC addresses are optional; parse them only when provided.
	if base.Mac != "" {
		lxcMAC, err := mac.ParseMAC(base.Mac)
		if err != nil {
			return nil, err
		}
		ep.LXCMAC = lxcMAC
	}
	if base.HostMac != "" {
		nodeMAC, err := mac.ParseMAC(base.HostMac)
		if err != nil {
			return nil, err
		}
		ep.NodeMAC = nodeMAC
	}

	// Addressing is optional as well; each family is parsed independently.
	if addr := base.Addressing; addr != nil {
		if v6 := addr.IPV6; v6 != "" {
			parsed, err := addressing.NewCiliumIPv6(v6)
			if err != nil {
				return nil, err
			}
			ep.IPv6 = parsed
		}
		if v4 := addr.IPV4; v4 != "" {
			parsed, err := addressing.NewCiliumIPv4(v4)
			if err != nil {
				return nil, err
			}
			ep.IPv4 = parsed
		}
	}

	return ep, nil
}
// GetModelRLocked returns the API model of endpoint e.
// e.mutex must be RLocked.
func (e *Endpoint) GetModelRLocked() *models.Endpoint {
	if e == nil {
		return nil
	}
	// Report NotReady when the endpoint claims Ready but its status log
	// still carries a non-OK condition.
	currentState := models.EndpointState(e.state)
	if currentState == models.EndpointStateReady && e.Status.CurrentStatus() != OK {
		currentState = models.EndpointStateNotReady
	}
	// This returns the most recent log entry for this endpoint. It is backwards
	// compatible with the json from before we added `cilium endpoint log` but it
	// only returns 1 entry.
	statusLog := e.Status.GetModel()
	if len(statusLog) > 0 {
		statusLog = statusLog[:1]
	}
	lblSpec := &models.LabelConfigurationSpec{
		User: e.OpLabels.Custom.GetModel(),
	}
	lblMdl := &models.LabelConfigurationStatus{
		Realized:         lblSpec,
		SecurityRelevant: e.OpLabels.OrchestrationIdentity.GetModel(),
		Derived:          e.OpLabels.OrchestrationInfo.GetModel(),
		Disabled:         e.OpLabels.Disabled.GetModel(),
	}
	// Sort these slices since they come out in random orders. This allows
	// reflect.DeepEqual to succeed.
	sort.StringSlice(lblSpec.User).Sort()
	sort.StringSlice(lblMdl.Disabled).Sort()
	sort.StringSlice(lblMdl.SecurityRelevant).Sort()
	sort.StringSlice(lblMdl.Derived).Sort()
	// Controllers are sorted by name for the same determinism reason.
	controllerMdl := e.controllers.GetStatusModel()
	sort.Slice(controllerMdl, func(i, j int) bool { return controllerMdl[i].Name < controllerMdl[j].Name })
	spec := &models.EndpointConfigurationSpec{
		LabelConfiguration: lblSpec,
		Options:            *e.Options.GetMutableModel(),
	}
	mdl := &models.Endpoint{
		ID:   int64(e.ID),
		Spec: spec,
		Status: &models.EndpointStatus{
			// FIXME GH-3280 When we begin implementing revision numbers this will
			// diverge from models.Endpoint.Spec to reflect the in-datapath config
			Realized: spec,
			Identity: e.SecurityIdentity.GetModel(),
			Labels:   lblMdl,
			Networking: &models.EndpointNetworking{
				Addressing: []*models.AddressPair{{
					IPV4: e.IPv4.String(),
					IPV6: e.IPv6.String(),
				}},
				InterfaceIndex: int64(e.IfIndex),
				InterfaceName:  e.IfName,
				Mac:            e.LXCMAC.String(),
				HostMac:        e.NodeMAC.String(),
			},
			ExternalIdentifiers: &models.EndpointIdentifiers{
				ContainerID:      e.ContainerID,
				ContainerName:    e.ContainerName,
				DockerEndpointID: e.DockerEndpointID,
				DockerNetworkID:  e.DockerNetworkID,
				PodName:          e.GetK8sNamespaceAndPodNameLocked(),
			},
			// FIXME GH-3280 When we begin returning endpoint revisions this should
			// change to return the configured and in-datapath policies.
			Policy:      e.GetPolicyModel(),
			Log:         statusLog,
			Controllers: controllerMdl,
			State:       currentState, // TODO: Validate
			Health:      e.getHealthModel(),
		},
	}
	return mdl
}
// getHealthModel returns the endpoint's health object.
//
// Must be called with e.Mutex locked.
func (e *Endpoint) getHealthModel() *models.EndpointHealth {
	// Duplicated from GetModelRLocked.
	currentState := models.EndpointState(e.state)
	if currentState == models.EndpointStateReady && e.Status.CurrentStatus() != OK {
		currentState = models.EndpointStateNotReady
	}

	// Defaults: everything disabled and not connected. Each state below
	// overrides only the pieces that differ from this baseline.
	bpf := models.EndpointHealthStatusDisabled
	policy := models.EndpointHealthStatusDisabled
	overall := models.EndpointHealthStatusDisabled
	connected := false

	switch currentState {
	case models.EndpointStateRegenerating, models.EndpointStateWaitingToRegenerate, models.EndpointStateDisconnecting:
		bpf, policy, overall, connected = models.EndpointHealthStatusPending, models.EndpointHealthStatusPending, models.EndpointHealthStatusPending, true
	case models.EndpointStateCreating:
		bpf, connected = models.EndpointHealthStatusBootstrap, true
	case models.EndpointStateWaitingForIdentity:
		policy, connected = models.EndpointHealthStatusBootstrap, true
	case models.EndpointStateNotReady:
		bpf, policy, overall, connected = models.EndpointHealthStatusWarning, models.EndpointHealthStatusWarning, models.EndpointHealthStatusWarning, true
	case models.EndpointStateDisconnected:
		// Baseline already matches: everything disabled, not connected.
	case models.EndpointStateReady:
		bpf, policy, overall, connected = models.EndpointHealthStatusOK, models.EndpointHealthStatusOK, models.EndpointHealthStatusOK, true
	}

	return &models.EndpointHealth{
		Bpf:           bpf,
		Policy:        policy,
		Connected:     connected,
		OverallHealth: overall,
	}
}
// GetHealthModel returns the endpoint's health object.
func (e *Endpoint) GetHealthModel() *models.EndpointHealth {
	// NOTE: Using rlock on mutex directly because getHealthModel handles removed endpoint properly
	e.mutex.RLock()
	defer e.mutex.RUnlock()
	return e.getHealthModel()
}

// GetModel returns the API model of endpoint e.
// Takes a read lock; callers already holding the lock should use
// GetModelRLocked instead.
func (e *Endpoint) GetModel() *models.Endpoint {
	if e == nil {
		return nil
	}
	// NOTE: Using rlock on mutex directly because GetModelRLocked handles removed endpoint properly
	e.mutex.RLock()
	defer e.mutex.RUnlock()
	return e.GetModelRLocked()
}
// GetPolicyModel returns the endpoint's policy as an API model.
//
// Must be called with e.Mutex locked.
func (e *Endpoint) GetPolicyModel() *models.EndpointPolicyStatus {
if e == nil {
return nil
}
if e.SecurityIdentity == nil {
return nil
}
realizedIngressIdentities := make([]int64, 0)
realizedEgressIdentities := make([]int64, 0)
for policyMapKey := range e.realizedMapState {
if policyMapKey.DestPort != 0 {
// If the port is non-zero, then the PolicyKey no longer only applies
// at L3. AllowedIngressIdentities and AllowedEgressIdentities
// contain sets of which identities (i.e., label-based L3 only)
// are allowed, so anything which contains L4-related policy should
// not be added to these sets.
continue
}
switch policymap.TrafficDirection(policyMapKey.TrafficDirection) {
case policymap.Ingress:
realizedIngressIdentities = append(realizedIngressIdentities, int64(policyMapKey.Identity))
case policymap.Egress:
realizedEgressIdentities = append(realizedEgressIdentities, int64(policyMapKey.Identity))
default:
log.WithField(logfields.TrafficDirection, policymap.TrafficDirection(policyMapKey.TrafficDirection)).Error("Unexpected traffic direction present in realized PolicyMap state for endpoint")
}
}
desiredIngressIdentities := make([]int64, 0)
desiredEgressIdentities := make([]int64, 0)
for policyMapKey := range e.desiredMapState {
if policyMapKey.DestPort != 0 {
// If the port is non-zero, then the PolicyKey no longer only applies
// at L3. AllowedIngressIdentities and AllowedEgressIdentities
// contain sets of which identities (i.e., label-based L3 only)
// are allowed, so anything which contains L4-related policy should
// not be added to these sets.
continue
}
switch policymap.TrafficDirection(policyMapKey.TrafficDirection) {
case policymap.Ingress:
desiredIngressIdentities = append(desiredIngressIdentities, int64(policyMapKey.Identity))
case policymap.Egress:
desiredEgressIdentities = append(desiredEgressIdentities, int64(policyMapKey.Identity))
default:
log.WithField(logfields.TrafficDirection, policymap.TrafficDirection(policyMapKey.TrafficDirection)).Error("Unexpected traffic direction present in desired PolicyMap state for endpoint")
}
}
policyEnabled := e.policyStatus()
// Make a shallow copy of the stats.
e.proxyStatisticsMutex.RLock()
proxyStats := make([]*models.ProxyStatistics, 0, len(e.proxyStatistics))
for _, stats := range e.proxyStatistics {
statsCopy := *stats
proxyStats = append(proxyStats, &statsCopy)
}
e.proxyStatisticsMutex.RUnlock()
sortProxyStats(proxyStats)
mdl := &models.EndpointPolicy{
ID: int64(e.SecurityIdentity.ID),
// This field should be removed.
Build: int64(e.policyRevision),
PolicyRevision: int64(e.policyRevision),
AllowedIngressIdentities: realizedIngressIdentities,
AllowedEgressIdentities: realizedEgressIdentities,
CidrPolicy: e.L3Policy.GetModel(),
L4: e.RealizedL4Policy.GetModel(),
PolicyEnabled: policyEnabled,
}
desiredMdl := &models.EndpointPolicy{
ID: int64(e.SecurityIdentity.ID),
// This field should be removed.
Build: int64(e.nextPolicyRevision),
PolicyRevision: int64(e.nextPolicyRevision),
AllowedIngressIdentities: desiredIngressIdentities,
AllowedEgressIdentities: desiredEgressIdentities,
CidrPolicy: e.L3Policy.GetModel(),
L4: e.DesiredL4Policy.GetModel(),
PolicyEnabled: policyEnabled,
}
// FIXME GH-3280 Once we start returning revisions Realized should be the
// policy implemented in the data path
return &models.EndpointPolicyStatus{
Spec: desiredMdl,
Realized: mdl,
ProxyPolicyRevision: int64(e.proxyPolicyRevision),
ProxyStatistics: proxyStats,
}
}
// policyStatus returns the endpoint's policy status
//
// Must be called with e.Mutex locked.
func (e *Endpoint) policyStatus() models.EndpointPolicyEnabled {
	switch {
	case e.ingressPolicyEnabled && e.egressPolicyEnabled:
		return models.EndpointPolicyEnabledBoth
	case e.ingressPolicyEnabled:
		return models.EndpointPolicyEnabledIngress
	case e.egressPolicyEnabled:
		return models.EndpointPolicyEnabledEgress
	}
	return models.EndpointPolicyEnabledNone
}
// GetID returns the endpoint's ID
func (e *Endpoint) GetID() uint64 {
	return uint64(e.ID)
}

// GetLabels returns the labels as slice
func (e *Endpoint) GetLabels() []string {
	if id := e.SecurityIdentity; id != nil {
		return id.Labels.GetModel()
	}
	return []string{}
}
// GetK8sPodLabels returns all labels that exist in the endpoint and were
// derived from k8s pod.
func (e *Endpoint) GetK8sPodLabels() pkgLabels.Labels {
	e.UnconditionalRLock()
	defer e.RUnlock()
	allLabels := e.OpLabels.AllLabels()
	if allLabels == nil {
		return nil
	}

	// Keep only k8s-sourced labels, excluding the namespace meta,
	// service-account and namespace labels that cilium derives itself.
	result := pkgLabels.Labels{}
	for key, lbl := range allLabels.GetFromSource(pkgLabels.LabelSourceK8s) {
		switch {
		case strings.HasPrefix(lbl.Key, ciliumio.PodNamespaceMetaLabels),
			strings.HasPrefix(lbl.Key, ciliumio.PolicyLabelServiceAccount),
			strings.HasPrefix(lbl.Key, ciliumio.PodNamespaceLabel):
			// derived label, skip
		default:
			result[key] = lbl
		}
	}
	return result
}
// GetLabelsSHA returns the SHA of labels
func (e *Endpoint) GetLabelsSHA() string {
	if id := e.SecurityIdentity; id != nil {
		return id.GetLabelsSHA256()
	}
	return ""
}

// GetOpLabels returns the labels as slice
func (e *Endpoint) GetOpLabels() []string {
	e.UnconditionalRLock()
	labels := e.OpLabels.IdentityLabels().GetModel()
	e.RUnlock()
	return labels
}

// GetIPv4Address returns the IPv4 address of the endpoint
func (e *Endpoint) GetIPv4Address() string {
	return e.IPv4.String()
}

// GetIPv6Address returns the IPv6 address of the endpoint
func (e *Endpoint) GetIPv6Address() string {
	return e.IPv6.String()
}

// HasSidecarProxy returns whether the endpoint is configured to use a sidecar
// proxy for its HTTP L7 rules.
func (e *Endpoint) HasSidecarProxy() bool {
	return e.hasSidecarProxy
}
// statusLogMsg represents a log message.
type statusLogMsg struct {
	Status    Status    `json:"status"`
	Timestamp time.Time `json:"timestamp"`
}

// statusLog represents a slice of statusLogMsg.
type statusLog []*statusLogMsg

// componentStatus represents a map of a single statusLogMsg by StatusType.
type componentStatus map[StatusType]*statusLogMsg

// contains checks if the given `s` statusLogMsg is present in the
// priorityStatus. This is a pointer-identity check: s must be the exact
// message currently stored for its status type.
func (ps componentStatus) contains(s *statusLogMsg) bool {
	return ps[s.Status.Type] == s
}

// statusTypeSlice represents a slice of StatusType, is used for sorting
// purposes.
type statusTypeSlice []StatusType

// Len returns the length of the slice.
func (p statusTypeSlice) Len() int { return len(p) }

// Less returns true if the element `j` is less than element `i`.
// *It's reversed* so that we can sort the slice by high to lowest priority.
func (p statusTypeSlice) Less(i, j int) bool { return p[i] > p[j] }

// Swap swaps element in `i` with element in `j`.
func (p statusTypeSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// sortByPriority returns a statusLog ordered from highest priority to lowest.
func (ps componentStatus) sortByPriority() statusLog {
	// Collect the status types present, order them (statusTypeSlice sorts
	// high-to-low), then emit the stored messages in that order.
	keys := make(statusTypeSlice, 0, len(ps))
	for typ := range ps {
		keys = append(keys, typ)
	}
	sort.Sort(keys)

	ordered := make(statusLog, 0, len(keys))
	for _, typ := range keys {
		ordered = append(ordered, ps[typ])
	}
	return ordered
}
// EndpointStatus represents the endpoint status.
type EndpointStatus struct {
	// CurrentStatuses is the last status of a given priority.
	CurrentStatuses componentStatus `json:"current-status,omitempty"`
	// Contains the last maxLogs messages for this endpoint.
	Log statusLog `json:"log,omitempty"`
	// Index is the index in the statusLog, is used to keep track the next
	// available position to write a new log message.
	Index int `json:"index"`
	// indexMU is the Mutex for the CurrentStatus and Log RW operations.
	indexMU lock.RWMutex
}

// NewEndpointStatus returns an EndpointStatus with its current-status map and
// log initialized to empty, non-nil values.
func NewEndpointStatus() *EndpointStatus {
	return &EndpointStatus{
		CurrentStatuses: componentStatus{},
		Log:             statusLog{},
	}
}
// lastIndex returns the ring-buffer slot written most recently, wrapping to
// the end of the buffer when Index is at the start.
func (e *EndpointStatus) lastIndex() int {
	if e.Index == 0 {
		return maxLogs - 1
	}
	return e.Index - 1
}
// getAndIncIdx returns current free slot index and increments the index to the
// next index that can be overwritten.
func (e *EndpointStatus) getAndIncIdx() int {
	idx := e.Index
	e.Index = (e.Index + 1) % maxLogs
	// Lets skip the CurrentStatus message from the log to prevent removing
	// non-OK status!
	if e.Index < len(e.Log) &&
		e.CurrentStatuses.contains(e.Log[e.Index]) &&
		e.Log[e.Index].Status.Code != OK {
		e.Index = (e.Index + 1) % maxLogs
	}
	return idx
}
// addStatusLog adds statusLogMsg to endpoint log.
// example of e.Log's contents where maxLogs = 3 and Index = 0
// [index] - Priority - Code
// [0] - BPF - OK
// [1] - Policy - Failure
// [2] - BPF - OK
// With this log, the CurrentStatus will keep [1] for Policy priority and [2]
// for BPF priority.
//
// Whenever a new statusLogMsg is received, that log will be kept in the
// CurrentStatus map for the statusLogMsg's priority.
// The CurrentStatus map, ensures non of the failure messages are deleted for
// higher priority messages and vice versa.
func (e *EndpointStatus) addStatusLog(s *statusLogMsg) {
	e.CurrentStatuses[s.Status.Type] = s
	slot := e.getAndIncIdx()
	// Grow the log until it reaches capacity, then overwrite in place.
	if len(e.Log) >= maxLogs {
		e.Log[slot] = s
		return
	}
	e.Log = append(e.Log, s)
}
// GetModel converts the status log ring buffer into API status-change
// entries, starting at the most recently written slot.
func (e *EndpointStatus) GetModel() []*models.EndpointStatusChange {
	e.indexMU.RLock()
	defer e.indexMU.RUnlock()
	list := []*models.EndpointStatusChange{}
	// Walk the ring buffer backwards from the newest slot, wrapping at 0,
	// and stop after visiting e.Index (the next slot to be overwritten).
	for i := e.lastIndex(); ; i-- {
		if i < 0 {
			i = maxLogs - 1
		}
		// Skip slots that were never written.
		if i < len(e.Log) && e.Log[i] != nil {
			list = append(list, &models.EndpointStatusChange{
				Timestamp: e.Log[i].Timestamp.Format(time.RFC3339),
				Code:      e.Log[i].Status.Code.String(),
				Message:   e.Log[i].Status.Msg,
				State:     models.EndpointState(e.Log[i].Status.State),
			})
		}
		if i == e.Index {
			break
		}
	}
	return list
}
// CurrentStatus returns the highest-priority non-OK status code, or OK when
// every component status is OK.
func (e *EndpointStatus) CurrentStatus() StatusCode {
	e.indexMU.RLock()
	defer e.indexMU.RUnlock()
	for _, msg := range e.CurrentStatuses.sortByPriority() {
		if code := msg.Status.Code; code != OK {
			return code
		}
	}
	return OK
}

// String returns the string form of the current status code.
func (e *EndpointStatus) String() string {
	return e.CurrentStatus().String()
}
// StringID returns the endpoint's ID in a string.
func (e *Endpoint) StringID() string {
	return strconv.FormatUint(uint64(e.ID), 10)
}

// GetIdentity returns the endpoint's numeric security identity, or
// identityPkg.InvalidIdentity when none has been assigned.
func (e *Endpoint) GetIdentity() identityPkg.NumericIdentity {
	if e.SecurityIdentity == nil {
		return identityPkg.InvalidIdentity
	}
	return e.SecurityIdentity.ID
}
// Allows reports whether the endpoint's desired policy map state contains an
// ingress entry for the given security identity (L3, any port).
func (e *Endpoint) Allows(id identityPkg.NumericIdentity) bool {
	e.UnconditionalRLock()
	defer e.RUnlock()

	key := policymap.PolicyKey{
		Identity:         uint32(id),
		TrafficDirection: policymap.Ingress.Uint8(),
	}
	_, allowed := e.desiredMapState[key]
	return allowed
}
// String returns endpoint on a JSON format.
func (e *Endpoint) String() string {
	e.UnconditionalRLock()
	defer e.RUnlock()
	out, err := json.MarshalIndent(e, "", " ")
	if err != nil {
		return err.Error()
	}
	return string(out)
}
// optionChanged is a callback used with pkg/option to apply the options to an
// endpoint. Not used for anything at the moment.
func optionChanged(key string, value option.OptionSetting, data interface{}) {
}

// applyOptsLocked applies the given options to the endpoint's options and
// returns true if there were any options changed.
func (e *Endpoint) applyOptsLocked(opts option.OptionMap) bool {
	changed := e.Options.ApplyValidated(opts, optionChanged, e) > 0
	// A change to the debug option affects the logger configuration.
	if _, debugTouched := opts[option.Debug]; debugTouched && changed {
		e.UpdateLogger(nil)
	}
	return changed
}

// ForcePolicyCompute marks the endpoint for forced bpf regeneration.
func (e *Endpoint) ForcePolicyCompute() {
	e.forcePolicyCompute = true
}
// SetDefaultOpts ensures the endpoint's option set and option library are
// initialized, and copies the values of all mutable endpoint options from
// opts when it is non-nil.
func (e *Endpoint) SetDefaultOpts(opts *option.IntOptions) {
	if e.Options == nil {
		e.Options = option.NewIntOptions(&EndpointMutableOptionLibrary)
	}
	if e.Options.Library == nil {
		e.Options.Library = &EndpointMutableOptionLibrary
	}

	if opts != nil {
		for k := range option.GetEndpointMutableOptionLibrary() {
			e.Options.SetValidated(k, opts.GetValue(k))
		}
	}
	e.UpdateLogger(nil)
}
// ConntrackLocal determines whether this endpoint is currently using a local
// table to handle connection tracking (true), or the global table (false).
func (e *Endpoint) ConntrackLocal() bool {
	e.UnconditionalRLock()
	defer e.RUnlock()
	return e.ConntrackLocalLocked()
}

// ConntrackLocalLocked is the same as ConntrackLocal, but assumes that the
// endpoint is already locked for reading.
func (e *Endpoint) ConntrackLocalLocked() bool {
	// Local conntrack requires an identity, options, and the option enabled.
	return e.SecurityIdentity != nil && e.Options != nil &&
		e.Options.IsEnabled(option.ConntrackLocal)
}
// orderEndpoint is a comparison function over two endpoint models.
type orderEndpoint func(e1, e2 *models.Endpoint) bool

// OrderEndpointAsc orders the slice of Endpoint in ascending ID order.
func OrderEndpointAsc(eps []*models.Endpoint) {
	byAscendingID := func(e1, e2 *models.Endpoint) bool {
		return e1.ID < e2.ID
	}
	orderEndpoint(byAscendingID).sort(eps)
}

// sort sorts eps in place according to the comparison function by.
func (by orderEndpoint) sort(eps []*models.Endpoint) {
	sort.Sort(&epSorter{eps: eps, by: by})
}

// epSorter adapts an endpoint-model slice plus a comparison function to the
// sort.Interface contract.
type epSorter struct {
	eps []*models.Endpoint
	by  func(e1, e2 *models.Endpoint) bool
}

func (epS *epSorter) Len() int { return len(epS.eps) }

func (epS *epSorter) Swap(i, j int) {
	epS.eps[i], epS.eps[j] = epS.eps[j], epS.eps[i]
}

func (epS *epSorter) Less(i, j int) bool { return epS.by(epS.eps[i], epS.eps[j]) }
// base64 returns the endpoint in a base64 format.
func (e *Endpoint) base64() (string, error) {
	transformEndpointForDowngrade(e)
	jsonBytes, err := json.Marshal(e)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(jsonBytes), nil
}

// parseBase64ToEndpoint parses the endpoint stored in the given base64 string.
func parseBase64ToEndpoint(str string, ep *Endpoint) error {
	jsonBytes, err := base64.StdEncoding.DecodeString(str)
	if err != nil {
		return err
	}
	return json.Unmarshal(jsonBytes, ep)
}
// FilterEPDir returns a list of directories' names that possible belong to an endpoint.
func FilterEPDir(dirFiles []os.FileInfo) []string {
	names := []string{}
	for _, f := range dirFiles {
		if !f.IsDir() {
			continue
		}
		// A directory belongs to an endpoint when its name parses as a
		// 16-bit endpoint ID or carries the "_next_fail" suffix.
		name := f.Name()
		if _, err := strconv.ParseUint(name, 10, 16); err == nil || strings.HasSuffix(name, "_next_fail") {
			names = append(names, name)
		}
	}
	return names
}
// ParseEndpoint parses the given strEp which is in the form of:
// common.CiliumCHeaderPrefix + common.Version + ":" + endpointBase64
func ParseEndpoint(strEp string) (*Endpoint, error) {
	// TODO: Provide a better mechanism to update from old version once we bump
	// TODO: cilium version.
	parts := strings.Split(strEp, ":")
	if len(parts) != 2 {
		return nil, fmt.Errorf("invalid format %q. Should contain a single ':'", strEp)
	}

	var ep Endpoint
	if err := parseBase64ToEndpoint(parts[1], &ep); err != nil {
		return nil, fmt.Errorf("failed to parse base64toendpoint: %s", err)
	}

	// We need to check for nil in Status, CurrentStatuses and Log, since in
	// some use cases, status will be not nil and Cilium will eventually
	// error/panic if CurrentStatus or Log are not initialized correctly.
	// Reference issue GH-2477
	if ep.Status == nil || ep.Status.CurrentStatuses == nil || ep.Status.Log == nil {
		ep.Status = NewEndpointStatus()
	}

	ep.UpdateLogger(nil)
	ep.SetStateLocked(StateRestoring, "Endpoint restoring")
	return &ep, nil
}
// RemoveFromGlobalPolicyMap removes this endpoint's ingress and egress
// entries from the global policy map so that resources (prog/map reference
// counts) can be released. Returns the error from opening the map, if any.
func (e *Endpoint) RemoveFromGlobalPolicyMap() error {
	gpm, err := policymap.OpenGlobalMap(e.PolicyGlobalMapPathLocked())
	if err != nil {
		return err
	}
	defer gpm.Close()

	// Best-effort deletes; errors from individual removals are ignored.
	gpm.Delete(uint32(e.ID), policymap.AllPorts, u8proto.All, policymap.Ingress)
	gpm.Delete(uint32(e.ID), policymap.AllPorts, u8proto.All, policymap.Egress)
	return nil
}
// GetBPFKeys returns all keys which should represent this endpoint in the BPF
// endpoints map
func (e *Endpoint) GetBPFKeys() []*lxcmap.EndpointKey {
	// The IPv6 key is always present; the IPv4 key only when an address is set.
	keys := []*lxcmap.EndpointKey{lxcmap.NewEndpointKey(e.IPv6.IP())}
	if e.IPv4 != nil {
		keys = append(keys, lxcmap.NewEndpointKey(e.IPv4.IP()))
	}
	return keys
}
// GetBPFValue returns the value which should represent this endpoint in the
// BPF endpoints map
func (e *Endpoint) GetBPFValue() (*lxcmap.EndpointInfo, error) {
	lxcMAC, err := e.LXCMAC.Uint64()
	if err != nil {
		return nil, fmt.Errorf("invalid LXC MAC: %v", err)
	}

	nodeMAC, err := e.NodeMAC.Uint64()
	if err != nil {
		return nil, fmt.Errorf("invalid node MAC: %v", err)
	}

	return &lxcmap.EndpointInfo{
		IfIndex: uint32(e.IfIndex),
		// Store security identity in network byte order so it can be
		// written into the packet without an additional byte order
		// conversion.
		LxcID:   e.ID,
		MAC:     lxcmap.MAC(lxcMAC),
		NodeMAC: lxcmap.MAC(nodeMAC),
	}, nil
}
// mapPath returns the filesystem path of the BPF map named by concatenating
// mapname with the endpoint ID.
func mapPath(mapname string, id int) string {
	name := mapname + strconv.Itoa(id)
	return bpf.MapPath(name)
}
// PolicyMapPathLocked returns the path to the per-endpoint policy BPF map.
func (e *Endpoint) PolicyMapPathLocked() string {
return mapPath(policymap.MapName, int(e.ID))
}
// IPv6IngressMapPathLocked returns the path to the IPv6 ingress CIDR map of
// the endpoint.
func (e *Endpoint) IPv6IngressMapPathLocked() string {
return mapPath(cidrmap.MapName+"ingress6_", int(e.ID))
}
// IPv6EgressMapPathLocked returns the path to the IPv6 egress CIDR map of
// the endpoint.
func (e *Endpoint) IPv6EgressMapPathLocked() string {
return mapPath(cidrmap.MapName+"egress6_", int(e.ID))
}
// IPv4IngressMapPathLocked returns the path to the IPv4 ingress CIDR map of
// the endpoint.
func (e *Endpoint) IPv4IngressMapPathLocked() string {
return mapPath(cidrmap.MapName+"ingress4_", int(e.ID))
}
// IPv4EgressMapPathLocked returns the path to the IPv4 egress CIDR map of
// the endpoint.
func (e *Endpoint) IPv4EgressMapPathLocked() string {
return mapPath(cidrmap.MapName+"egress4_", int(e.ID))
}
// PolicyGlobalMapPathLocked returns the path to the global policy map.
// The receiver is unused; the path is the same for every endpoint.
func (e *Endpoint) PolicyGlobalMapPathLocked() string {
return bpf.MapPath(PolicyGlobalMapName)
}
// CallsMapPath returns the path to the tail-calls map for the endpoint with
// the given ID.
func CallsMapPath(id int) string {
return bpf.MapPath(CallsMapName + strconv.Itoa(id))
}
// CallsMapPathLocked returns the path to the cilium tail-calls map of this
// endpoint.
func (e *Endpoint) CallsMapPathLocked() string {
return CallsMapPath(int(e.ID))
}
// LogStatus appends a status message of the given type and code to the
// endpoint's status log. It acquires the endpoint lock unconditionally.
func (e *Endpoint) LogStatus(typ StatusType, code StatusCode, msg string) {
e.UnconditionalLock()
defer e.Unlock()
// FIXME GH2323 instead of a mutex we could use a channel to send the status
// log message to a single writer?
e.logStatusLocked(typ, code, msg)
}
// LogStatusOK appends an OK status message of the given type to the
// endpoint's status log. It acquires the endpoint lock itself.
func (e *Endpoint) LogStatusOK(typ StatusType, msg string) {
e.LogStatus(typ, OK, msg)
}
// LogStatusOKLocked will log an OK message of the given status type with the
// given msg string.
// Must be called with endpoint.Mutex held.
func (e *Endpoint) LogStatusOKLocked(typ StatusType, msg string) {
e.logStatusLocked(typ, OK, msg)
}
// logStatusLocked appends a status message to the endpoint's status log and
// emits it at debug level with the endpoint's current state and policy
// revision attached.
// Must be called with endpoint.Mutex held.
func (e *Endpoint) logStatusLocked(typ StatusType, code StatusCode, msg string) {
// indexMU serializes writers to the circular status log.
e.Status.indexMU.Lock()
defer e.Status.indexMU.Unlock()
sts := &statusLogMsg{
Status: Status{
Code: code,
Msg: msg,
Type: typ,
State: e.state,
},
Timestamp: time.Now().UTC(),
}
e.Status.addStatusLog(sts)
e.Logger().WithFields(logrus.Fields{
"code": sts.Status.Code,
"type": sts.Status.Type,
logfields.EndpointState: sts.Status.State,
logfields.PolicyRevision: e.policyRevision,
}).Debug(msg)
}
// UpdateValidationError is an error that indicates that validating the
// configuration options of an endpoint update was unsuccessful.
// Implements the error interface.
type UpdateValidationError struct {
msg string
}
// Error returns the validation error message.
func (e UpdateValidationError) Error() string { return e.msg }
// UpdateCompilationError is an error that indicates that compilation
// triggered by an endpoint update was unsuccessful.
// Implements the error interface.
type UpdateCompilationError struct {
msg string
}
// Error returns the compilation error message.
func (e UpdateCompilationError) Error() string { return e.msg }
// UpdateStateChangeError is an error that indicates that updating the state
// of an endpoint was unsuccessful.
// Implements the error interface.
type UpdateStateChangeError struct {
msg string
}
// Error returns the state-change error message.
func (e UpdateStateChangeError) Error() string { return e.msg }
// Update modifies the endpoint options and *always* tries to regenerate the
// endpoint's program. Returns an error if the provided options are not valid,
// if there was an issue triggering policy updates for the given endpoint,
// or if endpoint regeneration was unable to be triggered. Note that the
// LabelConfiguration in the EndpointConfigurationSpec is *not* consumed here.
func (e *Endpoint) Update(owner Owner, cfg *models.EndpointConfigurationSpec) error {
om, err := EndpointMutableOptionLibrary.ValidateConfigurationMap(cfg.Options)
if err != nil {
return UpdateValidationError{err.Error()}
}
if err := e.LockAlive(); err != nil {
return err
}
e.Logger().WithField("configuration-options", cfg).Debug("updating endpoint configuration options")
// CurrentStatus will be not OK when we have an uncleared error in BPF,
// policy or Other. We should keep trying to regenerate in the hopes of
// succeeding.
// Note: This "retry" behaviour is better suited to a controller, and can be
// moved there once we have an endpoint regeneration controller.
needToRegenerateBPF := e.updateAndOverrideEndpointOptions(om) || (e.Status.CurrentStatus() != OK)
reason := "endpoint was updated via API"
// If configuration options are provided, we only regenerate if necessary.
// Otherwise always regenerate.
if cfg.Options == nil {
needToRegenerateBPF = true
reason = "endpoint was manually regenerated via API"
}
if needToRegenerateBPF {
e.Logger().Debug("need to regenerate endpoint; checking state before" +
" attempting to regenerate")
// TODO / FIXME: GH-3281: need ways to queue up regenerations per-endpoint.
// Default timeout for PATCH /endpoint/{id}/config is 60 seconds, so put
// timeout in this function a bit below that timeout. If the timeout
// for clients in API is below this value, they will get a message containing
// "context deadline exceeded".
timeout := time.After(EndpointGenerationTimeout)
// Check for endpoint state every second.
ticker := time.NewTicker(1 * time.Second)
defer ticker.Stop()
// Release the lock while polling so other work on the endpoint can
// progress; each tick re-acquires it before inspecting state.
e.Unlock()
for {
select {
case <-ticker.C:
if err := e.LockAlive(); err != nil {
return err
}
// Check endpoint state before attempting configuration update because
// configuration updates can only be applied when the endpoint is in
// specific states. See GH-3058.
stateTransitionSucceeded := e.SetStateLocked(StateWaitingToRegenerate, reason)
if stateTransitionSucceeded {
e.Unlock()
e.Regenerate(owner, NewRegenerationContext(reason))
return nil
}
e.Unlock()
case <-timeout:
e.Logger().Warningf("timed out waiting for endpoint state to change")
return UpdateStateChangeError{fmt.Sprintf("unable to regenerate endpoint program because state transition to %s was unsuccessful; check `cilium endpoint log %d` for more information", StateWaitingToRegenerate, e.ID)}
}
}
}
e.Unlock()
return nil
}
// HasLabels returns whether endpoint e contains all labels l. Will return
// 'false' if any label in l is not in the endpoint's labels. Takes the
// endpoint read lock itself.
func (e *Endpoint) HasLabels(l pkgLabels.Labels) bool {
	e.UnconditionalRLock()
	res := e.hasLabelsRLocked(l)
	e.RUnlock()
	return res
}
// hasLabelsRLocked returns whether endpoint e contains all labels l. Will
// return 'false' if any label in l is not in the endpoint's labels.
// e.Mutex must be RLocked.
func (e *Endpoint) hasLabelsRLocked(l pkgLabels.Labels) bool {
	epLabels := e.OpLabels.AllLabels()
	for _, want := range l {
		matched := false
		for _, have := range epLabels {
			if have.Equals(want) {
				matched = true
				break
			}
		}
		if !matched {
			// At least one requested label is missing.
			return false
		}
	}
	return true
}
// replaceInformationLabels replaces the information labels of the endpoint.
// Passing a nil set of labels will not perform any action.
// Must be called with e.Mutex.Lock().
func (e *Endpoint) replaceInformationLabels(l pkgLabels.Labels) {
	if l == nil {
		return
	}
	logger := e.Logger()
	// Mark all current information labels for deletion; labels re-asserted
	// below survive, any remaining marked labels are dropped at the end.
	e.OpLabels.OrchestrationInfo.MarkAllForDeletion()
	for _, lbl := range l {
		if e.OpLabels.OrchestrationInfo.UpsertLabel(lbl) {
			logger.WithField(logfields.Labels, logfields.Repr(lbl)).Debug("Assigning information label")
		}
	}
	e.OpLabels.OrchestrationInfo.DeleteMarked()
}
// replaceIdentityLabels replaces the identity labels of the endpoint. If a net
// change occurred, the identityRevision is bumped and returned, otherwise 0 is
// returned.
// Passing a nil set of labels will not perform any action and will return the
// current endpoint's identityRevision.
// Must be called with e.Mutex.Lock().
func (e *Endpoint) replaceIdentityLabels(l pkgLabels.Labels) int {
if l == nil {
return e.identityRevision
}
changed := false
// Mark everything for deletion first; entries re-asserted below are kept,
// the rest are removed by DeleteMarked() afterwards.
e.OpLabels.OrchestrationIdentity.MarkAllForDeletion()
e.OpLabels.Disabled.MarkAllForDeletion()
scopedLog := e.Logger()
for k, v := range l {
// A disabled identity label stays disabled without value updates
if e.OpLabels.Disabled[k] != nil {
e.OpLabels.Disabled[k].ClearDeletionMark()
} else if e.OpLabels.OrchestrationIdentity.UpsertLabel(v) {
scopedLog.WithField(logfields.Labels, logfields.Repr(v)).Debug("Assigning security relevant label")
changed = true
}
}
// Removing stale labels also counts as a net change.
if e.OpLabels.OrchestrationIdentity.DeleteMarked() || e.OpLabels.Disabled.DeleteMarked() {
changed = true
}
rev := 0
if changed {
// Bump the revision so concurrent resolvers detect obsolescence.
e.identityRevision++
rev = e.identityRevision
}
return rev
}
// LeaveLocked removes the endpoint's directory and all associated runtime
// state from the system (redirects, policy map, identity, conntrack entries)
// and transitions it to the disconnected state. Returns all errors
// encountered along the way. Must be called with Endpoint's mutex AND
// BuildMutex locked.
func (e *Endpoint) LeaveLocked(owner Owner, proxyWaitGroup *completion.WaitGroup) []error {
	errors := []error{}
	owner.RemoveFromEndpointQueue(uint64(e.ID))
	if e.SecurityIdentity != nil && e.RealizedL4Policy != nil {
		// Passing a new map of nil will purge all redirects
		e.removeOldRedirects(owner, nil, proxyWaitGroup)
	}
	if e.PolicyMap != nil {
		if err := e.PolicyMap.Close(); err != nil {
			// Bug fix: report the per-endpoint policy map path; the
			// previous message used the global policy map path even
			// though e.PolicyMap is the endpoint's own map.
			errors = append(errors, fmt.Errorf("unable to close policymap %s: %s", e.PolicyMapPathLocked(), err))
		}
	}
	if e.SecurityIdentity != nil {
		err := e.SecurityIdentity.Release()
		if err != nil {
			errors = append(errors, fmt.Errorf("unable to release identity: %s", err))
		}
		// TODO: Check if network policy was created even without SecurityIdentity
		owner.RemoveNetworkPolicy(e)
		e.SecurityIdentity = nil
	}
	e.removeDirectory()
	e.removeFailedDirectory()
	e.controllers.RemoveAll()
	e.cleanPolicySignals()
	if !e.ConntrackLocalLocked() {
		e.scrubIPsInConntrackTableLocked()
	}
	e.SetStateLocked(StateDisconnected, "Endpoint removed")
	endpointPolicyStatus.Remove(e.ID)
	e.Logger().Info("Removed endpoint")
	return errors
}
// removeDirectory removes the endpoint's state directory. Removal is
// best-effort; the error from os.RemoveAll is deliberately ignored.
func (e *Endpoint) removeDirectory() {
os.RemoveAll(e.DirectoryPath())
}
// removeFailedDirectory removes the directory holding state from a failed
// regeneration attempt. Removal is best-effort; the error is ignored.
func (e *Endpoint) removeFailedDirectory() {
os.RemoveAll(e.FailedDirectoryPath())
}
// RemoveDirectory removes the endpoint's state directory, taking the
// endpoint lock unconditionally.
func (e *Endpoint) RemoveDirectory() {
e.UnconditionalLock()
defer e.Unlock()
e.removeDirectory()
}
// CreateDirectory creates the endpoint's state directory on disk. Returns an
// error if the endpoint is disconnected or the directory cannot be created.
func (e *Endpoint) CreateDirectory() error {
	if err := e.LockAlive(); err != nil {
		return err
	}
	defer e.Unlock()
	if err := os.MkdirAll(e.DirectoryPath(), 0777); err != nil {
		return fmt.Errorf("unable to create endpoint directory: %s", err)
	}
	return nil
}
// RegenerateWait should only be called when endpoint's state has successfully
// been changed to "waiting-to-regenerate". It blocks until the triggered
// regeneration completes and returns an error if the build failed.
func (e *Endpoint) RegenerateWait(owner Owner, reason string) error {
	buildSuccessful := <-e.Regenerate(owner, NewRegenerationContext(reason))
	if buildSuccessful {
		return nil
	}
	return fmt.Errorf("error while regenerating endpoint."+
		" For more info run: 'cilium endpoint get %d'", e.ID)
}
// SetContainerName modifies the endpoint's container name.
func (e *Endpoint) SetContainerName(name string) {
	e.UnconditionalLock()
	defer e.Unlock()
	e.ContainerName = name
}
// GetK8sNamespace returns the Kubernetes namespace of the pod this endpoint
// represents, if any.
func (e *Endpoint) GetK8sNamespace() string {
	e.UnconditionalRLock()
	defer e.RUnlock()
	return e.k8sNamespace
}
// SetK8sNamespace modifies the endpoint's Kubernetes namespace and refreshes
// the endpoint logger's pod-name field accordingly.
func (e *Endpoint) SetK8sNamespace(name string) {
	e.UnconditionalLock()
	defer e.Unlock()
	e.k8sNamespace = name
	e.UpdateLogger(map[string]interface{}{
		logfields.K8sPodName: e.GetK8sNamespaceAndPodNameLocked(),
	})
}
// GetK8sPodName returns the name of the pod if the endpoint represents a
// Kubernetes pod.
func (e *Endpoint) GetK8sPodName() string {
	e.UnconditionalRLock()
	defer e.RUnlock()
	return e.k8sPodName
}
// GetK8sNamespaceAndPodNameLocked returns the namespace and pod name joined
// as "namespace/pod". This function requires e.Mutex to be held.
func (e *Endpoint) GetK8sNamespaceAndPodNameLocked() string {
return e.k8sNamespace + "/" + e.k8sPodName
}
// SetK8sPodName modifies the endpoint's pod name and refreshes the endpoint
// logger's pod-name field accordingly.
func (e *Endpoint) SetK8sPodName(name string) {
	e.UnconditionalLock()
	defer e.Unlock()
	e.k8sPodName = name
	e.UpdateLogger(map[string]interface{}{
		logfields.K8sPodName: e.GetK8sNamespaceAndPodNameLocked(),
	})
}
// SetContainerID modifies the endpoint's container ID and refreshes the
// endpoint logger's container-ID field accordingly.
func (e *Endpoint) SetContainerID(id string) {
	e.UnconditionalLock()
	defer e.Unlock()
	e.ContainerID = id
	e.UpdateLogger(map[string]interface{}{
		logfields.ContainerID: e.getShortContainerID(),
	})
}
// GetContainerID returns the endpoint's container ID.
func (e *Endpoint) GetContainerID() string {
	e.UnconditionalRLock()
	defer e.RUnlock()
	return e.ContainerID
}
// GetShortContainerID returns the endpoint's shortened container ID.
func (e *Endpoint) GetShortContainerID() string {
	e.UnconditionalRLock()
	cid := e.getShortContainerID()
	e.RUnlock()
	return cid
}
// getShortContainerID returns at most the first 10 characters of the
// container ID. A nil receiver yields the empty string.
func (e *Endpoint) getShortContainerID() string {
	if e == nil {
		return ""
	}
	const caplen = 10
	if len(e.ContainerID) <= caplen {
		return e.ContainerID
	}
	return e.ContainerID[:caplen]
}
// SetDockerEndpointID modifies the endpoint's Docker Endpoint ID.
func (e *Endpoint) SetDockerEndpointID(id string) {
	e.UnconditionalLock()
	defer e.Unlock()
	e.DockerEndpointID = id
}
// SetDockerNetworkID modifies the endpoint's Docker network ID.
func (e *Endpoint) SetDockerNetworkID(id string) {
	e.UnconditionalLock()
	defer e.Unlock()
	e.DockerNetworkID = id
}
// GetDockerNetworkID returns the endpoint's Docker network ID.
func (e *Endpoint) GetDockerNetworkID() string {
	e.UnconditionalRLock()
	id := e.DockerNetworkID
	e.RUnlock()
	return id
}
// GetStateLocked returns the endpoint's state.
// endpoint.Mutex may only be .RLockAlive()ed.
func (e *Endpoint) GetStateLocked() string {
return e.state
}
// GetState returns the endpoint's state. It takes the endpoint read lock
// unconditionally.
func (e *Endpoint) GetState() string {
e.UnconditionalRLock()
defer e.RUnlock()
return e.GetStateLocked()
}
// SetStateLocked modifies the endpoint's state after validating that the
// transition from the current state to toState is allowed. Invalid
// transitions are logged and recorded in the status log, and false is
// returned. On success the per-state metrics counters are adjusted.
// endpoint.Mutex must be held.
// Returns true only if endpoints state was changed as requested.
func (e *Endpoint) SetStateLocked(toState, reason string) bool {
// Validate the state transition: each case lists the states reachable
// from the current state; anything else falls through to the invalid
// transition handling below.
fromState := e.state
switch fromState { // From state
case "": // Special case for capturing initial state transitions like
// nil --> StateWaitingForIdentity, StateRestoring
switch toState {
case StateWaitingForIdentity, StateRestoring:
goto OKState
}
case StateCreating:
switch toState {
case StateDisconnecting, StateWaitingForIdentity, StateRestoring:
goto OKState
}
case StateWaitingForIdentity:
switch toState {
case StateReady, StateDisconnecting:
goto OKState
}
case StateReady:
switch toState {
case StateWaitingForIdentity, StateDisconnecting, StateWaitingToRegenerate, StateRestoring:
goto OKState
}
case StateDisconnecting:
switch toState {
case StateDisconnected:
goto OKState
}
case StateDisconnected:
// No valid transitions, as disconnected is a terminal state for the endpoint.
case StateWaitingToRegenerate:
switch toState {
// Note that transitions to waiting-to-regenerate state
case StateWaitingForIdentity, StateDisconnecting, StateRestoring:
goto OKState
}
case StateRegenerating:
switch toState {
// Even while the endpoint is regenerating it is
// possible that further changes require a new
// build. In this case the endpoint is transitioned
// from the regenerating state to
// waiting-for-identity or waiting-to-regenerate state.
case StateWaitingForIdentity, StateDisconnecting, StateWaitingToRegenerate, StateRestoring:
goto OKState
}
case StateRestoring:
switch toState {
case StateDisconnecting, StateWaitingToRegenerate, StateRestoring:
goto OKState
}
}
// Invalid transition: log the caller's location to aid debugging, but
// only when the state actually differs (a no-op "transition" to the
// same state is silently recorded in the status log below).
if toState != fromState {
_, fileName, fileLine, _ := runtime.Caller(1)
e.Logger().WithFields(logrus.Fields{
logfields.EndpointState + ".from": fromState,
logfields.EndpointState + ".to": toState,
"file": fileName,
"line": fileLine,
}).Info("Invalid state transition skipped")
}
e.logStatusLocked(Other, Warning, fmt.Sprintf("Skipped invalid state transition to %s due to: %s", toState, reason))
return false
OKState:
e.state = toState
e.logStatusLocked(Other, OK, reason)
// Initial state transitions i.e nil --> waiting-for-identity
// need to be handled correctly while updating metrics.
// Note that if we are transitioning from some state to restoring
// state, we cannot decrement the old state counters as they will not
// be accounted for in the metrics.
if fromState != "" && toState != StateRestoring {
metrics.EndpointStateCount.
WithLabelValues(fromState).Dec()
}
// Since StateDisconnected is the final state, after which the
// endpoint is gone, we should not increment metrics for this state.
if toState != "" && toState != StateDisconnected {
metrics.EndpointStateCount.
WithLabelValues(toState).Inc()
}
return true
}
// BuilderSetStateLocked modifies the endpoint's state on behalf of the
// builder. It permits only the two builder-driven transitions
// (waiting-to-regenerate -> regenerating, regenerating -> ready); anything
// else is recorded as a skipped transition and false is returned.
// endpoint.Mutex must be held.
// endpoint BuildMutex must be held!
func (e *Endpoint) BuilderSetStateLocked(toState, reason string) bool {
// Validate the state transition.
fromState := e.state
switch fromState { // From state
case StateCreating, StateWaitingForIdentity, StateReady, StateDisconnecting, StateDisconnected:
// No valid transitions for the builder
case StateWaitingToRegenerate:
switch toState {
// Builder transitions the endpoint from
// waiting-to-regenerate state to regenerating state
// right after acquiring the endpoint lock, and while
// endpoint's build mutex is held. All changes to
// cilium and endpoint configuration, policy as well
// as the existing set of security identities will be
// reconsidered after this point, i.e., even if some
// of them are changed regeneration need not be queued
// if the endpoint is already in waiting-to-regenerate
// state.
case StateRegenerating:
goto OKState
}
case StateRegenerating:
switch toState {
// While still holding the build mutex, the builder
// tries to transition the endpoint to ready
// state. But since the endpoint mutex was released
// for the duration of the bpf generation, it is
// possible that another build request has been
// queued. In this case the endpoint has been
// transitioned to waiting-to-regenerate state
// already, and the transition to ready state is
// skipped.
case StateReady:
goto OKState
}
}
e.logStatusLocked(Other, Warning, fmt.Sprintf("Skipped invalid state transition to %s due to: %s", toState, reason))
return false
OKState:
e.state = toState
e.logStatusLocked(Other, OK, reason)
// Adjust per-state metrics; see SetStateLocked for the rationale on
// skipping the restoring and disconnected states.
if fromState != "" && toState != StateRestoring {
metrics.EndpointStateCount.
WithLabelValues(fromState).Dec()
}
// Since StateDisconnected is the final state, after which the
// endpoint is gone, we should not increment metrics for this state.
if toState != "" && toState != StateDisconnected {
metrics.EndpointStateCount.
WithLabelValues(toState).Inc()
}
return true
}
// bumpPolicyRevisionLocked marks the endpoint to be running the next scheduled
// policy revision as setup by e.regenerate(). The revision is only applied
// if it is newer than the current one (revisions never move backwards).
// endpoint.Mutex should be held.
func (e *Endpoint) bumpPolicyRevisionLocked(revision uint64) {
if revision > e.policyRevision {
e.setPolicyRevision(revision)
}
}
// OnProxyPolicyUpdate is a callback used to update the Endpoint's
// proxyPolicyRevision when the specified revision has been applied in the
// proxy. The revision is monotonically increasing; older revisions are
// ignored.
func (e *Endpoint) OnProxyPolicyUpdate(revision uint64) {
	// NOTE: UnconditionalLock is used here because this callback has no way of reporting an error
	e.UnconditionalLock()
	defer e.Unlock()
	if revision > e.proxyPolicyRevision {
		e.proxyPolicyRevision = revision
	}
}
// getProxyStatisticsLocked gets the ProxyStatistics for the flows with the
// given characteristics, or adds a new zeroed entry and returns it.
// Must be called with e.proxyStatisticsMutex held.
func (e *Endpoint) getProxyStatisticsLocked(l7Protocol string, port uint16, ingress bool) *models.ProxyStatistics {
	location := models.ProxyStatisticsLocationEgress
	if ingress {
		location = models.ProxyStatisticsLocationIngress
	}
	key := models.ProxyStatistics{
		Location: location,
		Port:     int64(port),
		Protocol: l7Protocol,
	}
	// Lazily initialize the statistics map on first use.
	if e.proxyStatistics == nil {
		e.proxyStatistics = make(map[models.ProxyStatistics]*models.ProxyStatistics)
	}
	if stats, ok := e.proxyStatistics[key]; ok {
		return stats
	}
	// The stored value is a copy of the key with the counters attached.
	entry := key
	entry.Statistics = &models.RequestResponseStatistics{
		Requests:  &models.MessageForwardingStatistics{},
		Responses: &models.MessageForwardingStatistics{},
	}
	e.proxyStatistics[key] = &entry
	return &entry
}
// UpdateProxyStatistics updates the Endpoint's proxy statistics to account
// for a new observed flow with the given characteristics, and bumps the
// corresponding global proxy metrics.
func (e *Endpoint) UpdateProxyStatistics(l7Protocol string, port uint16, ingress, request bool, verdict accesslog.FlowVerdict) {
	e.proxyStatisticsMutex.Lock()
	defer e.proxyStatisticsMutex.Unlock()
	entry := e.getProxyStatisticsLocked(l7Protocol, port, ingress)
	// Pick the request or response counter set for this flow direction.
	var counters *models.MessageForwardingStatistics
	if request {
		counters = entry.Statistics.Requests
	} else {
		counters = entry.Statistics.Responses
	}
	counters.Received++
	metrics.ProxyReceived.Inc()
	switch verdict {
	case accesslog.VerdictForwarded:
		counters.Forwarded++
		metrics.ProxyForwarded.Inc()
	case accesslog.VerdictDenied:
		counters.Denied++
		metrics.ProxyDenied.Inc()
	case accesslog.VerdictError:
		counters.Error++
		metrics.ProxyParseErrors.Inc()
	}
}
// APICanModify determines whether API requests from a user are allowed to
// modify this endpoint. Endpoints carrying a reserved identity label may not
// be modified via the API, except while they still have the init identity.
func APICanModify(e *Endpoint) error {
	if e.IsInit() {
		return nil
	}
	if e.OpLabels.OrchestrationIdentity.FindReserved() != nil {
		return fmt.Errorf("Endpoint cannot be modified by API call")
	}
	return nil
}
// getIDandLabels returns a human-readable "ID (labels)" string for the
// endpoint; the labels portion is empty when no security identity is set.
func (e *Endpoint) getIDandLabels() string {
	e.UnconditionalRLock()
	defer e.RUnlock()
	var labels string
	if e.SecurityIdentity != nil {
		labels = e.SecurityIdentity.Labels.String()
	}
	return fmt.Sprintf("%d (%s)", e.ID, labels)
}
// ModifyIdentityLabels changes the custom and orchestration identity labels of an endpoint.
// Labels can be added or deleted. If a label change is performed, the
// endpoint will receive a new identity and will be regenerated. Both of these
// operations will happen in the background.
func (e *Endpoint) ModifyIdentityLabels(owner Owner, addLabels, delLabels pkgLabels.Labels) error {
	if err := e.LockAlive(); err != nil {
		return err
	}
	switch e.GetStateLocked() {
	case StateDisconnected, StateDisconnecting:
		// Bug fix: this early return previously leaked the endpoint
		// mutex acquired by LockAlive above; release it before bailing.
		e.Unlock()
		return nil
	}
	newLabels := e.OpLabels.DeepCopy()
	for k := range delLabels {
		// The change request is accepted if the label is on
		// any of the lists. If the label is already disabled,
		// we will simply ignore that change.
		if newLabels.Custom[k] == nil && newLabels.OrchestrationIdentity[k] == nil && newLabels.Disabled[k] == nil {
			e.Unlock()
			return fmt.Errorf("label %s not found", k)
		}
		// Deleting an orchestration label moves it to the disabled list
		// so it can be restored later; custom labels are removed outright.
		if v := newLabels.OrchestrationIdentity[k]; v != nil {
			delete(newLabels.OrchestrationIdentity, k)
			newLabels.Disabled[k] = v
		}
		if newLabels.Custom[k] != nil {
			delete(newLabels.Custom, k)
		}
	}
	for k, v := range addLabels {
		if newLabels.Disabled[k] != nil { // Restore label.
			delete(newLabels.Disabled, k)
			newLabels.OrchestrationIdentity[k] = v
		} else if newLabels.OrchestrationIdentity[k] != nil { // Replace label's source and value.
			newLabels.OrchestrationIdentity[k] = v
		} else {
			newLabels.Custom[k] = v
		}
	}
	e.OpLabels = *newLabels
	// Mark with StateWaitingForIdentity, it will be set to
	// StateWaitingToRegenerate after the identity resolution has been
	// completed
	e.SetStateLocked(StateWaitingForIdentity, "Triggering identity resolution due to updated identity labels")
	e.identityRevision++
	rev := e.identityRevision
	e.Unlock()
	// Resolve the (possibly) new identity outside the endpoint lock.
	e.runLabelsResolver(owner, rev)
	return nil
}
// IsInit returns true if the endpoint still hasn't received identity labels,
// i.e. has the special identity with label reserved:init.
func (e *Endpoint) IsInit() bool {
	lbl := e.OpLabels.GetIdentityLabel(pkgLabels.IDNameInit)
	if lbl == nil {
		return false
	}
	return lbl.Source == pkgLabels.LabelSourceReserved
}
// UpdateLabels is called to update the labels of an endpoint. Calls to this
// function do not necessarily mean that the labels actually changed. The
// container runtime layer will periodically synchronize labels.
//
// If a net label change was performed, the endpoint will receive a new
// identity and will be regenerated. Both of these operations will happen in
// the background.
func (e *Endpoint) UpdateLabels(owner Owner, identityLabels, infoLabels pkgLabels.Labels) {
	log.WithFields(logrus.Fields{
		logfields.ContainerID:    e.GetShortContainerID(),
		logfields.EndpointID:     e.StringID(),
		logfields.IdentityLabels: identityLabels.String(),
		logfields.InfoLabels:     infoLabels.String(),
	}).Debug("Refreshing labels of endpoint")
	if err := e.LockAlive(); err != nil {
		// Fixed typo in log message: "endpint" -> "endpoint".
		e.LogDisconnectedMutexAction(err, "when trying to refresh endpoint labels")
		return
	}
	e.replaceInformationLabels(infoLabels)
	// Replace identity labels and update the identity if labels have changed.
	rev := e.replaceIdentityLabels(identityLabels)
	e.Unlock()
	// rev == 0 means no net identity-label change; skip identity resolution.
	if rev != 0 {
		e.runLabelsResolver(owner, rev)
	}
}
// identityResolutionIsObsolete reports whether an identity resolution started
// at revision myChangeRev should be abandoned, either because the endpoint
// has since disconnected or because a newer resolution has been started.
func (e *Endpoint) identityResolutionIsObsolete(myChangeRev int) bool {
	// If in disconnected state, skip as well as this operation is no
	// longer required.
	if e.state == StateDisconnected {
		return true
	}
	// A differing revision means the endpoint has since received a new
	// identity revision and a new resolution routine has been started.
	return myChangeRev != e.identityRevision
}
// runLabelsResolver resolves the endpoint's security identity for its current
// identity labels: synchronously when the identity can be allocated locally,
// and in all cases via a periodic controller that retries on failure.
// Must be called with e.Mutex NOT held.
func (e *Endpoint) runLabelsResolver(owner Owner, myChangeRev int) {
if err := e.RLockAlive(); err != nil {
// If a labels update and an endpoint delete API request arrive
// in quick succession, this could occur; in that case, there's
// no point updating the controller.
e.Logger().WithError(err).Info("Cannot run labels resolver")
return
}
newLabels := e.OpLabels.IdentityLabels()
e.RUnlock()
scopedLog := e.Logger().WithField(logfields.IdentityLabels, newLabels)
// If we are certain we can resolve the identity without accessing the KV
// store, do it first synchronously right now. This can reduce the number
// of regenerations for the endpoint during its initialization.
if identityPkg.IdentityAllocationIsLocal(newLabels) {
scopedLog.Debug("Endpoint has reserved identity, changing synchronously")
err := e.identityLabelsChanged(owner, myChangeRev)
if err != nil {
scopedLog.WithError(err).Warn("Error changing endpoint identity")
}
}
// The controller re-runs identityLabelsChanged every 5 minutes (and on
// failure), providing retry behavior; the closure captures myChangeRev so
// an obsolete resolution aborts itself.
ctrlName := fmt.Sprintf("resolve-identity-%d", e.ID)
e.controllers.UpdateController(ctrlName,
controller.ControllerParams{
DoFunc: func() error {
return e.identityLabelsChanged(owner, myChangeRev)
},
RunInterval: 5 * time.Minute,
},
)
}
// identityLabelsChanged allocates a security identity matching the endpoint's
// current identity labels and assigns it to the endpoint, triggering a
// regeneration. The resolution aborts early (returning nil) if the endpoint
// already carries a matching identity or if the revision myChangeRev has been
// superseded by a newer label change. The endpoint lock is dropped around the
// potentially slow identity allocation (KV store access).
func (e *Endpoint) identityLabelsChanged(owner Owner, myChangeRev int) error {
if err := e.RLockAlive(); err != nil {
return err
}
newLabels := e.OpLabels.IdentityLabels()
elog := e.Logger().WithFields(logrus.Fields{
logfields.EndpointID: e.ID,
logfields.IdentityLabels: newLabels,
})
// Since we unlocked the endpoint and re-locked, the label update may already be obsolete
if e.identityResolutionIsObsolete(myChangeRev) {
e.RUnlock()
elog.Debug("Endpoint identity has changed, aborting resolution routine in favour of new one")
return nil
}
if e.SecurityIdentity != nil && e.SecurityIdentity.Labels.Equals(newLabels) {
// Sets endpoint state to ready if was waiting for identity
// NOTE(review): this mutates endpoint state while holding only the
// read lock — looks racy; confirm whether SetStateLocked requires the
// write lock here.
if e.GetStateLocked() == StateWaitingForIdentity {
e.SetStateLocked(StateReady, "Set identity for this endpoint")
}
e.RUnlock()
elog.Debug("Endpoint labels unchanged, skipping resolution of identity")
return nil
}
// Unlock the endpoint mutex for the possibly long lasting kvstore operation
e.RUnlock()
elog.Debug("Resolving identity for labels")
identity, _, err := identityPkg.AllocateIdentity(newLabels)
if err != nil {
err = fmt.Errorf("unable to resolve identity: %s", err)
e.LogStatus(Other, Warning, fmt.Sprintf("%s (will retry)", err.Error()))
return err
}
if err := e.LockAlive(); err != nil {
return err
}
// Since we unlocked the endpoint and re-locked, the label update may already be obsolete
if e.identityResolutionIsObsolete(myChangeRev) {
e.Unlock()
// Release the identity we just allocated since it will not be used.
err := identity.Release()
if err != nil {
// non fatal error as keys will expire after lease expires but log it
elog.WithFields(logrus.Fields{logfields.Identity: identity.ID}).
WithError(err).Warn("Unable to release newly allocated identity again")
}
return nil
}
// If endpoint has an old identity, defer release of it to the end of
// the function after the endpoint structured has been unlocked again
if e.SecurityIdentity != nil {
oldIdentity := e.SecurityIdentity
defer func() {
err := oldIdentity.Release()
if err != nil {
elog.WithFields(logrus.Fields{logfields.Identity: oldIdentity.ID}).
WithError(err).Warn("BUG: Unable to release old endpoint identity")
}
}()
}
elog.WithFields(logrus.Fields{logfields.Identity: identity.StringID()}).
Debug("Assigned new identity to endpoint")
e.SetIdentity(identity)
readyToRegenerate := e.SetStateLocked(StateWaitingToRegenerate, "Triggering regeneration due to new identity")
// Unconditionally force policy recomputation after a new identity has been
// assigned.
e.ForcePolicyCompute()
e.Unlock()
if readyToRegenerate {
e.Regenerate(owner, NewRegenerationContext("updated security labels"))
}
return nil
}
// setPolicyRevision sets the endpoint's policy revision to rev, updates the
// endpoint logger, and wakes up any waiters whose wanted revision is now
// satisfied (or whose context has been cancelled).
func (e *Endpoint) setPolicyRevision(rev uint64) {
e.policyRevision = rev
e.UpdateLogger(map[string]interface{}{
logfields.DatapathPolicyRevision: e.policyRevision,
})
// Deleting map entries while ranging over the map is safe in Go.
for ps := range e.policyRevisionSignals {
select {
case <-ps.ctx.Done():
// Waiter gave up; close its channel and drop the signal.
close(ps.ch)
delete(e.policyRevisionSignals, ps)
default:
if rev >= ps.wantedRev {
close(ps.ch)
delete(e.policyRevisionSignals, ps)
}
}
}
}
// cleanPolicySignals closes and removes all policy revision signals, waking
// every pending waiter.
func (e *Endpoint) cleanPolicySignals() {
	for sig := range e.policyRevisionSignals {
		close(sig.ch)
	}
	e.policyRevisionSignals = make(map[policySignal]bool)
}
// policySignal is used to mark when a wanted policy revision is reached.
type policySignal struct {
// wantedRev specifies which policy revision the signal wants.
wantedRev uint64
// ch is the channel that is closed once the wanted policy revision is
// reached (or the endpoint disconnects / the context is cancelled).
ch chan struct{}
// ctx is the context for the policy signal request.
ctx context.Context
}
// WaitForPolicyRevision returns a channel that is closed when one or more of
// the following conditions have met:
// - the endpoint is disconnected state
// - the endpoint's policy revision reaches the wanted revision
func (e *Endpoint) WaitForPolicyRevision(ctx context.Context, rev uint64) <-chan struct{} {
	// NOTE: UnconditionalLock is used here because this method handles endpoint in disconnected state on its own
	e.UnconditionalLock()
	defer e.Unlock()
	ch := make(chan struct{})
	// Already satisfied, or terminal state: signal immediately.
	if e.policyRevision >= rev || e.state == StateDisconnected {
		close(ch)
		return ch
	}
	// Register the waiter; setPolicyRevision closes ch when rev is reached.
	if e.policyRevisionSignals == nil {
		e.policyRevisionSignals = map[policySignal]bool{}
	}
	e.policyRevisionSignals[policySignal{
		wantedRev: rev,
		ctx:       ctx,
		ch:        ch,
	}] = true
	return ch
}
// IPs returns the slice of valid IPs for this endpoint (IPv4 first when
// present, then IPv6).
func (e *Endpoint) IPs() []net.IP {
	addrs := make([]net.IP, 0, 2)
	if e.IPv4 != nil {
		addrs = append(addrs, e.IPv4.IP())
	}
	if e.IPv6 != nil {
		addrs = append(addrs, e.IPv6.IP())
	}
	return addrs
}
// InsertEvent is called when the endpoint is inserted into the endpoint
// manager. It only logs the insertion at info level.
func (e *Endpoint) InsertEvent() {
e.Logger().Info("New endpoint")
}
// syncPolicyMap attempts to synchronize the PolicyMap for this endpoint to
// contain the set of PolicyKeys represented by the endpoint's desiredMapState.
// It checks the current contents of the endpoint's PolicyMap and deletes any
// PolicyKeys that are not present in the endpoint's desiredMapState. It then
// adds any keys that are not present in the map. When a key from desiredMapState
// is inserted successfully to the endpoint's BPF PolicyMap, it is added to the
// endpoint's realizedMapState field. Returns an error if the endpoint's BPF
// PolicyMap is unable to be dumped, or any update operation to the map fails.
// Must be called with e.Mutex locked.
func (e *Endpoint) syncPolicyMap() error {
	// Lazily initialize both state maps so the loops below can operate on
	// them unconditionally.
	if e.realizedMapState == nil {
		e.realizedMapState = make(PolicyMapState)
	}

	if e.desiredMapState == nil {
		e.desiredMapState = make(PolicyMapState)
	}

	if e.PolicyMap == nil {
		return fmt.Errorf("not syncing PolicyMap state for endpoint because PolicyMap is nil")
	}

	currentMapContents, err := e.PolicyMap.DumpToSlice()

	// If map is unable to be dumped, attempt to close map and open it again.
	// See GH-4229.
	if err != nil {
		e.Logger().WithError(err).Error("unable to dump PolicyMap when trying to sync desired and realized PolicyMap state")

		// Close to avoid leaking of file descriptors, but still continue in case
		// Close() does not succeed, because otherwise the map will never be
		// opened again unless the agent is restarted.
		err := e.PolicyMap.Close()
		if err != nil {
			e.Logger().WithError(err).Error("unable to close PolicyMap which was not able to be dumped")
		}

		// NOTE: this assigns to the `err` declared just above (shadowing
		// the outer dump error), which is intentional: only the reopen /
		// re-dump outcome matters from here on.
		e.PolicyMap, _, err = policymap.OpenMap(e.PolicyMapPathLocked())
		if err != nil {
			return fmt.Errorf("unable to open PolicyMap for endpoint: %s", err)
		}

		// Try to dump again, fail if error occurs.
		currentMapContents, err = e.PolicyMap.DumpToSlice()
		if err != nil {
			return err
		}
	}

	// Individual delete/add failures are collected rather than aborting the
	// sync, so that as much state as possible is reconciled in one pass.
	errors := []error{}

	// Pass 1: remove stale entries — anything present in the BPF map but
	// absent from the desired state.
	for _, entry := range currentMapContents {
		// Convert key to host-byte order for lookup in the desiredMapState.
		keyHostOrder := entry.Key.ToHost()

		// If key that is in policy map is not in desired state, just remove it.
		if _, ok := e.desiredMapState[keyHostOrder]; !ok {
			// Can pass key with host byte-order fields, as it will get
			// converted to network byte-order.
			err := e.PolicyMap.DeleteKey(keyHostOrder)
			if err != nil {
				e.Logger().WithError(err).Errorf("Failed to delete PolicyMap key %s", entry.Key.String())
				errors = append(errors, err)
			} else {
				// Operation was successful, remove from realized state.
				delete(e.realizedMapState, keyHostOrder)
			}
		}
	}

	// Pass 2: insert or update entries that are desired but missing from
	// (or different in) the realized state.
	for keyToAdd, entry := range e.desiredMapState {
		if oldEntry, ok := e.realizedMapState[keyToAdd]; !ok || oldEntry != entry {
			err := e.PolicyMap.AllowKey(keyToAdd, entry.ProxyPort)
			if err != nil {
				e.Logger().WithError(err).Errorf("Failed to add PolicyMap key %s %d", keyToAdd.String(), entry.ProxyPort)
				errors = append(errors, err)
			} else {
				// Operation was successful, add to realized state.
				e.realizedMapState[keyToAdd] = entry
			}
		}
	}

	if len(errors) > 0 {
		return fmt.Errorf("synchronizing desired PolicyMap state failed: %s", errors)
	}

	return nil
}
// syncPolicyMapController installs (or updates) a per-endpoint controller
// that periodically reconciles the endpoint's BPF PolicyMap with its
// desired state via syncPolicyMap.
func (e *Endpoint) syncPolicyMapController() {
	name := fmt.Sprintf("sync-policymap-%d", e.ID)
	params := controller.ControllerParams{
		DoFunc: func() (reterr error) {
			// Failure to lock is not an error, it means
			// that the endpoint was disconnected and we
			// should exit gracefully.
			if err := e.LockAlive(); err != nil {
				return nil
			}
			defer e.Unlock()
			return e.syncPolicyMap()
		},
		RunInterval: time.Minute,
	}
	e.controllers.UpdateController(name, params)
}
// IsDisconnecting returns true if the endpoint is being disconnected or
// already disconnected
//
// This function must be called after re-aquiring the endpoint mutex to verify
// that the endpoint has not been removed in the meantime.
//
// endpoint.mutex must be held in read mode at least
func (e *Endpoint) IsDisconnecting() bool {
	switch e.state {
	case StateDisconnected, StateDisconnecting:
		return true
	}
	return false
}
// garbageCollectConntrack will run the ctmap.GC() on either the endpoint's
// local conntrack table or the global conntrack table.
//
// The endpoint lock must be held
func (e *Endpoint) garbageCollectConntrack(filter *ctmap.GCFilter) {
	var maps []*ctmap.Map
	ipv4 := !option.Config.IPv4Disabled

	// Endpoints with a local conntrack table GC only their own maps;
	// otherwise the shared global maps are collected.
	if e.ConntrackLocalLocked() {
		maps = ctmap.LocalMaps(e, ipv4, true)
	} else {
		maps = ctmap.GlobalMaps(ipv4, true)
	}

	for _, m := range maps {
		if err := m.Open(); err != nil {
			// The path is only best-effort context for the log entry;
			// a failure to resolve it is logged but not fatal.
			filepath, err2 := m.Path()
			if err2 != nil {
				log.WithError(err2).Warn("Unable to get CT map path")
			}
			log.WithError(err).WithField(logfields.Path, filepath).Warn("Unable to open map")
			continue
		}
		ctmap.GC(m, filter)
		// Close each map as soon as it has been collected. The previous
		// `defer m.Close()` inside this loop kept every opened map (and
		// its file descriptor) alive until the function returned,
		// accumulating open fds across iterations.
		m.Close()
	}
}
// scrubIPsInConntrackTableLocked garbage-collects conntrack entries that
// match either of this endpoint's IP addresses.
//
// The endpoint lock must be held.
//
// NOTE(review): e.IPv4/e.IPv6 are not nil-checked here (unlike in IPs());
// presumably their String() methods render a zero value harmlessly when an
// address family is unset — confirm against the addressing types.
func (e *Endpoint) scrubIPsInConntrackTableLocked() {
	e.garbageCollectConntrack(&ctmap.GCFilter{
		MatchIPs: map[string]struct{}{
			e.IPv4.String(): {},
			e.IPv6.String(): {},
		},
	})
}
// scrubIPsInConntrackTable is the locking wrapper around
// scrubIPsInConntrackTableLocked: it acquires the endpoint lock
// unconditionally, scrubs, and releases the lock.
func (e *Endpoint) scrubIPsInConntrackTable() {
	e.UnconditionalLock()
	defer e.Unlock()
	e.scrubIPsInConntrackTableLocked()
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.