CombinedText
stringlengths
4
3.42M
package config import ( "encoding/base64" "fmt" "os" "strings" "github.com/dokku/dokku/plugins/common" ) //CommandShow implementes config:show func CommandShow(args []string, global bool, shell bool, export bool, merged bool) { appName, _ := getCommonArgs(global, args) env := getEnvironment(appName, merged) if shell && export { common.LogFail("Only one of --shell and --export can be given") } if shell { fmt.Print(env.Export(ExportFormatShell)) } else if export { fmt.Println(env.Export(ExportFormatExports)) } else { contextName := "global" if appName != "" { contextName = appName } common.LogInfo2(contextName + " env vars") fmt.Println(env.Export(ExportFormatPretty)) } } //CommandGet implements config:get func CommandGet(args []string, global bool, quoted bool) { appName, keys := getCommonArgs(global, args) if len(keys) > 1 { common.LogFail(fmt.Sprintf("Unexpected argument(s): %v", keys[1:])) } if len(keys) == 0 { common.LogFail("Expected: key") } if value, ok := Get(appName, keys[0]); !ok { os.Exit(1) } else { if quoted { fmt.Printf("'%s'", singleQuoteEscape(value)) } else { fmt.Printf("%s", value) } } } //CommandUnset implements config:unset func CommandUnset(args []string, global bool, noRestart bool) { appName, keys := getCommonArgs(global, args) err := UnsetMany(appName, keys, !noRestart) if err != nil { common.LogFail(err.Error()) } } //CommandSet implements config:set func CommandSet(args []string, global bool, noRestart bool, encoded bool) { appName, pairs := getCommonArgs(global, args) updated := make(map[string]string) for _, e := range pairs { parts := strings.SplitN(e, "=", 2) if len(parts) == 1 { common.LogFail("Invalid env pair: " + e) } key, value := parts[0], parts[1] if encoded { decoded, err := base64.StdEncoding.DecodeString(value) if err != nil { common.LogFail(fmt.Sprintf("%s for key '%s'", err.Error(), key)) } value = string(decoded) } updated[key] = value } err := SetMany(appName, updated, !noRestart) if err != nil { 
common.LogFail(err.Error()) } } //CommandKeys implements config:keys func CommandKeys(args []string, global bool, merged bool) { appName, trailingArgs := getCommonArgs(global, args) if len(trailingArgs) > 0 { common.LogFail(fmt.Sprintf("Trailing argument(s): %v", trailingArgs)) } env := getEnvironment(appName, merged) for _, k := range env.Keys() { fmt.Println(k) } } //CommandExport implements config:export func CommandExport(args []string, global bool, merged bool, format string) { appName, trailingArgs := getCommonArgs(global, args) if len(trailingArgs) > 0 { common.LogFail(fmt.Sprintf("Trailing argument(s): %v", trailingArgs)) } env := getEnvironment(appName, merged) exportType := ExportFormatExports suffix := "\n" switch format { case "exports": exportType = ExportFormatExports case "envfile": exportType = ExportFormatEnvfile case "docker-args": exportType = ExportFormatDockerArgs case "shell": exportType = ExportFormatShell suffix = " " case "pretty": exportType = ExportFormatPretty default: common.LogFail(fmt.Sprintf("Unknown export format: %v", format)) } exported := env.Export(exportType) fmt.Print(exported + suffix) } //CommandBundle implements config:bundle func CommandBundle(args []string, global bool, merged bool) { appName, trailingArgs := getCommonArgs(global, args) if len(trailingArgs) > 0 { common.LogFail(fmt.Sprintf("Trailing argument(s): %v", trailingArgs)) } env := getEnvironment(appName, merged) env.ExportBundle(os.Stdout) } //getEnvironment for the given app (global config if appName is empty). Merge with global environment if merged is true. 
func getEnvironment(appName string, merged bool) (env *Env) { var err error if appName != "" && merged { env, err = LoadMergedAppEnv(appName) } else { env, err = loadAppOrGlobalEnv(appName) } if err != nil { common.LogFail(err.Error()) } return env } //getCommonArgs extracts common positional args (appName and keys) func getCommonArgs(global bool, args []string) (appName string, keys []string) { nextArg := 0 if !global { if len(args) > 0 { appName = args[0] } if appName == "" { common.LogFail("Please specify an app or --global") } else { nextArg++ } } keys = args[nextArg:] return appName, keys } Reduce copying args when global package config import ( "encoding/base64" "fmt" "os" "strings" "github.com/dokku/dokku/plugins/common" ) //CommandShow implementes config:show func CommandShow(args []string, global bool, shell bool, export bool, merged bool) { appName, _ := getCommonArgs(global, args) env := getEnvironment(appName, merged) if shell && export { common.LogFail("Only one of --shell and --export can be given") } if shell { fmt.Print(env.Export(ExportFormatShell)) } else if export { fmt.Println(env.Export(ExportFormatExports)) } else { contextName := "global" if appName != "" { contextName = appName } common.LogInfo2(contextName + " env vars") fmt.Println(env.Export(ExportFormatPretty)) } } //CommandGet implements config:get func CommandGet(args []string, global bool, quoted bool) { appName, keys := getCommonArgs(global, args) if len(keys) > 1 { common.LogFail(fmt.Sprintf("Unexpected argument(s): %v", keys[1:])) } if len(keys) == 0 { common.LogFail("Expected: key") } if value, ok := Get(appName, keys[0]); !ok { os.Exit(1) } else { if quoted { fmt.Printf("'%s'", singleQuoteEscape(value)) } else { fmt.Printf("%s", value) } } } //CommandUnset implements config:unset func CommandUnset(args []string, global bool, noRestart bool) { appName, keys := getCommonArgs(global, args) err := UnsetMany(appName, keys, !noRestart) if err != nil { common.LogFail(err.Error()) } } 
//CommandSet implements config:set func CommandSet(args []string, global bool, noRestart bool, encoded bool) { appName, pairs := getCommonArgs(global, args) updated := make(map[string]string) for _, e := range pairs { parts := strings.SplitN(e, "=", 2) if len(parts) == 1 { common.LogFail("Invalid env pair: " + e) } key, value := parts[0], parts[1] if encoded { decoded, err := base64.StdEncoding.DecodeString(value) if err != nil { common.LogFail(fmt.Sprintf("%s for key '%s'", err.Error(), key)) } value = string(decoded) } updated[key] = value } err := SetMany(appName, updated, !noRestart) if err != nil { common.LogFail(err.Error()) } } //CommandKeys implements config:keys func CommandKeys(args []string, global bool, merged bool) { appName, trailingArgs := getCommonArgs(global, args) if len(trailingArgs) > 0 { common.LogFail(fmt.Sprintf("Trailing argument(s): %v", trailingArgs)) } env := getEnvironment(appName, merged) for _, k := range env.Keys() { fmt.Println(k) } } //CommandExport implements config:export func CommandExport(args []string, global bool, merged bool, format string) { appName, trailingArgs := getCommonArgs(global, args) if len(trailingArgs) > 0 { common.LogFail(fmt.Sprintf("Trailing argument(s): %v", trailingArgs)) } env := getEnvironment(appName, merged) exportType := ExportFormatExports suffix := "\n" switch format { case "exports": exportType = ExportFormatExports case "envfile": exportType = ExportFormatEnvfile case "docker-args": exportType = ExportFormatDockerArgs case "shell": exportType = ExportFormatShell suffix = " " case "pretty": exportType = ExportFormatPretty default: common.LogFail(fmt.Sprintf("Unknown export format: %v", format)) } exported := env.Export(exportType) fmt.Print(exported + suffix) } //CommandBundle implements config:bundle func CommandBundle(args []string, global bool, merged bool) { appName, trailingArgs := getCommonArgs(global, args) if len(trailingArgs) > 0 { common.LogFail(fmt.Sprintf("Trailing argument(s): %v", 
trailingArgs)) } env := getEnvironment(appName, merged) env.ExportBundle(os.Stdout) } //getEnvironment for the given app (global config if appName is empty). Merge with global environment if merged is true. func getEnvironment(appName string, merged bool) (env *Env) { var err error if appName != "" && merged { env, err = LoadMergedAppEnv(appName) } else { env, err = loadAppOrGlobalEnv(appName) } if err != nil { common.LogFail(err.Error()) } return env } //getCommonArgs extracts common positional args (appName and keys) func getCommonArgs(global bool, args []string) (appName string, keys []string) { keys = args if !global { if len(args) > 0 { appName = args[0] } if appName == "" { common.LogFail("Please specify an app or --global") } else { keys = args[1:] } } return appName, keys }
package login import ( "crypto/md5" "crypto/rsa" "crypto/sha256" "encoding/base64" "encoding/hex" "encoding/json" "errors" "fmt" "net/http" "net/url" "regexp" "strconv" errs "github.com/pkg/errors" "github.com/almighty/almighty-core/account" "github.com/almighty/almighty-core/app" "github.com/almighty/almighty-core/application" "github.com/almighty/almighty-core/auth" er "github.com/almighty/almighty-core/errors" "github.com/almighty/almighty-core/jsonapi" "github.com/almighty/almighty-core/log" tokencontext "github.com/almighty/almighty-core/login/token_context" "github.com/almighty/almighty-core/rest" "github.com/almighty/almighty-core/token" jwt "github.com/dgrijalva/jwt-go" "github.com/goadesign/goa" goajwt "github.com/goadesign/goa/middleware/security/jwt" uuid "github.com/satori/go.uuid" "golang.org/x/net/context" "golang.org/x/oauth2" ) // NewKeycloakOAuthProvider creates a new login.Service capable of using keycloak for authorization func NewKeycloakOAuthProvider(config *oauth2.Config, identities account.IdentityRepository, users account.UserRepository, tokenManager token.Manager, db application.DB) *KeycloakOAuthProvider { return &KeycloakOAuthProvider{ config: config, Identities: identities, Users: users, TokenManager: tokenManager, db: db, } } // KeycloakOAuthProvider represents a keyclaok IDP type KeycloakOAuthProvider struct { config *oauth2.Config Identities account.IdentityRepository Users account.UserRepository TokenManager token.Manager db application.DB } // KeycloakOAuthService represents keycloak OAuth service interface type KeycloakOAuthService interface { Perform(ctx *app.AuthorizeLoginContext, authEndpoint string, tokenEndpoint string, brokerEndpoint string, validRedirectURL string) error CreateOrUpdateKeycloakUser(accessToken string, ctx context.Context) (*account.Identity, *account.User, error) Link(ctx *app.LinkLoginContext, brokerEndpoint string, clientID string, validRedirectURL string) error LinkSession(ctx *app.LinksessionLoginContext, 
brokerEndpoint string, clientID string, validRedirectURL string) error LinkCallback(ctx *app.LinkcallbackLoginContext, brokerEndpoint string, clientID string) error } type linkInterface interface { context.Context jsonapi.InternalServerError TemporaryRedirect() error BadRequest(r *app.JSONAPIErrors) error } // keycloakTokenClaims represents standard Keycloak token claims type keycloakTokenClaims struct { Name string `json:"name"` Username string `json:"preferred_username"` GivenName string `json:"given_name"` FamilyName string `json:"family_name"` Email string `json:"email"` SessionState string `json:"session_state"` ClientSession string `json:"client_session"` jwt.StandardClaims } var allProvidersToLink = []string{"github", "openshift-v3"} // Perform performs authenticatin func (keycloak *KeycloakOAuthProvider) Perform(ctx *app.AuthorizeLoginContext, authEndpoint string, tokenEndpoint string, brokerEndpoint string, validRedirectURL string) error { state := ctx.Params.Get("state") code := ctx.Params.Get("code") if code != "" { // After redirect from oauth provider // validate known state knownReferrer, err := keycloak.getReferrer(ctx, state) if err != nil { log.Error(ctx, map[string]interface{}{ "state": state, "err": err, }, "uknown state") jerrors, _ := jsonapi.ErrorToJSONAPIErrors(goa.ErrUnauthorized("uknown state. 
" + err.Error())) return ctx.Unauthorized(jerrors) } keycloakToken, err := keycloak.config.Exchange(ctx, code) if err != nil { log.Error(ctx, map[string]interface{}{ "code": code, "err": err, }, "keycloak exchange operation failed") return redirectWithError(ctx, knownReferrer, err.Error()) } _, _, err = keycloak.CreateOrUpdateKeycloakUser(keycloakToken.AccessToken, ctx) if err != nil { log.Error(ctx, map[string]interface{}{ "token": keycloakToken.AccessToken, "err": err, }, "failed to create a user and KeyCloak identity using the access token") return redirectWithError(ctx, knownReferrer, err.Error()) } // redirect back to original referrel referrerURL, err := url.Parse(knownReferrer) if err != nil { return redirectWithError(ctx, knownReferrer, err.Error()) } err = encodeToken(referrerURL, keycloakToken) if err != nil { return redirectWithError(ctx, knownReferrer, err.Error()) } referrerStr := referrerURL.String() // Check if federated identities are not likned yet // TODO we probably won't want to check it for the existing users. // But we need it for now because old users still may not be linked. linked, err := keycloak.checkAllFederatedIdentities(ctx, keycloakToken.AccessToken, brokerEndpoint) if err != nil { return jsonapi.JSONErrorResponse(ctx, goa.ErrInternal(err.Error())) } // Return linked=true param if account has been linked to all IdPs or linked=false if not. if linked { referrerStr = referrerStr + "&linked=true" ctx.ResponseData.Header().Set("Location", referrerStr) return ctx.TemporaryRedirect() } // TODO // ---- Autolinking enabled regardless of the "link" param. 
Remove when UI adds support of account linking link := true // ---- if !link && (ctx.Link == nil || !*ctx.Link) { referrerStr = referrerStr + "&linked=false" ctx.ResponseData.Header().Set("Location", referrerStr) return ctx.TemporaryRedirect() } referrerStr = referrerStr + "&linked=true" return keycloak.autoLinkProvidersDuringLogin(ctx, keycloakToken.AccessToken, referrerStr) } // First time access, redirect to oauth provider redirect := ctx.Redirect referrer := ctx.RequestData.Header.Get("Referer") if redirect == nil { if referrer == "" { jerrors, _ := jsonapi.ErrorToJSONAPIErrors(goa.ErrBadRequest("Referer Header and redirect param are both empty. At least one should be specified.")) return ctx.BadRequest(jerrors) } redirect = &referrer } // store referrer in a state reference to redirect later log.Info(ctx, map[string]interface{}{ "referrer": referrer, "redirect": redirect, }, "Got Request from!") stateID := uuid.NewV4() err := keycloak.saveReferrer(ctx, stateID, *redirect, validRedirectURL) if err != nil { log.Error(ctx, map[string]interface{}{ "state": stateID, "referrer": referrer, "redirect": redirect, "err": err, }, "unable to save the state") return err } keycloak.config.Endpoint.AuthURL = authEndpoint keycloak.config.Endpoint.TokenURL = tokenEndpoint keycloak.config.RedirectURL = rest.AbsoluteURL(ctx.RequestData, "/api/login/authorize") redirectURL := keycloak.config.AuthCodeURL(stateID.String(), oauth2.AccessTypeOnline) ctx.ResponseData.Header().Set("Location", redirectURL) return ctx.TemporaryRedirect() } func (keycloak *KeycloakOAuthProvider) autoLinkProvidersDuringLogin(ctx *app.AuthorizeLoginContext, token string, referrerURL string) error { // Link all available Identity Providers linkURL, err := url.Parse(rest.AbsoluteURL(ctx.RequestData, "/api/login/linksession")) if err != nil { return jsonapi.JSONErrorResponse(ctx, goa.ErrInternal(err.Error())) } claims, err := parseToken(token, keycloak.TokenManager.PublicKey()) if err != nil { log.Error(ctx, 
map[string]interface{}{ "err": err, }, "unable to parse token") return jsonapi.JSONErrorResponse(ctx, goa.ErrUnauthorized(err.Error())) } parameters := url.Values{} parameters.Add("redirect", referrerURL) parameters.Add("sessionState", fmt.Sprintf("%v", claims.SessionState)) parameters.Add("clientSession", fmt.Sprintf("%v", claims.ClientSession)) linkURL.RawQuery = parameters.Encode() ctx.ResponseData.Header().Set("Location", linkURL.String()) return ctx.TemporaryRedirect() } // checkAllFederatedIdentities returns false if there is at least one federated identity not linked to the account func (keycloak *KeycloakOAuthProvider) checkAllFederatedIdentities(ctx context.Context, token string, brokerEndpoint string) (bool, error) { for _, provider := range allProvidersToLink { linked, err := keycloak.checkFederatedIdentity(ctx, token, brokerEndpoint, provider) if err != nil { return false, err } if !linked { return false, nil } } return true, nil } // checkFederatedIdentity returns true if the account is already linked to the identity provider func (keycloak *KeycloakOAuthProvider) checkFederatedIdentity(ctx context.Context, token string, brokerEndpoint string, provider string) (bool, error) { req, err := http.NewRequest("GET", brokerEndpoint+"/"+provider+"/token", nil) if err != nil { log.Error(ctx, map[string]interface{}{ "err": err.Error(), }, "Unable to crete http request") return false, er.NewInternalError("unable to crete http request " + err.Error()) } req.Header.Add("Authorization", "Bearer "+token) res, err := http.DefaultClient.Do(req) if err != nil { log.Error(ctx, map[string]interface{}{ "provider": provider, "err": err.Error(), }, "Unable to obtain a federated identity token") return false, er.NewInternalError("Unable to obtain a federated identity token " + err.Error()) } return res.StatusCode == http.StatusOK, nil } // Link links identity provider(s) to the user's account using user's access token func (keycloak *KeycloakOAuthProvider) Link(ctx 
*app.LinkLoginContext, brokerEndpoint string, clientID string, validRedirectURL string) error { token := goajwt.ContextJWT(ctx) claims := token.Claims.(jwt.MapClaims) sessionState := claims["session_state"] clientSession := claims["client_session"] if sessionState == nil || clientSession == nil { return jsonapi.JSONErrorResponse(ctx, goa.ErrInternal("Session state or client session are missing in token")) } ss := sessionState.(*string) cs := clientSession.(*string) return keycloak.linkAccountToProviders(ctx, ctx.RequestData, ctx.ResponseData, ctx.Redirect, ctx.Provider, *ss, *cs, brokerEndpoint, clientID, validRedirectURL) } // LinkSession links identity provider(s) to the user's account using session state func (keycloak *KeycloakOAuthProvider) LinkSession(ctx *app.LinksessionLoginContext, brokerEndpoint string, clientID string, validRedirectURL string) error { if ctx.SessionState == nil || ctx.ClientSession == nil { return jsonapi.JSONErrorResponse(ctx, goa.ErrBadRequest("Authorization header or session state and client session params are required")) } return keycloak.linkAccountToProviders(ctx, ctx.RequestData, ctx.ResponseData, ctx.Redirect, ctx.Provider, *ctx.SessionState, *ctx.ClientSession, brokerEndpoint, clientID, validRedirectURL) } func (keycloak *KeycloakOAuthProvider) linkAccountToProviders(ctx linkInterface, req *goa.RequestData, res *goa.ResponseData, redirect *string, provider *string, sessionState string, clientSession, brokerEndpoint string, clientID string, validRedirectURL string) error { referrer := req.Header.Get("Referer") rdr := redirect if rdr == nil { rdr = &referrer } state := uuid.NewV4() err := keycloak.saveReferrer(ctx, state, *rdr, validRedirectURL) if err != nil { return err } if provider != nil { return keycloak.linkProvider(ctx, req, res, state.String(), sessionState, clientSession, *provider, nil, brokerEndpoint, clientID) } return keycloak.linkProvider(ctx, req, res, state.String(), sessionState, clientSession, 
allProvidersToLink[0], &allProvidersToLink[1], brokerEndpoint, clientID) } // LinkCallback redirects to original referrer when Identity Provider account are linked to the user account func (keycloak *KeycloakOAuthProvider) LinkCallback(ctx *app.LinkcallbackLoginContext, brokerEndpoint string, clientID string) error { state := ctx.State errorMessage := ctx.Params.Get("error") if state == nil { jsonapi.JSONErrorResponse(ctx, goa.ErrInternal("State is empty. "+errorMessage)) } if errorMessage != "" { return jsonapi.JSONErrorResponse(ctx, goa.ErrInternal(errorMessage)) } next := ctx.Next if next != nil { // Link the next provider sessionState := ctx.SessionState clientSession := ctx.ClientSession if sessionState == nil || clientSession == nil { log.Error(ctx, map[string]interface{}{ "state": state, }, "Session state or client session state is empty") jerrors, _ := jsonapi.ErrorToJSONAPIErrors(goa.ErrBadRequest("Session state or client session state is empty")) return ctx.Unauthorized(jerrors) } providerURL, err := getProviderURL(ctx.RequestData, *state, *sessionState, *clientSession, *next, nextProvider(*next), brokerEndpoint, clientID) if err != nil { return jsonapi.JSONErrorResponse(ctx, goa.ErrInternal(err.Error())) } ctx.ResponseData.Header().Set("Location", providerURL) return ctx.TemporaryRedirect() } // No more providers to link. Redirect back to the original referrer originalReferrer, err := keycloak.getReferrer(ctx, *state) if err != nil { log.Error(ctx, map[string]interface{}{ "state": state, "err": err, }, "uknown state") jerrors, _ := jsonapi.ErrorToJSONAPIErrors(goa.ErrUnauthorized("uknown state. 
" + err.Error())) return ctx.Unauthorized(jerrors) } ctx.ResponseData.Header().Set("Location", originalReferrer) return ctx.TemporaryRedirect() } func nextProvider(currentProvider string) *string { for i, provider := range allProvidersToLink { if provider == currentProvider { if i+1 < len(allProvidersToLink) { return &allProvidersToLink[i+1] } return nil } } return nil } func (keycloak *KeycloakOAuthProvider) linkProvider(ctx linkInterface, req *goa.RequestData, res *goa.ResponseData, state string, sessionState string, clientSession string, provider string, nextProvider *string, brokerEndpoint string, clientID string) error { providerURL, err := getProviderURL(req, state, sessionState, clientSession, provider, nextProvider, brokerEndpoint, clientID) if err != nil { return jsonapi.JSONErrorResponse(ctx, goa.ErrInternal(err.Error())) } res.Header().Set("Location", providerURL) return ctx.TemporaryRedirect() } func (keycloak *KeycloakOAuthProvider) saveReferrer(ctx linkInterface, state uuid.UUID, referrer string, validReferrerURL string) error { matched, err := regexp.MatchString(validReferrerURL, referrer) if err != nil { log.Error(ctx, map[string]interface{}{ "referrer": referrer, "valid-referrer-url": validReferrerURL, "err": err, }, "Can't match referrer and whitelist regex") jerrors, _ := jsonapi.ErrorToJSONAPIErrors(goa.ErrInternal(err.Error())) return ctx.InternalServerError(jerrors) } if !matched { log.Error(ctx, map[string]interface{}{ "referrer": referrer, "valid-referrer-url": validReferrerURL, }, "Referrer not valid") jerrors, _ := jsonapi.ErrorToJSONAPIErrors(goa.ErrBadRequest("Not valid redirect URL")) return ctx.BadRequest(jerrors) } // TODO The state reference table will be collecting dead states left from some failed login attempts. // We need to clean up the old states from time to time. 
ref := auth.OauthStateReference{ ID: state, Referrer: referrer, } err = application.Transactional(keycloak.db, func(appl application.Application) error { _, err := appl.OauthStates().Create(ctx, &ref) return err }) if err != nil { log.Error(ctx, map[string]interface{}{ "state": state, "referrer": referrer, "err": err, }, "unable to create oauth state reference") jerrors, _ := jsonapi.ErrorToJSONAPIErrors(goa.ErrInternal("Unable to create oauth state reference " + err.Error())) return ctx.InternalServerError(jerrors) } return nil } func (keycloak *KeycloakOAuthProvider) getReferrer(ctx context.Context, state string) (string, error) { var referrer string stateID, err := uuid.FromString(state) if err != nil { log.Error(ctx, map[string]interface{}{ "state": state, "err": err, }, "unable to convert oauth state to uuid") return "", errors.New("Unable to convert oauth state to uuid. " + err.Error()) } err = application.Transactional(keycloak.db, func(appl application.Application) error { ref, err := appl.OauthStates().Load(ctx, stateID) if err != nil { return err } referrer = ref.Referrer err = appl.OauthStates().Delete(ctx, stateID) return err }) if err != nil { log.Error(ctx, map[string]interface{}{ "state": state, "err": err, }, "unable to delete oauth state reference") return "", errors.New("Unable to delete oauth state reference " + err.Error()) } return referrer, nil } func getProviderURL(req *goa.RequestData, state string, sessionState string, clientSession string, provider string, nextProvider *string, brokerEndpoint string, clientID string) (string, error) { var nextParam string if nextProvider != nil { nextParam = "&next=" + *nextProvider } callbackURL := rest.AbsoluteURL(req, "/api/login/linkcallback?provider="+provider+nextParam+"&sessionState="+sessionState+"&clientSession="+clientSession+"&state="+state) nonce := uuid.NewV4().String() s := nonce + sessionState + clientSession + provider h := sha256.New() h.Write([]byte(s)) hash := 
base64.StdEncoding.EncodeToString(h.Sum(nil)) linkingURL, err := url.Parse(brokerEndpoint + "/" + provider + "/link") if err != nil { return "", err } parameters := url.Values{} parameters.Add("client_id", clientID) parameters.Add("redirect_uri", callbackURL) parameters.Add("nonce", nonce) parameters.Add("hash", hash) linkingURL.RawQuery = parameters.Encode() return linkingURL.String(), nil } func encodeToken(referrer *url.URL, outhToken *oauth2.Token) error { str := outhToken.Extra("expires_in") expiresIn, err := strconv.Atoi(fmt.Sprintf("%v", str)) if err != nil { return errs.WithStack(errors.New("cant convert expires_in to integer " + err.Error())) } str = outhToken.Extra("refresh_expires_in") refreshExpiresIn, err := strconv.Atoi(fmt.Sprintf("%v", str)) if err != nil { return errs.WithStack(errors.New("cant convert refresh_expires_in to integer " + err.Error())) } tokenData := &app.TokenData{ AccessToken: &outhToken.AccessToken, RefreshToken: &outhToken.RefreshToken, TokenType: &outhToken.TokenType, ExpiresIn: &expiresIn, RefreshExpiresIn: &refreshExpiresIn, } b, err := json.Marshal(tokenData) if err != nil { return errs.WithStack(errors.New("cant marshal token data struct " + err.Error())) } parameters := url.Values{} parameters.Add("token", outhToken.AccessToken) // Temporary keep the old "token" param. We will drop this param as soon as UI adopt the new json param. parameters.Add("token_json", string(b)) referrer.RawQuery = parameters.Encode() return nil } // CreateOrUpdateKeycloakUser creates a user and a keyclaok identity. If the user and identity already exist then update them. 
func (keycloak *KeycloakOAuthProvider) CreateOrUpdateKeycloakUser(accessToken string, ctx context.Context) (*account.Identity, *account.User, error) { var identity *account.Identity var user *account.User claims, err := parseToken(accessToken, keycloak.TokenManager.PublicKey()) if err != nil { log.Error(ctx, map[string]interface{}{ "token": accessToken, "err": err, }, "unable to parse the token") return nil, nil, errors.New("unable to parse the token " + err.Error()) } if err := checkClaims(claims); err != nil { log.Error(ctx, map[string]interface{}{ "token": accessToken, "err": err, }, "invalid keycloak token claims") return nil, nil, errors.New("invalid keycloak token claims " + err.Error()) } keycloakIdentityID, _ := uuid.FromString(claims.Subject) identities, err := keycloak.Identities.Query(account.IdentityFilterByID(keycloakIdentityID), account.IdentityWithUser()) if err != nil { log.Error(ctx, map[string]interface{}{ "keycloak_identity_id": keycloakIdentityID, "err": err, }, "unable to query for an identity by ID") return nil, nil, errors.New("Error during querying for an identity by ID " + err.Error()) } // TODO REMOVE THIS WORKAROUND // ----------------- BEGIN WORKAROUND ----------------- if len(identities) == 0 { // This is not what actaully should happen. // This is a workaround for Keyclaok and DB unsynchronization. // The old identity will be removed. The new one with proper ID will be created. // All links to the old identities (in Work Items for example) will still point to the deleted identity. 
// No Idenity with the keycloak user ID is found, try to search by the username identities, err = keycloak.Identities.Query(account.IdentityFilterByUsername(claims.Username), account.IdentityWithUser()) if err != nil { log.Error(ctx, map[string]interface{}{ "keycloakIdentityUsername": claims.Username, "err": err, }, "unable to query for an identity by username") return nil, nil, errors.New("Error during querying for an identity by username " + err.Error()) } if len(identities) != 0 { idn := identities[0] if idn.ProviderType == account.KeycloakIDP { log.Warn(ctx, map[string]interface{}{ "keycloak_identity_id": keycloakIdentityID, "core_identity_id": idn.ID, "keycloak_identity_username": claims.Username, }, "the identity ID fetched from Keycloak and the identity ID from the core DB for the same username don't match. The identity will be re-created.") err = application.Transactional(keycloak.db, func(appl application.Application) error { user = &idn.User identity = &account.Identity{ ID: keycloakIdentityID, Username: claims.Username, ProviderType: account.KeycloakIDP, UserID: account.NullUUID{UUID: user.ID, Valid: true}, User: *user} err := appl.Identities().Delete(ctx, idn.ID) if err != nil { return err } err = appl.Identities().Create(ctx, identity) return err }) if err != nil { log.Error(ctx, map[string]interface{}{ "keycloak_identity_id": keycloakIdentityID, "core_identity_id": idn.ID, "keycloak_identity_username": claims.Username, "err": err, }, "unable to update identity") return nil, nil, errors.New("Cant' create user/identity " + err.Error()) } identities[0] = identity } else { // The found identity is not a KC identity, ignore it // TODO we also should make sure that the email used by this Identity is not the same. 
// It may happen if the found identity was imported from a remote issue tracker and has the same email identities = []*account.Identity{} } } } // ----------------- END WORKAROUND ----------------- if len(identities) == 0 { // No Idenity found, create a new Identity and User user = new(account.User) fillUser(claims, user) err = application.Transactional(keycloak.db, func(appl application.Application) error { err := appl.Users().Create(ctx, user) if err != nil { return err } identity = &account.Identity{ ID: keycloakIdentityID, Username: claims.Username, ProviderType: account.KeycloakIDP, UserID: account.NullUUID{UUID: user.ID, Valid: true}, User: *user} err = appl.Identities().Create(ctx, identity) return err }) if err != nil { log.Error(ctx, map[string]interface{}{ "keycloak_identity_id": keycloakIdentityID, "username": claims.Username, "err": err, }, "unable to create user/identity") return nil, nil, errors.New("Cant' create user/identity " + err.Error()) } } else { user = &identities[0].User if user.ID == uuid.Nil { log.Error(ctx, map[string]interface{}{ "identity_id": keycloakIdentityID, }, "Found Keycloak identity is not linked to any User") return nil, nil, errors.New("found Keycloak identity is not linked to any User") } // let's update the existing user with the fullname, email and avatar from Keycloak, // in case the user changed them since the last time he/she logged in fillUser(claims, user) err = keycloak.Users.Save(ctx, user) if err != nil { log.Error(ctx, map[string]interface{}{ "user_id": user.ID, "err": err, }, "unable to update user") return nil, nil, errors.New("Cant' update user " + err.Error()) } } return identity, user, nil } func redirectWithError(ctx *app.AuthorizeLoginContext, knownReferrer string, errorString string) error { ctx.ResponseData.Header().Set("Location", knownReferrer+"?error="+errorString) return ctx.TemporaryRedirect() } func parseToken(tokenString string, publicKey *rsa.PublicKey) (*keycloakTokenClaims, error) { token, err := 
jwt.ParseWithClaims(tokenString, &keycloakTokenClaims{}, func(t *jwt.Token) (interface{}, error) { return publicKey, nil }) if err != nil { return nil, err } claims := token.Claims.(*keycloakTokenClaims) if token.Valid { return claims, nil } return nil, errs.WithStack(errors.New("token is not valid")) } func generateGravatarURL(email string) (string, error) { if email == "" { return "", nil } grURL, err := url.Parse("https://www.gravatar.com/avatar/") if err != nil { return "", errs.WithStack(err) } hash := md5.New() hash.Write([]byte(email)) grURL.Path += fmt.Sprintf("%v", hex.EncodeToString(hash.Sum(nil))) + ".jpg" // We can use our own default image if there is no gravatar available for this email // defaultImage := "someDefaultImageURL.jpg" // parameters := url.Values{} // parameters.Add("d", fmt.Sprintf("%v", defaultImage)) // grURL.RawQuery = parameters.Encode() urlStr := grURL.String() return urlStr, nil } func checkClaims(claims *keycloakTokenClaims) error { if claims.Subject == "" { return errors.New("subject claim not found in token") } _, err := uuid.FromString(claims.Subject) if err != nil { return errors.New("subject claim from token is not UUID " + err.Error()) } if claims.Username == "" { return errors.New("username claim not found in token") } if claims.Email == "" { return errors.New("email claim not found in token") } return nil } func fillUser(claims *keycloakTokenClaims, user *account.User) error { user.FullName = claims.Name user.Email = claims.Email image, err := generateGravatarURL(claims.Email) if err != nil { log.Warn(nil, map[string]interface{}{ "user_full_name": user.FullName, "err": err, }, "error when generating gravatar") return errors.New("Error when generating gravatar " + err.Error()) } user.ImageURL = image return nil } // ContextIdentity returns the identity's ID found in given context // Uses tokenManager.Locate to fetch the identity of currently logged in user func ContextIdentity(ctx context.Context) (*uuid.UUID, error) { tm := 
tokencontext.ReadTokenManagerFromContext(ctx) if tm == nil { log.Error(ctx, map[string]interface{}{ "token": tm, }, "missing token manager") return nil, errs.New("Missing token manager") } // As mentioned in token.go, we can now safely convert tm to a token.Manager manager := tm.(token.Manager) uuid, err := manager.Locate(ctx) if err != nil { // TODO : need a way to define user as Guest log.Error(ctx, map[string]interface{}{ "uuid": uuid, "token_manager": manager, "err": err, }, "identity belongs to a Guest User") return nil, errs.WithStack(err) } return &uuid, nil } // InjectTokenManager is a middleware responsible for setting up tokenManager in the context for every request. func InjectTokenManager(tokenManager token.Manager) goa.Middleware { return func(h goa.Handler) goa.Handler { return func(ctx context.Context, rw http.ResponseWriter, req *http.Request) error { ctxWithTM := tokencontext.ContextWithTokenManager(ctx, tokenManager) return h(ctxWithTM, rw, req) } } } Disable hardcoded auto-linking during login. 
Relay on ?link param instead (#1115) package login import ( "crypto/md5" "crypto/rsa" "crypto/sha256" "encoding/base64" "encoding/hex" "encoding/json" "errors" "fmt" "net/http" "net/url" "regexp" "strconv" errs "github.com/pkg/errors" "github.com/almighty/almighty-core/account" "github.com/almighty/almighty-core/app" "github.com/almighty/almighty-core/application" "github.com/almighty/almighty-core/auth" er "github.com/almighty/almighty-core/errors" "github.com/almighty/almighty-core/jsonapi" "github.com/almighty/almighty-core/log" tokencontext "github.com/almighty/almighty-core/login/token_context" "github.com/almighty/almighty-core/rest" "github.com/almighty/almighty-core/token" jwt "github.com/dgrijalva/jwt-go" "github.com/goadesign/goa" goajwt "github.com/goadesign/goa/middleware/security/jwt" uuid "github.com/satori/go.uuid" "golang.org/x/net/context" "golang.org/x/oauth2" ) // NewKeycloakOAuthProvider creates a new login.Service capable of using keycloak for authorization func NewKeycloakOAuthProvider(config *oauth2.Config, identities account.IdentityRepository, users account.UserRepository, tokenManager token.Manager, db application.DB) *KeycloakOAuthProvider { return &KeycloakOAuthProvider{ config: config, Identities: identities, Users: users, TokenManager: tokenManager, db: db, } } // KeycloakOAuthProvider represents a keyclaok IDP type KeycloakOAuthProvider struct { config *oauth2.Config Identities account.IdentityRepository Users account.UserRepository TokenManager token.Manager db application.DB } // KeycloakOAuthService represents keycloak OAuth service interface type KeycloakOAuthService interface { Perform(ctx *app.AuthorizeLoginContext, authEndpoint string, tokenEndpoint string, brokerEndpoint string, validRedirectURL string) error CreateOrUpdateKeycloakUser(accessToken string, ctx context.Context) (*account.Identity, *account.User, error) Link(ctx *app.LinkLoginContext, brokerEndpoint string, clientID string, validRedirectURL string) error 
LinkSession(ctx *app.LinksessionLoginContext, brokerEndpoint string, clientID string, validRedirectURL string) error LinkCallback(ctx *app.LinkcallbackLoginContext, brokerEndpoint string, clientID string) error } type linkInterface interface { context.Context jsonapi.InternalServerError TemporaryRedirect() error BadRequest(r *app.JSONAPIErrors) error } // keycloakTokenClaims represents standard Keycloak token claims type keycloakTokenClaims struct { Name string `json:"name"` Username string `json:"preferred_username"` GivenName string `json:"given_name"` FamilyName string `json:"family_name"` Email string `json:"email"` SessionState string `json:"session_state"` ClientSession string `json:"client_session"` jwt.StandardClaims } var allProvidersToLink = []string{"github", "openshift-v3"} const ( initiateLinkingParam = "initlinking" ) // Perform performs authenticatin func (keycloak *KeycloakOAuthProvider) Perform(ctx *app.AuthorizeLoginContext, authEndpoint string, tokenEndpoint string, brokerEndpoint string, validRedirectURL string) error { state := ctx.Params.Get("state") code := ctx.Params.Get("code") if code != "" { // After redirect from oauth provider // validate known state knownReferrer, err := keycloak.getReferrer(ctx, state) if err != nil { log.Error(ctx, map[string]interface{}{ "state": state, "err": err, }, "uknown state") jerrors, _ := jsonapi.ErrorToJSONAPIErrors(goa.ErrUnauthorized("uknown state. 
" + err.Error())) return ctx.Unauthorized(jerrors) } keycloakToken, err := keycloak.config.Exchange(ctx, code) if err != nil { log.Error(ctx, map[string]interface{}{ "code": code, "err": err, }, "keycloak exchange operation failed") return redirectWithError(ctx, knownReferrer, err.Error()) } _, _, err = keycloak.CreateOrUpdateKeycloakUser(keycloakToken.AccessToken, ctx) if err != nil { log.Error(ctx, map[string]interface{}{ "token": keycloakToken.AccessToken, "err": err, }, "failed to create a user and KeyCloak identity using the access token") return redirectWithError(ctx, knownReferrer, err.Error()) } // redirect back to original referrel referrerURL, err := url.Parse(knownReferrer) if err != nil { return redirectWithError(ctx, knownReferrer, err.Error()) } err = encodeToken(referrerURL, keycloakToken) if err != nil { return redirectWithError(ctx, knownReferrer, err.Error()) } referrerStr := referrerURL.String() // Check if federated identities are not likned yet // TODO we probably won't want to check it for the existing users. // But we need it for now because old users still may not be linked. linked, err := keycloak.checkAllFederatedIdentities(ctx, keycloakToken.AccessToken, brokerEndpoint) if err != nil { return jsonapi.JSONErrorResponse(ctx, goa.ErrInternal(err.Error())) } // Return linked=true param if account has been linked to all IdPs or linked=false if not. 
if linked { referrerStr = referrerStr + "&linked=true" ctx.ResponseData.Header().Set("Location", referrerStr) return ctx.TemporaryRedirect() } if s, err := strconv.ParseBool(referrerURL.Query().Get(initiateLinkingParam)); err != nil || !s { referrerStr = referrerStr + "&linked=false" ctx.ResponseData.Header().Set("Location", referrerStr) return ctx.TemporaryRedirect() } referrerStr = referrerStr + "&linked=true" return keycloak.autoLinkProvidersDuringLogin(ctx, keycloakToken.AccessToken, referrerStr) } // First time access, redirect to oauth provider redirect := ctx.Redirect referrer := ctx.RequestData.Header.Get("Referer") if redirect == nil { if referrer == "" { jerrors, _ := jsonapi.ErrorToJSONAPIErrors(goa.ErrBadRequest("Referer Header and redirect param are both empty. At least one should be specified.")) return ctx.BadRequest(jerrors) } redirect = &referrer } // store referrer in a state reference to redirect later log.Info(ctx, map[string]interface{}{ "referrer": referrer, "redirect": redirect, }, "Got Request from!") stateID := uuid.NewV4() if ctx.Link != nil && *ctx.Link { // We need to save the "link" param so we don't lose it when redirect to sso for auth and back to core. // TODO find a better place to save this param between redirects. 
linkURL, err := url.Parse(*redirect) if err != nil { return jsonapi.JSONErrorResponse(ctx, goa.ErrBadRequest(err.Error())) } parameters := linkURL.Query() parameters.Add(initiateLinkingParam, strconv.FormatBool(*ctx.Link)) linkURL.RawQuery = parameters.Encode() s := linkURL.String() redirect = &s } err := keycloak.saveReferrer(ctx, stateID, *redirect, validRedirectURL) if err != nil { log.Error(ctx, map[string]interface{}{ "state": stateID, "referrer": referrer, "redirect": redirect, "err": err, }, "unable to save the state") return err } keycloak.config.Endpoint.AuthURL = authEndpoint keycloak.config.Endpoint.TokenURL = tokenEndpoint keycloak.config.RedirectURL = rest.AbsoluteURL(ctx.RequestData, "/api/login/authorize") redirectURL := keycloak.config.AuthCodeURL(stateID.String(), oauth2.AccessTypeOnline) ctx.ResponseData.Header().Set("Location", redirectURL) return ctx.TemporaryRedirect() } func (keycloak *KeycloakOAuthProvider) autoLinkProvidersDuringLogin(ctx *app.AuthorizeLoginContext, token string, referrerURL string) error { // Link all available Identity Providers linkURL, err := url.Parse(rest.AbsoluteURL(ctx.RequestData, "/api/login/linksession")) if err != nil { return jsonapi.JSONErrorResponse(ctx, goa.ErrInternal(err.Error())) } claims, err := parseToken(token, keycloak.TokenManager.PublicKey()) if err != nil { log.Error(ctx, map[string]interface{}{ "err": err, }, "unable to parse token") return jsonapi.JSONErrorResponse(ctx, goa.ErrUnauthorized(err.Error())) } parameters := url.Values{} parameters.Add("redirect", referrerURL) parameters.Add("sessionState", fmt.Sprintf("%v", claims.SessionState)) parameters.Add("clientSession", fmt.Sprintf("%v", claims.ClientSession)) linkURL.RawQuery = parameters.Encode() ctx.ResponseData.Header().Set("Location", linkURL.String()) return ctx.TemporaryRedirect() } // checkAllFederatedIdentities returns false if there is at least one federated identity not linked to the account func (keycloak *KeycloakOAuthProvider) 
checkAllFederatedIdentities(ctx context.Context, token string, brokerEndpoint string) (bool, error) { for _, provider := range allProvidersToLink { linked, err := keycloak.checkFederatedIdentity(ctx, token, brokerEndpoint, provider) if err != nil { return false, err } if !linked { return false, nil } } return true, nil } // checkFederatedIdentity returns true if the account is already linked to the identity provider func (keycloak *KeycloakOAuthProvider) checkFederatedIdentity(ctx context.Context, token string, brokerEndpoint string, provider string) (bool, error) { req, err := http.NewRequest("GET", brokerEndpoint+"/"+provider+"/token", nil) if err != nil { log.Error(ctx, map[string]interface{}{ "err": err.Error(), }, "Unable to crete http request") return false, er.NewInternalError("unable to crete http request " + err.Error()) } req.Header.Add("Authorization", "Bearer "+token) res, err := http.DefaultClient.Do(req) if err != nil { log.Error(ctx, map[string]interface{}{ "provider": provider, "err": err.Error(), }, "Unable to obtain a federated identity token") return false, er.NewInternalError("Unable to obtain a federated identity token " + err.Error()) } return res.StatusCode == http.StatusOK, nil } // Link links identity provider(s) to the user's account using user's access token func (keycloak *KeycloakOAuthProvider) Link(ctx *app.LinkLoginContext, brokerEndpoint string, clientID string, validRedirectURL string) error { token := goajwt.ContextJWT(ctx) claims := token.Claims.(jwt.MapClaims) sessionState := claims["session_state"] clientSession := claims["client_session"] if sessionState == nil || clientSession == nil { return jsonapi.JSONErrorResponse(ctx, goa.ErrInternal("Session state or client session are missing in token")) } ss := sessionState.(*string) cs := clientSession.(*string) return keycloak.linkAccountToProviders(ctx, ctx.RequestData, ctx.ResponseData, ctx.Redirect, ctx.Provider, *ss, *cs, brokerEndpoint, clientID, validRedirectURL) } // 
LinkSession links identity provider(s) to the user's account using session state func (keycloak *KeycloakOAuthProvider) LinkSession(ctx *app.LinksessionLoginContext, brokerEndpoint string, clientID string, validRedirectURL string) error { if ctx.SessionState == nil || ctx.ClientSession == nil { return jsonapi.JSONErrorResponse(ctx, goa.ErrBadRequest("Authorization header or session state and client session params are required")) } return keycloak.linkAccountToProviders(ctx, ctx.RequestData, ctx.ResponseData, ctx.Redirect, ctx.Provider, *ctx.SessionState, *ctx.ClientSession, brokerEndpoint, clientID, validRedirectURL) } func (keycloak *KeycloakOAuthProvider) linkAccountToProviders(ctx linkInterface, req *goa.RequestData, res *goa.ResponseData, redirect *string, provider *string, sessionState string, clientSession, brokerEndpoint string, clientID string, validRedirectURL string) error { referrer := req.Header.Get("Referer") rdr := redirect if rdr == nil { rdr = &referrer } state := uuid.NewV4() err := keycloak.saveReferrer(ctx, state, *rdr, validRedirectURL) if err != nil { return err } if provider != nil { return keycloak.linkProvider(ctx, req, res, state.String(), sessionState, clientSession, *provider, nil, brokerEndpoint, clientID) } return keycloak.linkProvider(ctx, req, res, state.String(), sessionState, clientSession, allProvidersToLink[0], &allProvidersToLink[1], brokerEndpoint, clientID) } // LinkCallback redirects to original referrer when Identity Provider account are linked to the user account func (keycloak *KeycloakOAuthProvider) LinkCallback(ctx *app.LinkcallbackLoginContext, brokerEndpoint string, clientID string) error { state := ctx.State errorMessage := ctx.Params.Get("error") if state == nil { jsonapi.JSONErrorResponse(ctx, goa.ErrInternal("State is empty. 
"+errorMessage)) } if errorMessage != "" { return jsonapi.JSONErrorResponse(ctx, goa.ErrInternal(errorMessage)) } next := ctx.Next if next != nil { // Link the next provider sessionState := ctx.SessionState clientSession := ctx.ClientSession if sessionState == nil || clientSession == nil { log.Error(ctx, map[string]interface{}{ "state": state, }, "Session state or client session state is empty") jerrors, _ := jsonapi.ErrorToJSONAPIErrors(goa.ErrBadRequest("Session state or client session state is empty")) return ctx.Unauthorized(jerrors) } providerURL, err := getProviderURL(ctx.RequestData, *state, *sessionState, *clientSession, *next, nextProvider(*next), brokerEndpoint, clientID) if err != nil { return jsonapi.JSONErrorResponse(ctx, goa.ErrInternal(err.Error())) } ctx.ResponseData.Header().Set("Location", providerURL) return ctx.TemporaryRedirect() } // No more providers to link. Redirect back to the original referrer originalReferrer, err := keycloak.getReferrer(ctx, *state) if err != nil { log.Error(ctx, map[string]interface{}{ "state": state, "err": err, }, "uknown state") jerrors, _ := jsonapi.ErrorToJSONAPIErrors(goa.ErrUnauthorized("uknown state. 
" + err.Error())) return ctx.Unauthorized(jerrors) } ctx.ResponseData.Header().Set("Location", originalReferrer) return ctx.TemporaryRedirect() } func nextProvider(currentProvider string) *string { for i, provider := range allProvidersToLink { if provider == currentProvider { if i+1 < len(allProvidersToLink) { return &allProvidersToLink[i+1] } return nil } } return nil } func (keycloak *KeycloakOAuthProvider) linkProvider(ctx linkInterface, req *goa.RequestData, res *goa.ResponseData, state string, sessionState string, clientSession string, provider string, nextProvider *string, brokerEndpoint string, clientID string) error { providerURL, err := getProviderURL(req, state, sessionState, clientSession, provider, nextProvider, brokerEndpoint, clientID) if err != nil { return jsonapi.JSONErrorResponse(ctx, goa.ErrInternal(err.Error())) } res.Header().Set("Location", providerURL) return ctx.TemporaryRedirect() } func (keycloak *KeycloakOAuthProvider) saveReferrer(ctx linkInterface, state uuid.UUID, referrer string, validReferrerURL string) error { matched, err := regexp.MatchString(validReferrerURL, referrer) if err != nil { log.Error(ctx, map[string]interface{}{ "referrer": referrer, "valid-referrer-url": validReferrerURL, "err": err, }, "Can't match referrer and whitelist regex") jerrors, _ := jsonapi.ErrorToJSONAPIErrors(goa.ErrInternal(err.Error())) return ctx.InternalServerError(jerrors) } if !matched { log.Error(ctx, map[string]interface{}{ "referrer": referrer, "valid-referrer-url": validReferrerURL, }, "Referrer not valid") jerrors, _ := jsonapi.ErrorToJSONAPIErrors(goa.ErrBadRequest("Not valid redirect URL")) return ctx.BadRequest(jerrors) } // TODO The state reference table will be collecting dead states left from some failed login attempts. // We need to clean up the old states from time to time. 
ref := auth.OauthStateReference{ ID: state, Referrer: referrer, } err = application.Transactional(keycloak.db, func(appl application.Application) error { _, err := appl.OauthStates().Create(ctx, &ref) return err }) if err != nil { log.Error(ctx, map[string]interface{}{ "state": state, "referrer": referrer, "err": err, }, "unable to create oauth state reference") jerrors, _ := jsonapi.ErrorToJSONAPIErrors(goa.ErrInternal("Unable to create oauth state reference " + err.Error())) return ctx.InternalServerError(jerrors) } return nil } func (keycloak *KeycloakOAuthProvider) getReferrer(ctx context.Context, state string) (string, error) { var referrer string stateID, err := uuid.FromString(state) if err != nil { log.Error(ctx, map[string]interface{}{ "state": state, "err": err, }, "unable to convert oauth state to uuid") return "", errors.New("Unable to convert oauth state to uuid. " + err.Error()) } err = application.Transactional(keycloak.db, func(appl application.Application) error { ref, err := appl.OauthStates().Load(ctx, stateID) if err != nil { return err } referrer = ref.Referrer err = appl.OauthStates().Delete(ctx, stateID) return err }) if err != nil { log.Error(ctx, map[string]interface{}{ "state": state, "err": err, }, "unable to delete oauth state reference") return "", errors.New("Unable to delete oauth state reference " + err.Error()) } return referrer, nil } func getProviderURL(req *goa.RequestData, state string, sessionState string, clientSession string, provider string, nextProvider *string, brokerEndpoint string, clientID string) (string, error) { var nextParam string if nextProvider != nil { nextParam = "&next=" + *nextProvider } callbackURL := rest.AbsoluteURL(req, "/api/login/linkcallback?provider="+provider+nextParam+"&sessionState="+sessionState+"&clientSession="+clientSession+"&state="+state) nonce := uuid.NewV4().String() s := nonce + sessionState + clientSession + provider h := sha256.New() h.Write([]byte(s)) hash := 
base64.StdEncoding.EncodeToString(h.Sum(nil)) linkingURL, err := url.Parse(brokerEndpoint + "/" + provider + "/link") if err != nil { return "", err } parameters := url.Values{} parameters.Add("client_id", clientID) parameters.Add("redirect_uri", callbackURL) parameters.Add("nonce", nonce) parameters.Add("hash", hash) linkingURL.RawQuery = parameters.Encode() return linkingURL.String(), nil } func encodeToken(referrer *url.URL, outhToken *oauth2.Token) error { str := outhToken.Extra("expires_in") expiresIn, err := strconv.Atoi(fmt.Sprintf("%v", str)) if err != nil { return errs.WithStack(errors.New("cant convert expires_in to integer " + err.Error())) } str = outhToken.Extra("refresh_expires_in") refreshExpiresIn, err := strconv.Atoi(fmt.Sprintf("%v", str)) if err != nil { return errs.WithStack(errors.New("cant convert refresh_expires_in to integer " + err.Error())) } tokenData := &app.TokenData{ AccessToken: &outhToken.AccessToken, RefreshToken: &outhToken.RefreshToken, TokenType: &outhToken.TokenType, ExpiresIn: &expiresIn, RefreshExpiresIn: &refreshExpiresIn, } b, err := json.Marshal(tokenData) if err != nil { return errs.WithStack(errors.New("cant marshal token data struct " + err.Error())) } parameters := referrer.Query() parameters.Add("token", outhToken.AccessToken) // Temporary keep the old "token" param. We will drop this param as soon as UI adopt the new json param. parameters.Add("token_json", string(b)) referrer.RawQuery = parameters.Encode() return nil } // CreateOrUpdateKeycloakUser creates a user and a keyclaok identity. If the user and identity already exist then update them. 
func (keycloak *KeycloakOAuthProvider) CreateOrUpdateKeycloakUser(accessToken string, ctx context.Context) (*account.Identity, *account.User, error) {
	var identity *account.Identity
	var user *account.User

	// Parse and verify the token, then make sure the claims needed below
	// (subject UUID, username, email) are all present.
	claims, err := parseToken(accessToken, keycloak.TokenManager.PublicKey())
	if err != nil {
		log.Error(ctx, map[string]interface{}{
			"token": accessToken,
			"err":   err,
		}, "unable to parse the token")
		return nil, nil, errors.New("unable to parse the token " + err.Error())
	}

	if err := checkClaims(claims); err != nil {
		log.Error(ctx, map[string]interface{}{
			"token": accessToken,
			"err":   err,
		}, "invalid keycloak token claims")
		return nil, nil, errors.New("invalid keycloak token claims " + err.Error())
	}

	// checkClaims already verified Subject is a valid UUID, so the parse
	// error can be safely ignored here.
	keycloakIdentityID, _ := uuid.FromString(claims.Subject)
	identities, err := keycloak.Identities.Query(account.IdentityFilterByID(keycloakIdentityID), account.IdentityWithUser())
	if err != nil {
		log.Error(ctx, map[string]interface{}{
			"keycloak_identity_id": keycloakIdentityID,
			"err":                  err,
		}, "unable to query for an identity by ID")
		return nil, nil, errors.New("Error during querying for an identity by ID " + err.Error())
	}

	// TODO REMOVE THIS WORKAROUND
	// ----------------- BEGIN WORKAROUND -----------------
	if len(identities) == 0 {
		// This is not what actually should happen.
		// This is a workaround for Keycloak and DB unsynchronization.
		// The old identity will be removed. The new one with the proper ID
		// will be created. All links to the old identities (in Work Items for
		// example) will still point to the deleted identity.

		// No Identity with the keycloak user ID is found; try to search by the username.
		identities, err = keycloak.Identities.Query(account.IdentityFilterByUsername(claims.Username), account.IdentityWithUser())
		if err != nil {
			log.Error(ctx, map[string]interface{}{
				"keycloakIdentityUsername": claims.Username,
				"err":                      err,
			}, "unable to query for an identity by username")
			return nil, nil, errors.New("Error during querying for an identity by username " + err.Error())
		}
		if len(identities) != 0 {
			idn := identities[0]
			if idn.ProviderType == account.KeycloakIDP {
				log.Warn(ctx, map[string]interface{}{
					"keycloak_identity_id":       keycloakIdentityID,
					"core_identity_id":           idn.ID,
					"keycloak_identity_username": claims.Username,
				}, "the identity ID fetched from Keycloak and the identity ID from the core DB for the same username don't match. The identity will be re-created.")
				// Re-create the identity with the ID from Keycloak inside one
				// transaction: delete the stale identity first, then create
				// the replacement linked to the same user.
				err = application.Transactional(keycloak.db, func(appl application.Application) error {
					user = &idn.User
					identity = &account.Identity{
						ID:           keycloakIdentityID,
						Username:     claims.Username,
						ProviderType: account.KeycloakIDP,
						UserID:       account.NullUUID{UUID: user.ID, Valid: true},
						User:         *user}
					err := appl.Identities().Delete(ctx, idn.ID)
					if err != nil {
						return err
					}
					err = appl.Identities().Create(ctx, identity)
					return err
				})
				if err != nil {
					log.Error(ctx, map[string]interface{}{
						"keycloak_identity_id":       keycloakIdentityID,
						"core_identity_id":           idn.ID,
						"keycloak_identity_username": claims.Username,
						"err":                        err,
					}, "unable to update identity")
					return nil, nil, errors.New("Cant' create user/identity " + err.Error())
				}
				identities[0] = identity
			} else {
				// The found identity is not a KC identity; ignore it.
				// TODO we also should make sure that the email used by this
				// Identity is not the same. It may happen if the found
				// identity was imported from a remote issue tracker and has
				// the same email.
				identities = []*account.Identity{}
			}
		}
	}
	// ----------------- END WORKAROUND -----------------

	if len(identities) == 0 {
		// No Identity found: create a new Identity and User in one transaction.
		user = new(account.User)
		// fillUser's error is ignored here — presumably best-effort gravatar
		// generation; TODO confirm this is intentional.
		fillUser(claims, user)
		err = application.Transactional(keycloak.db, func(appl application.Application) error {
			err := appl.Users().Create(ctx, user)
			if err != nil {
				return err
			}
			identity = &account.Identity{
				ID:           keycloakIdentityID,
				Username:     claims.Username,
				ProviderType: account.KeycloakIDP,
				UserID:       account.NullUUID{UUID: user.ID, Valid: true},
				User:         *user}
			err = appl.Identities().Create(ctx, identity)
			return err
		})
		if err != nil {
			log.Error(ctx, map[string]interface{}{
				"keycloak_identity_id": keycloakIdentityID,
				"username":             claims.Username,
				"err":                  err,
			}, "unable to create user/identity")
			return nil, nil, errors.New("Cant' create user/identity " + err.Error())
		}
	} else {
		// NOTE(review): `identity` is never assigned in this branch, so for an
		// already-existing identity this function returns (nil, user, nil).
		// Looks like a bug — verify callers before changing.
		user = &identities[0].User
		if user.ID == uuid.Nil {
			log.Error(ctx, map[string]interface{}{
				"identity_id": keycloakIdentityID,
			}, "Found Keycloak identity is not linked to any User")
			return nil, nil, errors.New("found Keycloak identity is not linked to any User")
		}
		// let's update the existing user with the fullname, email and avatar from Keycloak,
		// in case the user changed them since the last time he/she logged in
		fillUser(claims, user)
		err = keycloak.Users.Save(ctx, user)
		if err != nil {
			log.Error(ctx, map[string]interface{}{
				"user_id": user.ID,
				"err":     err,
			}, "unable to update user")
			return nil, nil, errors.New("Cant' update user " + err.Error())
		}
	}
	return identity, user, nil
}

// redirectWithError redirects back to the known referrer, passing the error
// message in the "error" query parameter.
func redirectWithError(ctx *app.AuthorizeLoginContext, knownReferrer string, errorString string) error {
	ctx.ResponseData.Header().Set("Location", knownReferrer+"?error="+errorString)
	return ctx.TemporaryRedirect()
}

// parseToken parses the access token, verifies its signature against the
// given RSA public key and returns the Keycloak claims.
func parseToken(tokenString string, publicKey *rsa.PublicKey) (*keycloakTokenClaims, error) {
	token, err := jwt.ParseWithClaims(tokenString, &keycloakTokenClaims{}, func(t *jwt.Token) (interface{}, error) {
		return publicKey, nil
	})
	if err != nil {
		return nil, err
	}
	claims := token.Claims.(*keycloakTokenClaims)
	if token.Valid {
		return claims, nil
	}
	return nil, errs.WithStack(errors.New("token is not valid"))
}

// generateGravatarURL returns the gravatar URL for the given email, or an
// empty string (and no error) when the email is empty.
func generateGravatarURL(email string) (string, error) {
	if email == "" {
		return "", nil
	}
	grURL, err := url.Parse("https://www.gravatar.com/avatar/")
	if err != nil {
		return "", errs.WithStack(err)
	}
	// Gravatar addresses avatars by the MD5 hex digest of the email address.
	hash := md5.New()
	hash.Write([]byte(email))
	grURL.Path += fmt.Sprintf("%v", hex.EncodeToString(hash.Sum(nil))) + ".jpg"

	// We can use our own default image if there is no gravatar available for this email
	// defaultImage := "someDefaultImageURL.jpg"
	// parameters := url.Values{}
	// parameters.Add("d", fmt.Sprintf("%v", defaultImage))
	// grURL.RawQuery = parameters.Encode()

	urlStr := grURL.String()
	return urlStr, nil
}

// checkClaims validates that the claims required for creating or updating a
// user are present: a subject that is a UUID, a username and an email.
func checkClaims(claims *keycloakTokenClaims) error {
	if claims.Subject == "" {
		return errors.New("subject claim not found in token")
	}
	_, err := uuid.FromString(claims.Subject)
	if err != nil {
		return errors.New("subject claim from token is not UUID " + err.Error())
	}
	if claims.Username == "" {
		return errors.New("username claim not found in token")
	}
	if claims.Email == "" {
		return errors.New("email claim not found in token")
	}
	return nil
}

// fillUser copies the full name and email from the token claims into the
// user and derives the gravatar image URL from the email.
func fillUser(claims *keycloakTokenClaims, user *account.User) error {
	user.FullName = claims.Name
	user.Email = claims.Email
	image, err := generateGravatarURL(claims.Email)
	if err != nil {
		log.Warn(nil, map[string]interface{}{
			"user_full_name": user.FullName,
			"err":            err,
		}, "error when generating gravatar")
		return errors.New("Error when generating gravatar " + err.Error())
	}
	user.ImageURL = image
	return nil
}

// ContextIdentity returns the identity's ID found in given context
// Uses tokenManager.Locate to fetch the identity of currently logged in user
func ContextIdentity(ctx context.Context) (*uuid.UUID, error) {
	tm := tokencontext.ReadTokenManagerFromContext(ctx)
	if tm == nil {
		log.Error(ctx, map[string]interface{}{
			"token": tm,
		}, "missing token manager")
		return nil, errs.New("Missing token manager")
	}
	// As mentioned in token.go, we can now safely convert tm to a token.Manager
	manager := tm.(token.Manager)
	uuid, err := manager.Locate(ctx)
	if err != nil {
		// TODO : need a way to define user as Guest
		log.Error(ctx, map[string]interface{}{
			"uuid":          uuid,
			"token_manager": manager,
			"err":           err,
		}, "identity belongs to a Guest User")
		return nil, errs.WithStack(err)
	}
	return &uuid, nil
}

// InjectTokenManager is a middleware responsible for setting up tokenManager in the context for every request.
func InjectTokenManager(tokenManager token.Manager) goa.Middleware {
	return func(h goa.Handler) goa.Handler {
		return func(ctx context.Context, rw http.ResponseWriter, req *http.Request) error {
			ctxWithTM := tokencontext.ContextWithTokenManager(ctx, tokenManager)
			return h(ctxWithTM, rw, req)
		}
	}
}
package models import ( "os/exec" "github.com/sai-lab/mouryou/lib/check" ) type LoadBalancerStruct struct { VirtualIP string `json:"virtual_ip"` Algorithm string `json:"algorithm"` ThresholdOut float64 `json:"threshold_out"` ThresholdIn float64 `json:"threshold_in"` Margin float64 `json:"margin"` ScaleOut int `json:"scale_out"` ScaleIn int `json:"scale_in"` } func (balancer LoadBalancerStruct) Initialize() { exec.Command("ip", "addr", "add", balancer.VirtualIP, "label", "eth0:vip", "dev", "eth0").Run() err := exec.Command("ipvsadm", "-C").Run() check.Error(err) err = exec.Command("ipvsadm", "-A", "-t", balancer.VirtualIP+":http", "-s", balancer.Algorithm).Run() check.Error(err) } func (balancer LoadBalancerStruct) ChangeThresholdOut(w, n int) { var ocRate float64 ocRate = float64(w) / float64(n) switch { case ocRate <= 0.3: threshold = 0.5 case ocRate <= 0.5: threshold = 0.6 case ocRate <= 0.7: threshold = 0.7 case ocRate <= 0.9: threshold = 0.8 case ocRate <= 1.0: threshold = 0.9 } } func (balancer LoadBalancerStruct) ThHigh(w, n int) float64 { return threshold } func (balancer LoadBalancerStruct) ThLow(w int) float64 { return threshold*float64(w) - balancer.Margin } func (balancer LoadBalancerStruct) Add(host string) error { err := exec.Command("ipvsadm", "-a", "-t", balancer.VirtualIP+":http", "-r", host+":http", "-w", "0", "-g").Run() if err != nil { return err } return nil } func (balancer LoadBalancerStruct) Remove(host string) error { err := exec.Command("ipvsadm", "-d", "-t", balancer.VirtualIP+":http", "-r", host+":http").Run() if err != nil { return err } return nil } func (balancer LoadBalancerStruct) Active(host string) error { err := exec.Command("ipvsadm", "-e", "-t", balancer.VirtualIP+":http", "-r", host+":http", "-w", "1", "-g").Run() if err != nil { return err } return nil } func (balancer LoadBalancerStruct) Inactive(host string) error { err := exec.Command("ipvsadm", "-e", "-t", balancer.VirtualIP+":http", "-r", host+":http", "-w", "0", 
"-g").Run() if err != nil { return err } return nil } Thlowの値を変更 package models import ( "os/exec" "github.com/sai-lab/mouryou/lib/check" ) type LoadBalancerStruct struct { VirtualIP string `json:"virtual_ip"` Algorithm string `json:"algorithm"` ThresholdOut float64 `json:"threshold_out"` ThresholdIn float64 `json:"threshold_in"` Margin float64 `json:"margin"` ScaleOut int `json:"scale_out"` ScaleIn int `json:"scale_in"` } func (balancer LoadBalancerStruct) Initialize() { exec.Command("ip", "addr", "add", balancer.VirtualIP, "label", "eth0:vip", "dev", "eth0").Run() err := exec.Command("ipvsadm", "-C").Run() check.Error(err) err = exec.Command("ipvsadm", "-A", "-t", balancer.VirtualIP+":http", "-s", balancer.Algorithm).Run() check.Error(err) } func (balancer LoadBalancerStruct) ChangeThresholdOut(w, n int) { var ocRate float64 ocRate = float64(w) / float64(n) switch { case ocRate <= 0.3: threshold = 0.5 case ocRate <= 0.5: threshold = 0.6 case ocRate <= 0.7: threshold = 0.7 case ocRate <= 0.9: threshold = 0.8 case ocRate <= 1.0: threshold = 0.9 } } func (balancer LoadBalancerStruct) ThHigh(w, n int) float64 { return threshold } func (balancer LoadBalancerStruct) ThLow(w int) float64 { return (threshold-0.2)*float64(w) - balancer.Margin } func (balancer LoadBalancerStruct) Add(host string) error { err := exec.Command("ipvsadm", "-a", "-t", balancer.VirtualIP+":http", "-r", host+":http", "-w", "0", "-g").Run() if err != nil { return err } return nil } func (balancer LoadBalancerStruct) Remove(host string) error { err := exec.Command("ipvsadm", "-d", "-t", balancer.VirtualIP+":http", "-r", host+":http").Run() if err != nil { return err } return nil } func (balancer LoadBalancerStruct) Active(host string) error { err := exec.Command("ipvsadm", "-e", "-t", balancer.VirtualIP+":http", "-r", host+":http", "-w", "1", "-g").Run() if err != nil { return err } return nil } func (balancer LoadBalancerStruct) Inactive(host string) error { err := exec.Command("ipvsadm", "-e", 
"-t", balancer.VirtualIP+":http", "-r", host+":http", "-w", "0", "-g").Run() if err != nil { return err } return nil }
// Copyright 2017 The go-vm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package fusion import "github.com/go-vm/vmware" const app = "fusion" // Start start a VM or Team. func Start(vmwarevm string, gui bool) error { cmd := vmware.VMRun(app, "start", vmwarevm) if gui { cmd.Args = append(cmd.Args, "gui") } else { cmd.Args = append(cmd.Args, "nogui") } return cmd.Run() } // Stop stop a VM or Team. func Stop(vmwarevm string, force bool) error { cmd := vmware.VMRun(app, "stop", vmwarevm) if force { cmd.Args = append(cmd.Args, "hard") } else { cmd.Args = append(cmd.Args, "soft") } return cmd.Run() } // Reset reset a VM or Team. func Reset(vmwarevm string, force bool) error { cmd := vmware.VMRun(app, "reset", vmwarevm) if force { cmd.Args = append(cmd.Args, "hard") } else { cmd.Args = append(cmd.Args, "soft") } return cmd.Run() } // Suspend Suspend a VM or Team. func Suspend(vmwarevm string, force bool) error { cmd := vmware.VMRun(app, "suspend", vmwarevm) if force { cmd.Args = append(cmd.Args, "hard") } else { cmd.Args = append(cmd.Args, "soft") } return cmd.Run() } // Pause pause a VM. func Pause(vmwarevm string) error { cmd := vmware.VMRun(app, "pause", vmwarevm) return cmd.Run() } // Unpause unpause a VM. func Unpause(vmwarevm string) error { cmd := vmware.VMRun(app, "unpause", vmwarevm) return cmd.Run() } fusion: add error handling Signed-off-by: Koichi Shiraishi <2e5bdfebde234ed3509bcfc18121c70b6631e207@gmail.com> // Copyright 2017 The go-vm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package fusion import ( "bytes" "fmt" "os/exec" "github.com/go-vm/vmware" ) const app = "fusion" // Start start a VM or Team. 
func Start(vmwarevm string, gui bool) error { cmd := vmware.VMRun(app, "start", vmwarevm) if gui { cmd.Args = append(cmd.Args, "gui") } else { cmd.Args = append(cmd.Args, "nogui") } var stdout bytes.Buffer cmd.Stdout = &stdout if err := cmd.Run(); err != nil { if e := err.(*exec.ExitError); e != nil { return fmt.Errorf(stdout.String()) } return err } return nil } // Stop stop a VM or Team. func Stop(vmwarevm string, force bool) error { cmd := vmware.VMRun(app, "stop", vmwarevm) if force { cmd.Args = append(cmd.Args, "hard") } else { cmd.Args = append(cmd.Args, "soft") } var stdout bytes.Buffer cmd.Stdout = &stdout if err := cmd.Run(); err != nil { if e := err.(*exec.ExitError); e != nil { return fmt.Errorf(stdout.String()) } return err } return nil } // Reset reset a VM or Team. func Reset(vmwarevm string, force bool) error { cmd := vmware.VMRun(app, "reset", vmwarevm) if force { cmd.Args = append(cmd.Args, "hard") } else { cmd.Args = append(cmd.Args, "soft") } var stdout bytes.Buffer cmd.Stdout = &stdout if err := cmd.Run(); err != nil { if e := err.(*exec.ExitError); e != nil { return fmt.Errorf(stdout.String()) } return err } return nil } // Suspend Suspend a VM or Team. func Suspend(vmwarevm string, force bool) error { cmd := vmware.VMRun(app, "suspend", vmwarevm) if force { cmd.Args = append(cmd.Args, "hard") } else { cmd.Args = append(cmd.Args, "soft") } var stdout bytes.Buffer cmd.Stdout = &stdout if err := cmd.Run(); err != nil { if e := err.(*exec.ExitError); e != nil { return fmt.Errorf(stdout.String()) } return err } return nil } // Pause pause a VM. func Pause(vmwarevm string) error { cmd := vmware.VMRun(app, "pause", vmwarevm) var stdout bytes.Buffer cmd.Stdout = &stdout if err := cmd.Run(); err != nil { if e := err.(*exec.ExitError); e != nil { return fmt.Errorf(stdout.String()) } return err } return nil } // Unpause unpause a VM. 
func Unpause(vmwarevm string) error { cmd := vmware.VMRun(app, "unpause", vmwarevm) var stdout bytes.Buffer cmd.Stdout = &stdout if err := cmd.Run(); err != nil { if e := err.(*exec.ExitError); e != nil { return fmt.Errorf(stdout.String()) } return err } return nil }
package gologspace import( "github.com/stretchr/testify/assert" "fmt" "testing" ) const( Angstrom = 1E-10 ) func BenchmarkLogSpaceAdd(b *testing.B) { s := LogSpace{} b.ResetTimer() for i := 0; i < b.N; i++ { a := s.Enter(float64(i)) s.Add(a, a) } } func BenchmarkLogSpaceSub(b *testing.B) { s := LogSpace{} b.ResetTimer() for i := 0; i < b.N; i++ { a := s.Enter(float64(i)) s.Sub(a, a) } } func BenchmarkLogSpaceMul(b *testing.B) { s := LogSpace{} b.ResetTimer() for i := 0; i < b.N; i++ { a := s.Enter(float64(i)) s.Mul(a, a) } } func BenchmarkLogSpaceDiv(b *testing.B) { s := LogSpace{} b.ResetTimer() for i := 0; i < b.N; i++ { a := s.Enter(float64(i)) s.Div(a, a) } } func TestAdd(t *testing.T) { s := LogSpace{} L := 100 for x := 0; x < L; x += 10 { xA := float64(2 * x + 1) yA := float64(x - 25) A := xA + yA xB := s.Enter(xA) yB := s.Enter(yA) B := s.Exit(s.Add(xB, yB)) assert.InDelta(t, A, B, Angstrom, fmt.Sprintf("Add(%v, %v)", xB, yB)) } } func TestSub(t *testing.T) { s := LogSpace{} L := 100 for x := 0; x < L; x += 10 { xA := float64(2 * x) yA := float64(x - 25) A := xA - yA xB := s.Enter(xA) yB := s.Enter(yA) B := s.Exit(s.Sub(xB, yB)) assert.InDelta(t, A, B, Angstrom, fmt.Sprintf("Sub(%v,%v)", xA, yA)) } } func TestMul(t *testing.T) { s := LogSpace{} L := 100 for x := 0; x < L; x += 10 { xA := float64(2 * x) yA := float64(x - 25) A := xA * yA xB := s.Enter(xA) yB := s.Enter(yA) B := s.Exit(s.Mul(xB, yB)) assert.InDelta(t, A, B, Angstrom, fmt.Sprintf("Mul(%v,%v)", xA, yA)) } } func TestDiv(t *testing.T) { s := LogSpace{} L := 100 for x := 0; x < L; x += 10 { xA := float64(2 * x) yA := float64(x - 25) A := xA / yA xB := s.Enter(xA) yB := s.Enter(yA) B := s.Exit(s.Div(xB, yB)) assert.InDelta(t, A, B, Angstrom, fmt.Sprintf("Div(%v,%v)", xA, yA)) } } removing nasty +1 in test package gologspace import( "github.com/stretchr/testify/assert" "fmt" "testing" ) const( Angstrom = 1E-10 ) func BenchmarkLogSpaceAdd(b *testing.B) { s := LogSpace{} b.ResetTimer() for i := 0; i < 
b.N; i++ { a := s.Enter(float64(i)) s.Add(a, a) } } func BenchmarkLogSpaceSub(b *testing.B) { s := LogSpace{} b.ResetTimer() for i := 0; i < b.N; i++ { a := s.Enter(float64(i)) s.Sub(a, a) } } func BenchmarkLogSpaceMul(b *testing.B) { s := LogSpace{} b.ResetTimer() for i := 0; i < b.N; i++ { a := s.Enter(float64(i)) s.Mul(a, a) } } func BenchmarkLogSpaceDiv(b *testing.B) { s := LogSpace{} b.ResetTimer() for i := 0; i < b.N; i++ { a := s.Enter(float64(i)) s.Div(a, a) } } func TestAdd(t *testing.T) { s := LogSpace{} L := 100 for x := 0; x < L; x += 10 { xA := float64(2 * x) yA := float64(x + 25) A := xA + yA xB := s.Enter(xA) yB := s.Enter(yA) B := s.Exit(s.Add(xB, yB)) assert.InDelta(t, A, B, Angstrom, fmt.Sprintf("Add(%v, %v)", xB, yB)) } } func TestSub(t *testing.T) { s := LogSpace{} L := 100 for x := 0; x < L; x += 10 { xA := float64(2 * x) yA := float64(x - 25) A := xA - yA xB := s.Enter(xA) yB := s.Enter(yA) B := s.Exit(s.Sub(xB, yB)) assert.InDelta(t, A, B, Angstrom, fmt.Sprintf("Sub(%v,%v)", xA, yA)) } } func TestMul(t *testing.T) { s := LogSpace{} L := 100 for x := 0; x < L; x += 10 { xA := float64(2 * x) yA := float64(x - 25) A := xA * yA xB := s.Enter(xA) yB := s.Enter(yA) B := s.Exit(s.Mul(xB, yB)) assert.InDelta(t, A, B, Angstrom, fmt.Sprintf("Mul(%v,%v)", xA, yA)) } } func TestDiv(t *testing.T) { s := LogSpace{} L := 100 for x := 0; x < L; x += 10 { xA := float64(2 * x) yA := float64(x - 25) A := xA / yA xB := s.Enter(xA) yB := s.Enter(yA) B := s.Exit(s.Div(xB, yB)) assert.InDelta(t, A, B, Angstrom, fmt.Sprintf("Div(%v,%v)", xA, yA)) } }
package main import ( "fmt" "io/ioutil" "log" "os" "path" "sort" "time" "runtime/pprof" "github.com/phil-mansfield/table" "github.com/phil-mansfield/gotetra/render/io" "github.com/phil-mansfield/gotetra/render/halo" rGeom "github.com/phil-mansfield/gotetra/render/geom" "github.com/phil-mansfield/gotetra/los" "github.com/phil-mansfield/gotetra/los/geom" ) const ( rType = halo.R200m rMult = 3.0 ) func main() { fmt.Println("Running") if len(os.Args) != 3 { log.Fatalf("Usage: $ %s input_dir halo_file", os.Args[0]) } dirName := os.Args[1] haloFileName := os.Args[2] files, err := fileNames(dirName) if err != nil { log.Fatal(err.Error()) } hds := make([]io.SheetHeader, len(files)) for i := range files { if i % 50 == 0 { fmt.Println(i) } err = io.ReadSheetHeaderAt(files[i], &hds[i]) if err != nil { log.Fatal(err.Error()) } } xs, ys, zs, ms, rs, err := readHalos(haloFileName, &hds[0].Cosmo) if err != nil { log.Fatal(err.Error()) } xsBuf, tsBuf, ssBuf, rhosBuf := createBuffers(&hds[0]) h := new(los.HaloProfiles) f, err := os.Create("out.pprof") if err != nil { log.Fatal(err.Error()) } pprof.StartCPUProfile(f) defer pprof.StopCPUProfile() for _, i := range []int{1000, 1001, 1002, 1003, 1004} { origin := &geom.Vec{float32(xs[i]), float32(ys[i]), float32(zs[i])} h.Init(i, 1, origin, 0, rs[i] * rMult, 200, 1000) hdIntrs, fileIntrs := intersectingSheets(h, hds, files) fmt.Printf( "Halo mass is: %.3g, intersects are: %d\n", ms[i], len(hdIntrs), ) intersectionTest( h, hdIntrs, fileIntrs, xsBuf, tsBuf, ssBuf, rhosBuf, ) } } // createBuffers allocates all the buffers needed for repeated calls to the // various sheet transformation functions. 
func createBuffers( hd *io.SheetHeader, ) ([]rGeom.Vec, []geom.Tetra, []geom.Sphere, []float64) { xsBuf := make([]rGeom.Vec, hd.GridCount) sw := hd.SegmentWidth tsBuf := make([]geom.Tetra, 6*sw*sw*sw) ssBuf := make([]geom.Sphere, 6*sw*sw*sw) rhosBuf := make([]float64, 6*sw*sw*sw) return xsBuf, tsBuf, ssBuf, rhosBuf } // fileNames returns the names of all the files ina directory. func fileNames(dirName string) ([]string, error) { infos, err := ioutil.ReadDir(dirName) if err != nil { return nil, err } files := make([]string, len(infos)) for i := range infos { files[i] = path.Join(dirName, infos[i].Name()) } return files, nil } // halos allows for arrays of halo properties to be sorted simultaneously. type halos struct { xs, ys, zs, ms, rs []float64 } func (hs *halos) Len() int { return len(hs.rs) } func (hs *halos) Less(i, j int) bool { return hs.rs[i] < hs.rs[j] } func (hs *halos) Swap(i, j int) { hs.rs[i], hs.rs[j] = hs.rs[j], hs.rs[i] hs.ms[i], hs.ms[j] = hs.ms[j], hs.ms[i] hs.xs[i], hs.xs[j] = hs.xs[j], hs.xs[i] hs.ys[i], hs.ys[j] = hs.ys[j], hs.ys[i] hs.zs[i], hs.zs[j] = hs.zs[j], hs.zs[i] } // readHalos reads halo information from the given Rockstar catalog. func readHalos( file string, cosmo *io.CosmologyHeader, ) (xs, ys, zs, ms, rs []float64, err error) { rCol := rType.RockstarColumn() xCol, yCol, zCol := 17, 18, 19 colIdxs := []int{ xCol, yCol, zCol, rCol } cols, err := table.ReadTable(file, colIdxs, nil) if err != nil { return nil, nil, nil, nil, nil, err } xs, ys, zs = cols[0], cols[1], cols[2] if rType.RockstarMass() { ms = cols[3] rs = make([]float64, len(ms)) rType.Radius(cosmo, ms, rs) } else { rs = cols[3] ms = make([]float64, len(rs)) for i := range rs { rs[i] /= 1000 } // kpc -> Mpc rType.Mass(cosmo, rs, ms) } sort.Sort(sort.Reverse(&halos{ xs, ys, zs, ms, rs })) return xs, ys, zs, ms, rs, nil } // intersectingSheets returns all the SheetHeaders and file names that intersect // with a given halo. 
func intersectingSheets( h *los.HaloProfiles, hds []io.SheetHeader, files []string, ) ([]io.SheetHeader, []string) { hdOuts, fileOuts := []io.SheetHeader{}, []string{} for i := range hds { if h.SheetIntersect(&hds[i]) { hdOuts = append(hdOuts, hds[i]) fileOuts = append(fileOuts, files[i]) } } return hdOuts, fileOuts } // intersectionTest counts how many candidate intersections we have by: // (a). Only considering tetrahedra with points insde the halo. // (b). Conisdering tetrahedra whose bounding sphere intersects the halo. func intersectionTest( h *los.HaloProfiles, hds []io.SheetHeader, files []string, xsBuf []rGeom.Vec, tsBuf []geom.Tetra, ssBuf []geom.Sphere, rhosBuf []float64, ) { hs := []los.HaloProfiles{*h} h = &hs[0] cCopy := h.C for i, file := range files { hd := &hds[i] fmt.Printf(" Reading %s -> ", path.Base(file)) h.C = cCopy t1 := float64(time.Now().UnixNano()) io.ReadSheetPositionsAt(file, xsBuf) los.WrapHalo(hs, hd) los.WrapXs(xsBuf, hd) los.UnpackTetrahedra(xsBuf, hd, tsBuf) los.TetraDensity(hd, tsBuf, rhosBuf) for j := range tsBuf { tsBuf[j].BoundingSphere(&ssBuf[j]) } t2 := float64(time.Now().UnixNano()) los.DensityAll(hs, tsBuf, ssBuf, rhosBuf) t3 := float64(time.Now().UnixNano()) fmt.Printf("%27s Setup: %.3g s Density: %.3g s\n", "", (t2 - t1) / 1e9, (t3 - t2) / 1e9) } fmt.Printf(" Rho: %.3g\n", h.Rho()) } Updated main.go or something. I have no idea what's happening in this file. 
// Command-line driver that reads gotetra sheet files and a Rockstar halo
// catalog, then runs a line-of-sight density intersection test over a
// hard-coded set of halos while CPU-profiling the loop.
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path"
	"sort"
	"time"

	"runtime/pprof"

	"github.com/phil-mansfield/table"

	"github.com/phil-mansfield/gotetra/render/io"
	"github.com/phil-mansfield/gotetra/render/halo"
	rGeom "github.com/phil-mansfield/gotetra/render/geom"

	"github.com/phil-mansfield/gotetra/los"
	"github.com/phil-mansfield/gotetra/los/geom"
)

const (
	// rType selects the halo radius/mass definition (R200m) used when
	// reading the Rockstar catalog in readHalos.
	rType = halo.R200m
	// rMinMult and rMaxMult scale each halo's catalog radius into the
	// [rMin, rMax] range handed to HaloProfiles.Init.
	rMaxMult = 3.0
	rMinMult = 1.0
)

func main() {
	fmt.Println("Running")

	// Expect exactly two positional arguments: the sheet directory and
	// the halo catalog file.
	if len(os.Args) != 3 {
		log.Fatalf("Usage: $ %s input_dir halo_file", os.Args[0])
	}
	dirName := os.Args[1]
	haloFileName := os.Args[2]

	// Read every sheet header in the input directory, logging progress
	// every 50 files.
	files, err := fileNames(dirName)
	if err != nil {
		log.Fatal(err.Error())
	}
	hds := make([]io.SheetHeader, len(files))
	for i := range files {
		if i % 50 == 0 {
			fmt.Println(i)
		}
		err = io.ReadSheetHeaderAt(files[i], &hds[i])
		if err != nil {
			log.Fatal(err.Error())
		}
	}

	// Halo positions, masses, and radii, sorted by decreasing radius
	// (see readHalos). The cosmology comes from the first sheet header.
	xs, ys, zs, ms, rs, err := readHalos(haloFileName, &hds[0].Cosmo)
	if err != nil {
		log.Fatal(err.Error())
	}

	xsBuf, tsBuf, ssBuf, rhosBuf := createBuffers(&hds[0])
	h := new(los.HaloProfiles)

	// CPU-profile the per-halo loop.
	// NOTE(review): the profile file is never closed and the error
	// returned by StartCPUProfile is ignored — acceptable for a
	// throwaway driver, but worth fixing.
	f, err := os.Create("out.pprof")
	if err != nil {
		log.Fatal(err.Error())
	}
	pprof.StartCPUProfile(f)
	defer pprof.StopCPUProfile()

	// Hard-coded halo indices to test; indices refer to the sorted
	// catalog arrays above.
	for _, i := range []int{1000, 1001, 1002, 1003, 1004} {
		origin := &geom.Vec{float32(xs[i]), float32(ys[i]), float32(zs[i])}
		h.Init(i, 10, origin, rs[i] * rMinMult, rs[i] * rMaxMult, 200, 1000)

		// Only visit the sheet files whose volume overlaps this halo.
		hdIntrs, fileIntrs := intersectingSheets(h, hds, files)

		fmt.Printf(
			"Halo mass is: %.3g, intersects are: %d\n",
			ms[i], len(hdIntrs),
		)

		intersectionTest(
			h, hdIntrs, fileIntrs, xsBuf, tsBuf, ssBuf, rhosBuf,
		)
	}
}

// createBuffers allocates all the buffers needed for repeated calls to the
// various sheet transformation functions.
// createBuffers returns position, tetrahedron, bounding-sphere, and density
// scratch buffers sized for one sheet segment, so they can be reused across
// every file processed by intersectionTest.
func createBuffers(
	hd *io.SheetHeader,
) ([]rGeom.Vec, []geom.Tetra, []geom.Sphere, []float64) {
	xsBuf := make([]rGeom.Vec, hd.GridCount)
	sw := hd.SegmentWidth
	// 6 tetrahedra per grid cell — presumably the standard decomposition
	// of a cube into six tetrahedra; TODO confirm against los package.
	tsBuf := make([]geom.Tetra, 6*sw*sw*sw)
	ssBuf := make([]geom.Sphere, 6*sw*sw*sw)
	rhosBuf := make([]float64, 6*sw*sw*sw)
	return xsBuf, tsBuf, ssBuf, rhosBuf
}

// fileNames returns the full paths of all the files in a directory.
// Note: no filtering is done, so subdirectories and stray files are
// returned too.
func fileNames(dirName string) ([]string, error) {
	infos, err := ioutil.ReadDir(dirName)
	if err != nil {
		return nil, err
	}

	files := make([]string, len(infos))
	for i := range infos {
		files[i] = path.Join(dirName, infos[i].Name())
	}
	return files, nil
}

// halos allows for arrays of halo properties to be sorted simultaneously:
// each Swap exchanges the same index in all five parallel slices.
type halos struct {
	xs, ys, zs, ms, rs []float64
}

func (hs *halos) Len() int { return len(hs.rs) }

// Less orders by radius, so sort.Sort gives ascending radius and
// sort.Reverse (used in readHalos) gives descending radius.
func (hs *halos) Less(i, j int) bool { return hs.rs[i] < hs.rs[j] }

func (hs *halos) Swap(i, j int) {
	hs.rs[i], hs.rs[j] = hs.rs[j], hs.rs[i]
	hs.ms[i], hs.ms[j] = hs.ms[j], hs.ms[i]
	hs.xs[i], hs.xs[j] = hs.xs[j], hs.xs[i]
	hs.ys[i], hs.ys[j] = hs.ys[j], hs.ys[i]
	hs.zs[i], hs.zs[j] = hs.zs[j], hs.zs[i]
}

// readHalos reads halo positions, masses, and radii from the given Rockstar
// catalog, deriving radius from mass (or vice versa) according to rType,
// and returns all five slices sorted by decreasing radius.
func readHalos(
	file string,
	cosmo *io.CosmologyHeader,
) (xs, ys, zs, ms, rs []float64, err error) {
	rCol := rType.RockstarColumn()
	// Hard-coded X/Y/Z column indices of the Rockstar catalog format.
	xCol, yCol, zCol := 17, 18, 19
	colIdxs := []int{ xCol, yCol, zCol, rCol }
	cols, err := table.ReadTable(file, colIdxs, nil)
	if err != nil {
		return nil, nil, nil, nil, nil, err
	}

	xs, ys, zs = cols[0], cols[1], cols[2]
	if rType.RockstarMass() {
		// Catalog column is a mass: convert to radius.
		ms = cols[3]
		rs = make([]float64, len(ms))
		rType.Radius(cosmo, ms, rs)
	} else {
		// Catalog column is a radius: convert to mass.
		rs = cols[3]
		ms = make([]float64, len(rs))
		for i := range rs { rs[i] /= 1000 } // kpc -> Mpc
		rType.Mass(cosmo, rs, ms)
	}

	sort.Sort(sort.Reverse(&halos{ xs, ys, zs, ms, rs }))
	return xs, ys, zs, ms, rs, nil
}

// intersectingSheets returns all the SheetHeaders and file names that intersect
// with a given halo.
func intersectingSheets(
	h *los.HaloProfiles, hds []io.SheetHeader, files []string,
) ([]io.SheetHeader, []string) {
	// Keep hds[i] and files[i] paired as they are filtered.
	hdOuts, fileOuts := []io.SheetHeader{}, []string{}
	for i := range hds {
		if h.SheetIntersect(&hds[i]) {
			hdOuts = append(hdOuts, hds[i])
			fileOuts = append(fileOuts, files[i])
		}
	}
	return hdOuts, fileOuts
}

// intersectionTest counts how many candidate intersections we have by:
// (a). Only considering tetrahedra with points inside the halo.
// (b). Considering tetrahedra whose bounding sphere intersects the halo.
// It prints per-file setup/density timings and the halo's final density.
func intersectionTest(
	h *los.HaloProfiles, hds []io.SheetHeader, files []string,
	xsBuf []rGeom.Vec, tsBuf []geom.Tetra,
	ssBuf []geom.Sphere, rhosBuf []float64,
) {
	// The los routines take a slice of profiles; wrap our single halo and
	// re-point h at the slice element so later reads see any mutations.
	hs := []los.HaloProfiles{*h}
	h = &hs[0]
	// Save the halo center: WrapHalo presumably shifts h.C per file for
	// periodic wrapping (TODO confirm), so restore it each iteration.
	cCopy := h.C

	for i, file := range files {
		hd := &hds[i]
		fmt.Printf(" Reading %s -> ", path.Base(file))
		h.C = cCopy

		// Setup phase: read positions, wrap coordinates, unpack into
		// tetrahedra, compute densities and bounding spheres.
		t1 := float64(time.Now().UnixNano())
		io.ReadSheetPositionsAt(file, xsBuf)
		los.WrapHalo(hs, hd)
		los.WrapXs(xsBuf, hd)
		los.UnpackTetrahedra(xsBuf, hd, tsBuf)
		los.TetraDensity(hd, tsBuf, rhosBuf)
		for j := range tsBuf {
			tsBuf[j].BoundingSphere(&ssBuf[j])
		}

		// Density phase: accumulate this sheet's contribution into hs.
		t2 := float64(time.Now().UnixNano())
		los.DensityAll(hs, tsBuf, ssBuf, rhosBuf)
		t3 := float64(time.Now().UnixNano())
		fmt.Printf("Setup: %.3g s Density: %.3g s\n",
			(t2 - t1) / 1e9, (t3 - t2) / 1e9)
	}
	fmt.Printf(" Rho: %.3g\n", h.Rho())
}
package main import ( "archive/tar" "bytes" "crypto/rand" "database/sql" "encoding/json" "fmt" "io" "io/ioutil" "math/big" "net" "os" "os/exec" "path" "path/filepath" "reflect" "regexp" "sort" "strings" "syscall" "time" "gopkg.in/flosch/pongo2.v3" "gopkg.in/lxc/go-lxc.v2" "gopkg.in/yaml.v2" "github.com/lxc/lxd/shared" log "gopkg.in/inconshreveable/log15.v2" ) // ExtractInterfaceFromConfigName returns "eth0" from "volatile.eth0.hwaddr", // or an error if the key does not match this pattern. func extractInterfaceFromConfigName(k string) (string, error) { re := regexp.MustCompile("volatile\\.([^.]*)\\.hwaddr") m := re.FindStringSubmatch(k) if m != nil && len(m) > 1 { return m[1], nil } return "", fmt.Errorf("%s did not match", k) } func validateRawLxc(rawLxc string) error { for _, line := range strings.Split(rawLxc, "\n") { membs := strings.SplitN(line, "=", 2) if strings.ToLower(strings.Trim(membs[0], " \t")) == "lxc.logfile" { return fmt.Errorf("setting lxc.logfile is not allowed") } } return nil } // GenerateMacAddr generates a mac address from a string template: // e.g. "00:11:22:xx:xx:xx" -> "00:11:22:af:3e:51" func generateMacAddr(template string) (string, error) { ret := bytes.Buffer{} for _, c := range template { if c == 'x' { c, err := rand.Int(rand.Reader, big.NewInt(16)) if err != nil { return "", err } ret.WriteString(fmt.Sprintf("%x", c.Int64())) } else { ret.WriteString(string(c)) } } return ret.String(), nil } func containerPathGet(name string, isSnapshot bool) string { if isSnapshot { return shared.VarPath("snapshots", name) } return shared.VarPath("containers", name) } // containerLXDArgs contains every argument needed to create an LXD Container type containerLXDArgs struct { ID int // Leave it empty when you create one. 
Ctype containerType Config map[string]string Profiles []string Ephemeral bool BaseImage string Architecture int Devices shared.Devices } type containerLXD struct { c *lxc.Container daemon *Daemon id int name string config map[string]string profiles []string devices shared.Devices architecture int ephemeral bool idmapset *shared.IdmapSet cType containerType baseConfig map[string]string baseDevices shared.Devices Storage storage } type container interface { RenderState() (*shared.ContainerState, error) Reboot() error Freeze() error Shutdown(timeout time.Duration) error Start() error Stop() error Unfreeze() error Delete() error Restore(sourceContainer container) error Rename(newName string) error ConfigReplace(newConfig containerLXDArgs) error StorageStart() error StorageStop() error StorageGet() storage IsPrivileged() bool IsRunning() bool IsEphemeral() bool IsSnapshot() bool IDGet() int NameGet() string ArchitectureGet() int ConfigGet() map[string]string ConfigKeySet(key string, value string) error DevicesGet() shared.Devices ProfilesGet() []string PathGet(newName string) string RootfsPathGet() string TemplatesPathGet() string StateDirGet() string LogFilePathGet() string LogPathGet() string InitPidGet() int StateGet() string IdmapSetGet() *shared.IdmapSet LastIdmapSetGet() (*shared.IdmapSet, error) TemplateApply(trigger string) error ExportToTar(snap string, w io.Writer) error Checkpoint(opts lxc.CheckpointOptions) error StartFromMigration(imagesDir string) error // TODO: Remove every use of this and remove it. 
LXContainerGet() *lxc.Container DetachMount(m shared.Device) error AttachMount(m shared.Device) error } func containerLXDCreateAsEmpty(d *Daemon, name string, args containerLXDArgs) (container, error) { // Create the container c, err := containerLXDCreateInternal(d, name, args) if err != nil { return nil, err } // Now create the empty storage if err := c.Storage.ContainerCreate(c); err != nil { c.Delete() return nil, err } return c, nil } func containerLXDCreateFromImage(d *Daemon, name string, args containerLXDArgs, hash string) (container, error) { // Create the container c, err := containerLXDCreateInternal(d, name, args) if err != nil { return nil, err } if err := dbImageLastAccessUpdate(d.db, hash); err != nil { return nil, fmt.Errorf("Error updating image last use date: %s", err) } // Now create the storage from an image if err := c.Storage.ContainerCreateFromImage(c, hash); err != nil { c.Delete() return nil, err } return c, nil } func containerLXDCreateAsCopy(d *Daemon, name string, args containerLXDArgs, sourceContainer container) (container, error) { c, err := containerLXDCreateInternal(d, name, args) if err != nil { return nil, err } if err := c.ConfigReplace(args); err != nil { c.Delete() return nil, err } if err := c.Storage.ContainerCopy(c, sourceContainer); err != nil { c.Delete() return nil, err } return c, nil } func containerLXDCreateAsSnapshot(d *Daemon, name string, args containerLXDArgs, sourceContainer container, stateful bool) (container, error) { c, err := containerLXDCreateInternal(d, name, args) if err != nil { return nil, err } c.Storage = sourceContainer.StorageGet() if err := c.Storage.ContainerSnapshotCreate(c, sourceContainer); err != nil { c.Delete() return nil, err } if stateful { stateDir := c.StateDirGet() err = os.MkdirAll(stateDir, 0700) if err != nil { c.Delete() return nil, err } // TODO - shouldn't we freeze for the duration of rootfs snapshot below? 
if !sourceContainer.IsRunning() { c.Delete() return nil, fmt.Errorf("Container not running\n") } opts := lxc.CheckpointOptions{Directory: stateDir, Stop: true, Verbose: true} err = sourceContainer.Checkpoint(opts) err2 := CollectCRIULogFile(sourceContainer, stateDir, "snapshot", "dump") if err2 != nil { shared.Log.Warn("failed to collect criu log file", log.Ctx{"error": err2}) } if err != nil { c.Delete() return nil, err } } return c, nil } func validContainerName(name string) error { if strings.Contains(name, shared.SnapshotDelimiter) { return fmt.Errorf( "The character '%s' is reserved for snapshots.", shared.SnapshotDelimiter) } return nil } func containerLXDCreateInternal( d *Daemon, name string, args containerLXDArgs) (*containerLXD, error) { shared.Log.Info( "Container create", log.Ctx{ "container": name, "isSnapshot": args.Ctype == cTypeSnapshot}) if args.Ctype != cTypeSnapshot { if err := validContainerName(name); err != nil { return nil, err } } path := containerPathGet(name, args.Ctype == cTypeSnapshot) if shared.PathExists(path) { shared.Log.Error( "The container already exists on disk", log.Ctx{ "container": name, "path": path}) return nil, fmt.Errorf( "The container already exists on disk, container: '%s', path: '%s'", name, path) } if args.Profiles == nil { args.Profiles = []string{"default"} } if args.Config == nil { args.Config = map[string]string{} } if args.BaseImage != "" { args.Config["volatile.base_image"] = args.BaseImage } if args.Devices == nil { args.Devices = shared.Devices{} } profiles, err := dbProfilesGet(d.db) if err != nil { return nil, err } for _, profile := range args.Profiles { if !shared.StringInSlice(profile, profiles) { return nil, fmt.Errorf("Requested profile '%s' doesn't exist", profile) } } id, err := dbContainerCreate(d.db, name, args) if err != nil { return nil, err } shared.Log.Debug( "Container created in the DB", log.Ctx{"container": name, "id": id}) baseConfig := map[string]string{} if err := 
shared.DeepCopy(&args.Config, &baseConfig); err != nil { return nil, err } baseDevices := shared.Devices{} if err := shared.DeepCopy(&args.Devices, &baseDevices); err != nil { return nil, err } c := &containerLXD{ daemon: d, id: id, name: name, ephemeral: args.Ephemeral, architecture: args.Architecture, config: args.Config, profiles: args.Profiles, devices: args.Devices, cType: args.Ctype, baseConfig: baseConfig, baseDevices: baseDevices} // No need to detect storage here, its a new container. c.Storage = d.Storage if err := c.init(); err != nil { c.Delete() // Delete the container from the DB. return nil, err } idmap := c.IdmapSetGet() var jsonIdmap string if idmap != nil { idmapBytes, err := json.Marshal(idmap.Idmap) if err != nil { c.Delete() return nil, err } jsonIdmap = string(idmapBytes) } else { jsonIdmap = "[]" } err = c.ConfigKeySet("volatile.last_state.idmap", jsonIdmap) if err != nil { c.Delete() return nil, err } return c, nil } func containerLXDLoad(d *Daemon, name string) (container, error) { shared.Log.Debug("Container load", log.Ctx{"container": name}) args, err := dbContainerGet(d.db, name) if err != nil { return nil, err } baseConfig := map[string]string{} if err := shared.DeepCopy(&args.Config, &baseConfig); err != nil { return nil, err } baseDevices := shared.Devices{} if err := shared.DeepCopy(&args.Devices, &baseDevices); err != nil { return nil, err } c := &containerLXD{ daemon: d, id: args.ID, name: name, ephemeral: args.Ephemeral, architecture: args.Architecture, config: args.Config, profiles: args.Profiles, devices: args.Devices, cType: args.Ctype, baseConfig: baseConfig, baseDevices: baseDevices} s, err := storageForFilename(d, c.PathGet("")) if err != nil { shared.Log.Warn("Couldn't detect storage.", log.Ctx{"container": c.NameGet()}) c.Storage = d.Storage } else { c.Storage = s } if err := c.init(); err != nil { return nil, err } return c, nil } // init prepares the LXContainer for this LXD Container // TODO: This gets called on each 
load of the container, // we might be able to split this is up into c.Start(). func (c *containerLXD) init() error { templateConfBase := "ubuntu" templateConfDir := os.Getenv("LXD_LXC_TEMPLATE_CONFIG") if templateConfDir == "" { templateConfDir = "/usr/share/lxc/config" } cc, err := lxc.NewContainer(c.NameGet(), c.daemon.lxcpath) if err != nil { return err } c.c = cc logfile := c.LogFilePathGet() if err := os.MkdirAll(filepath.Dir(logfile), 0700); err != nil { return err } if err = c.c.SetLogFile(logfile); err != nil { return err } personality, err := shared.ArchitecturePersonality(c.architecture) if err == nil { if err := c.c.SetConfigItem("lxc.arch", personality); err != nil { return err } } err = c.c.SetConfigItem("lxc.include", fmt.Sprintf("%s/%s.common.conf", templateConfDir, templateConfBase)) if err != nil { return err } if !c.IsPrivileged() { err = c.c.SetConfigItem("lxc.include", fmt.Sprintf("%s/%s.userns.conf", templateConfDir, templateConfBase)) if err != nil { return err } } if c.IsNesting() { shared.Debugf("Setting up %s for nesting", c.name) orig := c.c.ConfigItem("lxc.mount.auto") auto := "" if len(orig) == 1 { auto = orig[0] } if !strings.Contains(auto, "cgroup") { auto = fmt.Sprintf("%s %s", auto, "cgroup:mixed") err = c.c.SetConfigItem("lxc.mount.auto", auto) if err != nil { return err } } /* * mount extra /proc and /sys to work around kernel * restrictions on remounting them when covered */ err = c.c.SetConfigItem("lxc.mount.entry", "proc dev/.lxc/proc proc create=dir,optional") if err != nil { return err } err = c.c.SetConfigItem("lxc.mount.entry", "sys dev/.lxc/sys sysfs create=dir,optional") if err != nil { return err } } if err := c.c.SetConfigItem("lxc.rootfs", c.RootfsPathGet()); err != nil { return err } if err := c.c.SetConfigItem("lxc.loglevel", "0"); err != nil { return err } if err := c.c.SetConfigItem("lxc.utsname", c.NameGet()); err != nil { return err } if err := c.c.SetConfigItem("lxc.tty", "0"); err != nil { return err } if err := 
setupDevLxdMount(c.c); err != nil { return err } /* * Until stacked apparmor profiles are possible, we have to run nested * containers unconfined */ if aaEnabled { if aaConfined() { curProfile := aaProfile() shared.Debugf("Running %s in current profile %s (nested container)", c.name, curProfile) curProfile = strings.TrimSuffix(curProfile, " (enforce)") if err := c.c.SetConfigItem("lxc.aa_profile", curProfile); err != nil { return err } } else if err := c.c.SetConfigItem("lxc.aa_profile", AAProfileName(c)); err != nil { return err } } if err := c.c.SetConfigItem("lxc.seccomp", SeccompProfilePath(c)); err != nil { return err } for _, p := range c.profiles { if err := c.applyProfile(p); err != nil { return err } } // base per-container config should override profile config, so we apply it second if err := c.applyConfig(c.baseConfig); err != nil { return err } if err := c.setupMacAddresses(); err != nil { return err } // Allow overwrites of devices for k, v := range c.baseDevices { c.devices[k] = v } /* now add the lxc.* entries for the configured devices */ if err := c.applyDevices(); err != nil { return err } if !c.IsPrivileged() { if c.daemon.IdmapSet == nil { return fmt.Errorf("user has no subuids") } c.idmapset = c.daemon.IdmapSet // TODO - per-tenant idmaps } if err := c.mountShared(); err != nil { return err } if err := c.applyIdmapSet(); err != nil { return err } if err := c.applyPostDeviceConfig(); err != nil { return err } return nil } func (c *containerLXD) RenderState() (*shared.ContainerState, error) { statusCode := shared.FromLXCState(int(c.c.State())) status := shared.ContainerStatus{ Status: statusCode.String(), StatusCode: statusCode, } if c.IsRunning() { pid := c.InitPidGet() status.Init = pid status.Ips = c.iPsGet() } return &shared.ContainerState{ Name: c.name, Profiles: c.profiles, Config: c.baseConfig, ExpandedConfig: c.config, Userdata: []byte{}, Status: status, Devices: c.baseDevices, ExpandedDevices: c.devices, Ephemeral: c.ephemeral, }, nil } 
// Start boots the container: brings up its storage, (re)loads the AppArmor
// and seccomp profiles, saves the LXC config to a temp file, applies "start"
// templates, remaps the rootfs if the idmap changed since last run, then
// re-execs this binary as "lxd forkstart".
//
// Ordering matters: every failure after StorageStart() must tear the storage
// back down, which is why c.StorageStop() appears on each error path.
func (c *containerLXD) Start() error {
	if c.IsRunning() {
		return fmt.Errorf("the container is already running")
	}

	// Start the storage for this container
	if err := c.StorageStart(); err != nil {
		return err
	}

	/* (Re)Load the AA profile; we set it in the container's config above
	 * in init()
	 */
	if err := AALoadProfile(c); err != nil {
		c.StorageStop()
		return err
	}

	if err := SeccompCreateProfile(c); err != nil {
		c.StorageStop()
		return err
	}

	// Snapshot the generated LXC config into a 0600 temp file that
	// forkstart will consume.
	// NOTE(review): configPath is never os.Remove()d on success or on the
	// later error paths — temp-file leak; confirm whether forkstart cleans
	// it up.
	f, err := ioutil.TempFile("", "lxd_lxc_startconfig_")
	if err != nil {
		c.StorageStop()
		return err
	}
	configPath := f.Name()
	if err = f.Chmod(0600); err != nil {
		f.Close()
		os.Remove(configPath)
		c.StorageStop()
		return err
	}
	f.Close()

	err = c.c.SaveConfigFile(configPath)
	if err != nil {
		c.StorageStop()
		return err
	}

	err = c.TemplateApply("start")
	if err != nil {
		c.StorageStop()
		return err
	}

	/* Deal with idmap changes */
	idmap := c.IdmapSetGet()

	lastIdmap, err := c.LastIdmapSetGet()
	if err != nil {
		// NOTE(review): unlike every other error path after
		// StorageStart(), this one does not call c.StorageStop() —
		// likely a storage leak; confirm.
		return err
	}

	var jsonIdmap string
	if idmap != nil {
		idmapBytes, err := json.Marshal(idmap.Idmap)
		if err != nil {
			c.StorageStop()
			return err
		}
		jsonIdmap = string(idmapBytes)
	} else {
		jsonIdmap = "[]"
	}

	// If the idmap changed since the container last ran, shift the whole
	// rootfs out of the old map and into the new one.
	if !reflect.DeepEqual(idmap, lastIdmap) {
		shared.Debugf("Container idmap changed, remapping")

		if lastIdmap != nil {
			if err := lastIdmap.UnshiftRootfs(c.RootfsPathGet()); err != nil {
				c.StorageStop()
				return err
			}
		}

		if idmap != nil {
			if err := idmap.ShiftRootfs(c.RootfsPathGet()); err != nil {
				c.StorageStop()
				return err
			}
		}
	}

	// Persist the idmap we are starting with for the next Start().
	err = c.ConfigKeySet("volatile.last_state.idmap", jsonIdmap)
	if err != nil {
		c.StorageStop()
		return err
	}

	/* Actually start the container */
	err = exec.Command(
		os.Args[0],
		"forkstart",
		c.name,
		c.daemon.lxcpath,
		configPath).Run()

	if err != nil {
		c.StorageStop()
		err = fmt.Errorf(
			"Error calling 'lxd forkstart %s %s %s': err='%v'",
			c.name,
			c.daemon.lxcpath,
			path.Join(c.LogPathGet(), "lxc.conf"),
			err)
	}

	// Ephemeral containers get a watcher that deletes them on exit.
	if err == nil && c.ephemeral == true {
		containerWatchEphemeral(c.daemon, c)
	}

	return err
}

// Reboot restarts the container in place via liblxc.
// (Body continues on the next source line.)
func (c *containerLXD) Reboot() error {
	return c.c.Reboot()
}

// Freeze suspends all of the container's processes via the LXC freezer.
func (c *containerLXD) Freeze() error {
	return c.c.Freeze()
}

// IsNesting reports whether security.nesting is enabled ("1" or "true",
// case-insensitive).
func (c *containerLXD) IsNesting() bool {
	switch strings.ToLower(c.config["security.nesting"]) {
	case "1":
		return true
	case "true":
		return true
	}
	return false
}

// IsPrivileged reports whether security.privileged is enabled ("1" or
// "true", case-insensitive).
func (c *containerLXD) IsPrivileged() bool {
	switch strings.ToLower(c.config["security.privileged"]) {
	case "1":
		return true
	case "true":
		return true
	}
	return false
}

func (c *containerLXD) IsRunning() bool {
	return c.c.Running()
}

// Shutdown asks the container's init to stop cleanly within timeout, then
// tears down storage and unloads the AppArmor profile.
func (c *containerLXD) Shutdown(timeout time.Duration) error {
	if err := c.c.Shutdown(timeout); err != nil {
		// Still try to unload the storage.
		c.StorageStop()
		return err
	}

	// Stop the storage for this container
	if err := c.StorageStop(); err != nil {
		return err
	}

	if err := AAUnloadProfile(c); err != nil {
		return err
	}

	return nil
}

// Stop force-stops the container (no clean shutdown), then tears down
// storage and unloads the AppArmor profile. Mirrors Shutdown's teardown.
func (c *containerLXD) Stop() error {
	if err := c.c.Stop(); err != nil {
		// Still try to unload the storage.
		c.StorageStop()
		return err
	}

	// Stop the storage for this container
	if err := c.StorageStop(); err != nil {
		return err
	}

	if err := AAUnloadProfile(c); err != nil {
		return err
	}

	return nil
}

func (c *containerLXD) Unfreeze() error {
	return c.c.Unfreeze()
}

// The following are thin wrappers delegating to the container's storage
// backend.

func (c *containerLXD) StorageFromImage(hash string) error {
	return c.Storage.ContainerCreateFromImage(c, hash)
}

func (c *containerLXD) StorageFromNone() error {
	return c.Storage.ContainerCreate(c)
}

func (c *containerLXD) StorageStart() error {
	return c.Storage.ContainerStart(c)
}

func (c *containerLXD) StorageStop() error {
	return c.Storage.ContainerStop(c)
}

func (c *containerLXD) StorageGet() storage {
	return c.Storage
}

// Restore rolls the container back to the state captured in sourceContainer
// (a snapshot): stops it if needed, restores the filesystem, then replaces
// the configuration. (Body continues on the next source line.)
func (c *containerLXD) Restore(sourceContainer container) error {
	/*
	 * restore steps:
	 * 1. stop container if already running
	 * 2. copy snapshot rootfs to container
	 * 3. overwrite existing config with snapshot config
	 */

	// Stop the container
	// TODO: stateful restore ?
	// (Continuation of Restore() — its signature is on the previous
	// source line.)
	wasRunning := false
	if c.IsRunning() {
		wasRunning = true
		if err := c.Stop(); err != nil {
			shared.Log.Error(
				"RESTORE => could not stop container",
				log.Ctx{
					"container": c.NameGet(),
					"err":       err})
			return err
		}
		shared.Log.Debug(
			"RESTORE => Stopped container",
			log.Ctx{"container": c.NameGet()})
	}

	// Restore the FS.
	// TODO: I switched the FS and config restore, think thats the correct way
	// (pcdummy)
	err := c.Storage.ContainerRestore(c, sourceContainer)
	if err != nil {
		shared.Log.Error("RESTORE => Restoring the filesystem failed",
			log.Ctx{
				"source":      sourceContainer.NameGet(),
				"destination": c.NameGet()})
		return err
	}

	// Overwrite our config/profiles/devices with the snapshot's.
	args := containerLXDArgs{
		Ctype:        cTypeRegular,
		Config:       sourceContainer.ConfigGet(),
		Profiles:     sourceContainer.ProfilesGet(),
		Ephemeral:    sourceContainer.IsEphemeral(),
		Architecture: sourceContainer.ArchitectureGet(),
		Devices:      sourceContainer.DevicesGet(),
	}
	err = c.ConfigReplace(args)
	if err != nil {
		shared.Log.Error("RESTORE => Restore of the configuration failed",
			log.Ctx{
				"source":      sourceContainer.NameGet(),
				"destination": c.NameGet()})
		return err
	}

	if wasRunning {
		// NOTE(review): Start() error is silently discarded here — a
		// restore that fails to restart the container still reports
		// success; confirm whether that is intentional (best-effort).
		c.Start()
	}

	return nil
}

// Delete removes the container (or snapshot) from its storage backend and
// the database, then cleans up its AppArmor and seccomp profiles.
func (c *containerLXD) Delete() error {
	shared.Log.Debug("containerLXD.Delete", log.Ctx{"c.name": c.NameGet(), "type": c.cType})

	switch c.cType {
	case cTypeRegular:
		// Regular containers take their snapshots with them.
		if err := containerDeleteSnapshots(c.daemon, c.NameGet()); err != nil {
			return err
		}

		if err := c.Storage.ContainerDelete(c); err != nil {
			return err
		}
	case cTypeSnapshot:
		if err := c.Storage.ContainerSnapshotDelete(c); err != nil {
			return err
		}
	default:
		return fmt.Errorf("Unknown cType: %d", c.cType)
	}

	if err := dbContainerRemove(c.daemon.db, c.NameGet()); err != nil {
		return err
	}

	// Profile cleanup is best-effort: errors are ignored by design here.
	AADeleteProfile(c)
	SeccompDeleteProfile(c)

	return nil
}

// Rename renames the container on storage and in the database, cascading
// the rename to all of its snapshots. Only allowed while stopped.
// (Body continues on the next source line.)
func (c *containerLXD) Rename(newName string) error {
	oldName := c.NameGet()

	if c.IsRunning() {
		return fmt.Errorf("renaming of running container not allowed")
	}

	if c.IsSnapshot() {
		if err := c.Storage.ContainerSnapshotRename(c, newName); err != nil {
			return err
		}
	} else {
		// (Continuation of Rename() — its signature is on the previous
		// source line.)
		if err := c.Storage.ContainerRename(c, newName); err != nil {
			return err
		}
	}

	if err := dbContainerRename(c.daemon.db, oldName, newName); err != nil {
		return err
	}

	// Cascade the rename to every snapshot of this container; failures
	// are logged but do not abort the rename.
	results, err := dbContainerGetSnapshots(c.daemon.db, oldName)
	if err != nil {
		return err
	}

	for _, sname := range results {
		sc, err := containerLXDLoad(c.daemon, sname)
		if err != nil {
			shared.Log.Error(
				"containerDeleteSnapshots: Failed to load the snapshotcontainer",
				log.Ctx{"container": oldName, "snapshot": sname})
			continue
		}
		baseSnapName := filepath.Base(sname)
		newSnapshotName := newName + shared.SnapshotDelimiter + baseSnapName
		if err := sc.Rename(newSnapshotName); err != nil {
			shared.Log.Error(
				"containerDeleteSnapshots: Failed to rename a snapshotcontainer",
				log.Ctx{"container": oldName, "snapshot": sname, "err": err})
		}
	}

	c.name = newName

	// Recreate the LX Container
	c.c = nil
	c.init()

	return nil
}

// Simple accessors over the containerLXD fields.

func (c *containerLXD) IsEphemeral() bool {
	return c.ephemeral
}

func (c *containerLXD) IsSnapshot() bool {
	return c.cType == cTypeSnapshot
}

func (c *containerLXD) IDGet() int {
	return c.id
}

func (c *containerLXD) NameGet() string {
	return c.name
}

func (c *containerLXD) ArchitectureGet() int {
	return c.architecture
}

// PathGet returns the on-disk path for newName (or for this container when
// newName is empty), using the snapshot path layout when applicable.
func (c *containerLXD) PathGet(newName string) string {
	if newName != "" {
		return containerPathGet(newName, c.IsSnapshot())
	}
	return containerPathGet(c.NameGet(), c.IsSnapshot())
}

func (c *containerLXD) RootfsPathGet() string {
	return path.Join(c.PathGet(""), "rootfs")
}

func (c *containerLXD) TemplatesPathGet() string {
	return path.Join(c.PathGet(""), "templates")
}

func (c *containerLXD) StateDirGet() string {
	return path.Join(c.PathGet(""), "state")
}

func (c *containerLXD) LogPathGet() string {
	return shared.LogPath(c.NameGet())
}

func (c *containerLXD) LogFilePathGet() string {
	return filepath.Join(c.LogPathGet(), "lxc.log")
}

func (c *containerLXD) InitPidGet() int {
	return c.c.InitPid()
}

func (c *containerLXD) StateGet() string {
	return c.c.State().String()
}

func (c *containerLXD) IdmapSetGet() *shared.IdmapSet {
	return c.idmapset
}

// LastIdmapSetGet returns the idmap the container last ran with, parsed
// from the volatile.last_state.idmap config key. An empty key falls back to
// the current idmap; an empty stored map ("[]") yields (nil, nil).
func (c *containerLXD) LastIdmapSetGet() (*shared.IdmapSet, error) {
	config := c.ConfigGet()
	lastJsonIdmap := config["volatile.last_state.idmap"]

	if lastJsonIdmap == "" {
		return c.IdmapSetGet(), nil
	}

	lastIdmap := new(shared.IdmapSet)
	err := json.Unmarshal([]byte(lastJsonIdmap), &lastIdmap.Idmap)
	if err != nil {
		return nil, err
	}

	if len(lastIdmap.Idmap) == 0 {
		return nil, nil
	}

	return lastIdmap, nil
}

// ConfigKeySet sets a single base-config key and persists it by replaying
// the whole container definition through ConfigReplace.
func (c *containerLXD) ConfigKeySet(key string, value string) error {
	c.baseConfig[key] = value

	args := containerLXDArgs{
		Ctype:        c.cType,
		Config:       c.baseConfig,
		Profiles:     c.profiles,
		Ephemeral:    c.ephemeral,
		Architecture: c.architecture,
		Devices:      c.baseDevices,
	}

	return c.ConfigReplace(args)
}

func (c *containerLXD) LXContainerGet() *lxc.Container {
	return c.c
}

// ConfigReplace replaces the config of container and tries to live apply
// the new configuration.
func (c *containerLXD) ConfigReplace(newContainerArgs containerLXDArgs) error {
	/* check to see that the config actually applies to the container
	 * successfully before saving it. in particular, raw.lxc and
	 * raw.apparmor need to be parsed once to make sure they make sense.
	 */
	preDevList := c.devices

	/* Validate devices */
	if err := validateConfig(c, newContainerArgs.Devices); err != nil {
		return err
	}

	if err := c.applyConfig(newContainerArgs.Config); err != nil {
		return err
	}

	tx, err := dbBegin(c.daemon.db)
	if err != nil {
		return err
	}

	/* Update config or profiles */
	if err = dbContainerConfigClear(tx, c.id); err != nil {
		shared.Log.Debug(
			"Error clearing configuration for container",
			log.Ctx{"name": c.NameGet()})
		tx.Rollback()
		return err
	}

	if err = dbContainerConfigInsert(tx, c.id, newContainerArgs.Config); err != nil {
		shared.Debugf("Error inserting configuration for container %s", c.NameGet())
		tx.Rollback()
		return err
	}

	/* handle profiles */
	if emptyProfile(newContainerArgs.Profiles) {
		_, err := tx.Exec("DELETE from containers_profiles where container_id=?", c.id)
		if err != nil {
			tx.Rollback()
			return err
		}
	} else {
		if err := dbContainerProfilesInsert(tx, c.id, newContainerArgs.Profiles); err != nil {
			tx.Rollback()
			return err
		}
	}

	err = dbDevicesAdd(tx, "container", int64(c.id), newContainerArgs.Devices)
	if err != nil {
		tx.Rollback()
		return err
	}

	// NOTE(review): this error path leaves tx open (no Rollback/Commit);
	// confirm whether the abandoned transaction is cleaned up elsewhere.
	if err := c.applyPostDeviceConfig(); err != nil {
		return err
	}

	c.baseConfig = newContainerArgs.Config
	c.baseDevices = newContainerArgs.Devices

	/* Let's try to load the apparmor profile again, in case the
	 * raw.apparmor config was changed (or deleted). Make sure we do this
	 * before commit, in case it fails because the user screwed something
	 * up so we can roll back and not hose their container.
	 *
	 * For containers that aren't running, we just want to parse the new
	 * profile; this is because this code is called during the start
	 * process after the profile is loaded but before the container starts,
	 * which will cause a container start to fail. If the container is
	 * running, we /do/ want to reload the profile, because we want the
	 * changes to take effect immediately.
	 */
	if !c.IsRunning() {
		AAParseProfile(c)
		return txCommit(tx)
	}

	if err := AALoadProfile(c); err != nil {
		tx.Rollback()
		return err
	}

	if err := txCommit(tx); err != nil {
		return err
	}

	// add devices from new profile list to the desired goal set
	for _, p := range c.profiles {
		profileDevs, err := dbDevicesGet(c.daemon.db, p, true)
		if err != nil {
			return fmt.Errorf("Error reading devices from profile '%s': %v", p, err)
		}

		newContainerArgs.Devices.ExtendFromProfile(preDevList, profileDevs)
	}

	tx, err = dbBegin(c.daemon.db)
	if err != nil {
		return err
	}

	// NOTE(review): on error this second transaction is neither rolled
	// back nor committed — inconsistent with every other tx error path in
	// this function; confirm.
	if err := devicesApplyDeltaLive(tx, c, preDevList, newContainerArgs.Devices); err != nil {
		return err
	}

	if err := txCommit(tx); err != nil {
		return err
	}

	return nil
}

func (c *containerLXD) ConfigGet() map[string]string {
	return c.config
}

func (c *containerLXD) DevicesGet() shared.Devices {
	return c.devices
}

func (c *containerLXD) ProfilesGet() []string {
	return c.profiles
}

/*
 * Export the container to a unshifted tarfile containing:
 * dir/
 *     metadata.yaml
 *     rootfs/
 */
func (c *containerLXD) ExportToTar(snap string, w io.Writer) error {
	if snap == "" && c.IsRunning() {
		return fmt.Errorf("Cannot export a running container as image")
	}

	idmap, err := c.LastIdmapSetGet()
	if err != nil {
		return err
	}

	// Unshift the rootfs so the tarball contains host-neutral ids; shift
	// it back when done.
	if idmap != nil {
		if err := idmap.UnshiftRootfs(c.RootfsPathGet()); err != nil {
			return err
		}

		defer idmap.ShiftRootfs(c.RootfsPathGet())
	}

	tw := tar.NewWriter(w)

	// keep track of the first path we saw for each path with nlink>1
	linkmap := map[uint64]string{}

	cDir := c.PathGet("")

	// Path inside the tar image is the pathname starting after cDir
	offset := len(cDir) + 1

	writeToTar := func(path string, fi os.FileInfo, err error) error {
		if err := c.tarStoreFile(linkmap, offset, tw, path, fi); err != nil {
			shared.Debugf("Error tarring up %s: %s", path, err)
			return err
		}
		return nil
	}

	fnam := filepath.Join(cDir, "metadata.yaml")
	if shared.PathExists(fnam) {
		fi, err := os.Lstat(fnam)
		if err != nil {
			shared.Debugf("Error statting %s during exportToTar", fnam)
			tw.Close()
			return err
		}

		if err := c.tarStoreFile(linkmap, offset, tw, fnam, fi); err != nil {
			shared.Debugf("Error writing to tarfile: %s", err)
			tw.Close()
			return err
		}
	}
	// NOTE(review): Walk errors for rootfs/templates are discarded here;
	// a partial archive could still report success — confirm intent.
	fnam = filepath.Join(cDir, "rootfs")
	filepath.Walk(fnam, writeToTar)
	fnam = filepath.Join(cDir, "templates")
	if shared.PathExists(fnam) {
		filepath.Walk(fnam, writeToTar)
	}
	return tw.Close()
}

// TemplateApply renders every template in metadata.yaml whose "when" list
// contains trigger (e.g. "start", "create") into the container's rootfs.
// (Body continues on the next source line.)
func (c *containerLXD) TemplateApply(trigger string) error {
	fname := path.Join(c.PathGet(""), "metadata.yaml")

	if !shared.PathExists(fname) {
		return nil
	}

	content, err := ioutil.ReadFile(fname)
	if err != nil {
		return err
	}

	metadata := new(imageMetadata)
	err = yaml.Unmarshal(content, &metadata)

	if err != nil {
		return fmt.Errorf("Could not parse %s: %v", fname, err)
	}

	for filepath, template := range metadata.Templates {
		var w *os.File

		// Skip templates that are not bound to this trigger.
		found := false
		for _, tplTrigger := range template.When {
			if tplTrigger == trigger {
				found = true
				break
			}
		}

		if !found {
			continue
		}

		fullpath := shared.VarPath("containers", c.name, "rootfs", strings.TrimLeft(filepath, "/"))

		if shared.PathExists(fullpath) {
			w, err = os.Create(fullpath)
			if err != nil {
				return err
			}
		} else {
			// Create missing parents/file owned by the container's
			// shifted root for unprivileged containers.
			uid := 0
			gid := 0
			if !c.IsPrivileged() {
				uid, gid = c.idmapset.ShiftIntoNs(0, 0)
			}
			shared.MkdirAllOwner(path.Dir(fullpath), 0755, uid, gid)

			w, err = os.Create(fullpath)
			if err != nil {
				return err
			}

			if !c.IsPrivileged() {
				w.Chown(uid, gid)
			}
			w.Chmod(0644)
		}

		tplString, err := ioutil.ReadFile(shared.VarPath("containers", c.name, "templates", template.Template))
		if err != nil {
			return err
		}

		tpl, err := pongo2.FromString("{% autoescape off %}" + string(tplString) + "{% endautoescape %}")
		if err != nil {
			return err
		}

		containerMeta := make(map[string]string)
		containerMeta["name"] = c.name
		containerMeta["architecture"], _ = shared.ArchitectureName(c.architecture)

		if c.ephemeral {
			containerMeta["ephemeral"] = "true"
		} else {
			containerMeta["ephemeral"] = "false"
		}

		if c.IsPrivileged() {
			containerMeta["privileged"] = "true"
		} else {
			// (Continuation of TemplateApply() — its signature is on
			// an earlier source line.)
			containerMeta["privileged"] = "false"
		}

		// "config_get" helper exposed to templates: returns the config
		// value for a key, or the supplied default when unset.
		configGet := func(confKey, confDefault *pongo2.Value) *pongo2.Value {
			val, ok := c.config[confKey.String()]
			if !ok {
				return confDefault
			}

			return pongo2.AsValue(strings.TrimRight(val, "\r\n"))
		}

		tpl.ExecuteWriter(pongo2.Context{"trigger": trigger,
			"path":       filepath,
			"container":  containerMeta,
			"config":     c.config,
			"devices":    c.devices,
			"properties": template.Properties,
			"config_get": configGet}, w)
	}

	return nil
}

// DetachMount unmounts m["path"] inside a running container by re-execing
// this binary as "forkumount". No-op if the container is not running.
func (c *containerLXD) DetachMount(m shared.Device) error {
	// TODO - in case of reboot, we should remove the lxc.mount.entry. Trick
	// is, we can't d.c.ClearConfigItem bc that will clear all the keys. So
	// we should get the full list, clear, then reinsert all but the one we're
	// removing
	shared.Debugf("Mounts detach not yet implemented")

	pid := c.c.InitPid()
	if pid == -1 { // container not running
		return nil
	}
	pidstr := fmt.Sprintf("%d", pid)
	return exec.Command(os.Args[0], "forkumount", pidstr, m["path"]).Run()
}

// AttachMount bind-mounts a host directory or mounts a block device into
// the container: it records an lxc.mount.entry for the next boot and, when
// the container is running, live-mounts via a staging dir under shmounts
// plus a "forkmount" move-mount inside the container's namespace.
func (c *containerLXD) AttachMount(m shared.Device) error {
	dest := m["path"]
	source := m["source"]
	opts := ""
	fstype := "none"
	flags := 0
	sb, err := os.Stat(source)
	if err != nil {
		return err
	}
	if sb.IsDir() {
		flags |= syscall.MS_BIND
		opts = "bind,create=dir"
	} else {
		if !shared.IsBlockdev(sb.Mode()) {
			// Not sure if we want to try dealing with loopdevs, but
			// since we'd need to deal with partitions i think not.
			// We also might want to support file bind mounting, but
			// this doesn't do that.
			return fmt.Errorf("non-block device file not supported\n")
		}

		fstype, err = shared.BlockFsDetect(source)
		if err != nil {
			return fmt.Errorf("Unable to detect fstype for %s: %s\n", source, err)
		}
	}

	// add a lxc.mount.entry = souce destination, in case of reboot
	if m["readonly"] == "1" || m["readonly"] == "true" {
		if opts == "" {
			opts = "ro"
		} else {
			opts = opts + ",ro"
		}
	}
	optional := false
	if m["optional"] == "1" || m["optional"] == "true" {
		optional = true
		opts = opts + ",optional"
	}

	entry := fmt.Sprintf("%s %s %s %s 0 0", source, dest, fstype, opts)
	if err := c.c.SetConfigItem("lxc.mount.entry", entry); err != nil {
		return err
	}

	pid := c.c.InitPid()
	if pid == -1 { // container not running - we're done
		return nil
	}

	// now live-mount
	tmpMount, err := ioutil.TempDir(shared.VarPath("shmounts", c.name), "lxdmount_")
	if err != nil {
		return err
	}

	// NOTE(review): if this Mount fails, tmpMount is never removed
	// (cleanup below only runs on the success path) — staging-dir leak;
	// confirm.
	err = syscall.Mount(m["source"], tmpMount, fstype, uintptr(flags), "")
	if err != nil {
		return err
	}

	mntsrc := filepath.Join("/dev/.lxd-mounts", filepath.Base(tmpMount))

	// finally we need to move-mount this in the container
	pidstr := fmt.Sprintf("%d", pid)
	err = exec.Command(os.Args[0], "forkmount", pidstr, mntsrc, m["path"]).Run()

	syscall.Unmount(tmpMount, syscall.MNT_DETACH) // in case forkmount failed
	os.Remove(tmpMount)

	// "optional" mounts swallow forkmount failures.
	if err != nil && !optional {
		return err
	}
	return nil
}

// applyConfig translates LXD config keys into lxc.* config items on the
// underlying liblxc container and mirrors every key into c.config.
func (c *containerLXD) applyConfig(config map[string]string) error {
	var err error
	for k, v := range config {
		switch k {
		case "limits.cpus":
			// TODO - Come up with a way to choose cpus for multiple
			// containers
			var vint int
			// NOTE(review): `count, err :=` shadows the outer err in
			// this case's scope, so the SetConfigItem error assigned
			// below is never seen by the `if err != nil` after the
			// switch — cpuset failures are silently dropped; confirm.
			count, err := fmt.Sscanf(v, "%d", &vint)
			if err != nil {
				return err
			}
			if count != 1 || vint < 0 || vint > 65000 {
				return fmt.Errorf("Bad cpu limit: %s\n", v)
			}
			cpuset := fmt.Sprintf("0-%d", vint-1)
			err = c.c.SetConfigItem("lxc.cgroup.cpuset.cpus", cpuset)
		case "limits.memory":
			err = c.c.SetConfigItem("lxc.cgroup.memory.limit_in_bytes", v)

		default:
			if strings.HasPrefix(k, "environment.") {
				c.c.SetConfigItem("lxc.environment", fmt.Sprintf("%s=%s", strings.TrimPrefix(k, "environment."), v))
			}

			/* Things like security.privileged need to be propagated */
			c.config[k] = v
		}
		if err != nil {
			shared.Debugf("Error setting %s: %q", k, err)
			return err
		}
	}
	return nil
}

func (c *containerLXD) applyPostDeviceConfig() error {
	// applies config that must be delayed until after devices are
	// instantiated, see bug #588 and fix #635
	if lxcConfig, ok := c.config["raw.lxc"]; ok {
		if err := validateRawLxc(lxcConfig); err != nil {
			return err
		}

		// raw.lxc is round-tripped through a temp file because liblxc
		// only loads config from disk.
		f, err := ioutil.TempFile("", "lxd_config_")
		if err != nil {
			return err
		}

		err = shared.WriteAll(f, []byte(lxcConfig))
		f.Close()
		defer os.Remove(f.Name())
		if err != nil {
			return err
		}

		if err := c.c.LoadConfigFile(f.Name()); err != nil {
			return fmt.Errorf("problem applying raw.lxc, perhaps there is a syntax error?")
		}
	}
	return nil
}

// applyProfile merges profile p's config keys and devices from the database
// into this container, then applies the config via applyConfig.
func (c *containerLXD) applyProfile(p string) error {
	q := `SELECT key, value FROM profiles_config JOIN profiles ON profiles.id=profiles_config.profile_id WHERE profiles.name=?`
	var k, v string
	inargs := []interface{}{p}
	outfmt := []interface{}{k, v}
	result, err := dbQueryScan(c.daemon.db, q, inargs, outfmt)
	if err != nil {
		return err
	}

	config := map[string]string{}
	for _, r := range result {
		k = r[0].(string)
		v = r[1].(string)
		config[k] = v
	}

	newdevs, err := dbDevicesGet(c.daemon.db, p, true)
	if err != nil {
		return err
	}
	for k, v := range newdevs {
		c.devices[k] = v
	}

	return c.applyConfig(config)
}

// updateContainerHWAddr copies a freshly-won hwaddr value v onto the first
// nic device whose volatile.<iface>.hwaddr key matches k.
func (c *containerLXD) updateContainerHWAddr(k, v string) {
	for name, d := range c.devices {
		if d["type"] != "nic" {
			continue
		}

		for key := range c.config {
			device, err := extractInterfaceFromConfigName(key)
			if err == nil && device == name {
				d["hwaddr"] = v
				c.config[key] = v
				return
			}
		}
	}
}

// setupMacAddresses ensures every nic device has a concrete MAC address:
// templates ("xx" bytes) are expanded to random values, stored under
// volatile.<name>.hwaddr, and persisted to the DB with race handling
// against concurrent starters. (Body continues on following source lines.)
func (c *containerLXD) setupMacAddresses() error {
	newConfigEntries := map[string]string{}

	for name, d := range c.devices {
		if d["type"] != "nic" {
			continue
		}

		found := false

		// Prefer an already-recorded volatile hwaddr for this nic.
		for key, val := range c.config {
			device, err := extractInterfaceFromConfigName(key)
			if err == nil && device == name {
				found = true
				d["hwaddr"] = val
			}
		}

		if !found {
			var hwaddr string
			var err error
			if d["hwaddr"] != "" {
				hwaddr, err = generateMacAddr(d["hwaddr"])
				if err != nil {
					return err
				}
			} else {
				// Default to the LXC OUI template.
				hwaddr, err = generateMacAddr("00:16:3e:xx:xx:xx")
				if err != nil {
					return err
				}
			}

			if hwaddr != d["hwaddr"] {
				d["hwaddr"] = hwaddr
				key := fmt.Sprintf("volatile.%s.hwaddr", name)
				c.config[key] = hwaddr
				c.baseConfig[key] = hwaddr
				newConfigEntries[key] = hwaddr
			}
		}
	}

	if len(newConfigEntries) > 0 {
		tx, err := dbBegin(c.daemon.db)
		if err != nil {
			return err
		}

		/*
		 * My logic may be flawed here, but it seems to me that one of
		 * the following must be true:
		 * 1. The current database entry equals what we had stored.
		 *    Our update takes precedence
		 * 2. The current database entry is different from what we had
		 *    stored. Someone updated it since we last grabbed the
		 *    container configuration. So either
		 *    a. it contains 'x' and is a template. We have generated
		 *       a real mac, so our update takes precedence
		 *    b. it contains no 'x' and is an hwaddr, not template. We
		 *       defer to the racer's update since it may be actually
		 *       starting the container.
		 */
		str := "INSERT INTO containers_config (container_id, key, value) values (?, ?, ?)"
		stmt, err := tx.Prepare(str)
		if err != nil {
			tx.Rollback()
			return err
		}
		defer stmt.Close()

		ustr := "UPDATE containers_config SET value=? WHERE container_id=? AND key=?"
		ustmt, err := tx.Prepare(ustr)
		if err != nil {
			tx.Rollback()
			return err
		}
		defer ustmt.Close()

		qstr := "SELECT value FROM containers_config WHERE container_id=? AND key=?"
		// (Continuation of setupMacAddresses() — its signature and the
		// prepared INSERT/UPDATE statements are on earlier source lines.)
		qstmt, err := tx.Prepare(qstr)
		if err != nil {
			tx.Rollback()
			return err
		}
		defer qstmt.Close()

		for k, v := range newConfigEntries {
			var racer string
			err := qstmt.QueryRow(c.id, k).Scan(&racer)
			if err == sql.ErrNoRows {
				// No row yet: we win, insert our generated MAC.
				_, err = stmt.Exec(c.id, k, v)
				if err != nil {
					shared.Debugf("Error adding mac address to container")
					tx.Rollback()
					return err
				}
			} else if err != nil {
				tx.Rollback()
				return err
			} else if strings.Contains(racer, "x") {
				// Row holds an unexpanded template: replace it.
				_, err = ustmt.Exec(v, c.id, k)
				if err != nil {
					shared.Debugf("Error updating mac address to container")
					tx.Rollback()
					return err
				}
			} else {
				// we accept the racing task's update
				c.updateContainerHWAddr(k, v)
			}
		}

		err = txCommit(tx)
		if err != nil {
			fmt.Printf("setupMacAddresses: (TxCommit) error %s\n", err)
		}
		return err
	}

	return nil
}

// applyIdmapSet writes the container's idmap as lxc.id_map lines. No-op
// when no idmap is set (privileged container).
func (c *containerLXD) applyIdmapSet() error {
	if c.idmapset == nil {
		return nil
	}
	lines := c.idmapset.ToLxcString()
	for _, line := range lines {
		err := c.c.SetConfigItem("lxc.id_map", line+"\n")
		if err != nil {
			return err
		}
	}
	return nil
}

// applyDevices converts each configured device to lxc.* config items and
// sets them, in sorted device-name order for determinism.
func (c *containerLXD) applyDevices() error {
	var keys []string
	for k := range c.devices {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, name := range keys {
		d := c.devices[name]
		if name == "type" {
			continue
		}

		configs, err := deviceToLxc(d)
		if err != nil {
			return fmt.Errorf("Failed configuring device %s: %s\n", name, err)
		}
		for _, line := range configs {
			err := c.c.SetConfigItem(line[0], line[1])
			if err != nil {
				return fmt.Errorf("Failed configuring device %s: %s\n", name, err)
			}
		}
	}
	return nil
}

// iPsGet returns the running container's addresses per interface, resolving
// each interface's host-side veth name from the live LXC config. Errors are
// swallowed: a best-effort (possibly empty) list is returned.
func (c *containerLXD) iPsGet() []shared.Ip {
	ips := []shared.Ip{}
	names, err := c.c.Interfaces()
	if err != nil {
		return ips
	}
	for _, n := range names {
		addresses, err := c.c.IPAddress(n)
		if err != nil {
			continue
		}

		veth := ""

		// Find the lxc.network.<i> entry matching this interface to
		// learn its host veth pair.
		for i := 0; i < len(c.c.ConfigItem("lxc.network")); i++ {
			nicName := c.c.RunningConfigItem(fmt.Sprintf("lxc.network.%d.name", i))[0]
			if nicName != n {
				continue
			}

			interfaceType := c.c.RunningConfigItem(fmt.Sprintf("lxc.network.%d.type", i))
			if interfaceType[0] != "veth" {
				continue
			}

			veth = c.c.RunningConfigItem(fmt.Sprintf("lxc.network.%d.veth.pair", i))[0]
			break
		}
		for _, a := range addresses {
			ip := shared.Ip{Interface: n, Address: a, HostVeth: veth}
			if net.ParseIP(a).To4() == nil {
				ip.Protocol = "IPV6"
			} else {
				ip.Protocol = "IPV4"
			}
			ips = append(ips, ip)
		}
	}
	return ips
}

// tarStoreFile writes one filesystem entry into tw for ExportToTar,
// handling symlinks, device nodes, hardlink dedup via linkmap, and id
// unshifting for unprivileged containers. offset strips the container dir
// prefix from archive names.
func (c *containerLXD) tarStoreFile(linkmap map[uint64]string, offset int, tw *tar.Writer, path string, fi os.FileInfo) error {
	var err error
	var major, minor, nlink int
	var ino uint64

	link := ""
	if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
		link, err = os.Readlink(path)
		if err != nil {
			return err
		}
	}
	hdr, err := tar.FileInfoHeader(fi, link)
	if err != nil {
		return err
	}

	hdr.Name = path[offset:]
	if fi.IsDir() || fi.Mode()&os.ModeSymlink == os.ModeSymlink {
		hdr.Size = 0
	} else {
		hdr.Size = fi.Size()
	}

	hdr.Uid, hdr.Gid, major, minor, ino, nlink, err = shared.GetFileStat(path)
	if err != nil {
		return fmt.Errorf("error getting file info: %s\n", err)
	}

	// unshift the id under /rootfs/ for unpriv containers
	if !c.IsPrivileged() && strings.HasPrefix(hdr.Name, "/rootfs") {
		hdr.Uid, hdr.Gid = c.idmapset.ShiftFromNs(hdr.Uid, hdr.Gid)
		// Skip entries whose ids don't map back out of the namespace.
		if hdr.Uid == -1 || hdr.Gid == -1 {
			return nil
		}
	}
	if major != -1 {
		hdr.Devmajor = int64(major)
		hdr.Devminor = int64(minor)
	}

	// If it's a hardlink we've already seen use the old name
	if fi.Mode().IsRegular() && nlink > 1 {
		if firstpath, found := linkmap[ino]; found {
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = firstpath
			hdr.Size = 0
		} else {
			linkmap[ino] = hdr.Name
		}
	}

	// todo - handle xattrs
	if err := tw.WriteHeader(hdr); err != nil {
		return fmt.Errorf("error writing header: %s\n", err)
	}

	if hdr.Typeflag == tar.TypeReg {
		f, err := os.Open(path)
		if err != nil {
			return fmt.Errorf("tarStoreFile: error opening file: %s\n", err)
		}
		defer f.Close()
		if _, err := io.Copy(tw, f); err != nil {
			return fmt.Errorf("error copying file %s\n", err)
		}
	}
	return nil
}

// mkdirAllContainerRoot creates path (and parents) owned by the container's
// root uid/gid — the shifted ids for unprivileged containers, host root
// otherwise.
func (c *containerLXD) mkdirAllContainerRoot(path string, perm os.FileMode) error {
	var uid int
	var gid int
	if !c.IsPrivileged() {
		uid, gid = c.idmapset.ShiftIntoNs(0, 0)
		if uid == -1 {
			uid = 0
		}
		if gid == -1 {
			gid = 0
		}
	}
	return shared.MkdirAllOwner(path, perm, uid, gid)
}

// mountShared arranges for the per-container shmounts directory to be
// bind-mounted at dev/.lxd-mounts inside the container.
func (c *containerLXD) mountShared() error {
	source := shared.VarPath("shmounts", c.NameGet())
	entry := fmt.Sprintf("%s dev/.lxd-mounts none bind,create=dir 0 0", source)
	if !shared.PathExists(source) {
		if err := c.mkdirAllContainerRoot(source, 0755); err != nil {
			return err
		}
	}
	return c.c.SetConfigItem("lxc.mount.entry", entry)
}

func (c *containerLXD) Checkpoint(opts lxc.CheckpointOptions) error {
	return c.c.Checkpoint(opts)
}

// StartFromMigration starts the container from a migrated image directory
// by re-execing this binary as "forkmigrate".
func (c *containerLXD) StartFromMigration(imagesDir string) error {
	// NOTE(review): the temp file is removed immediately after creation
	// and then SaveConfigFile re-creates the same name — presumably to
	// reserve a unique path; confirm this isn't racy.
	f, err := ioutil.TempFile("", "lxd_lxc_migrateconfig_")
	if err != nil {
		return err
	}

	if err = f.Chmod(0600); err != nil {
		f.Close()
		os.Remove(f.Name())
		return err
	}

	f.Close()
	os.Remove(f.Name())

	if err := c.c.SaveConfigFile(f.Name()); err != nil {
		return err
	}

	/* (Re)Load the AA profile; we set it in the container's config above
	 * in init()
	 */
	if err := AALoadProfile(c); err != nil {
		c.StorageStop()
		return err
	}

	if err := SeccompCreateProfile(c); err != nil {
		c.StorageStop()
		return err
	}

	cmd := exec.Command(
		os.Args[0],
		"forkmigrate",
		c.name,
		c.c.ConfigPath(),
		f.Name(),
		imagesDir,
	)

	return cmd.Run()
}

apply profiles and config before we check for and act on nested/privileged properties

Signed-off-by: Serge Hallyn <3df611b026e4639bee8aef7a4beb2e39fcebb313@ubuntu.com>

package main

import (
	"archive/tar"
	"bytes"
	"crypto/rand"
	"database/sql"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"math/big"
	"net"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"reflect"
	"regexp"
	"sort"
	"strings"
	"syscall"
	"time"

	"gopkg.in/flosch/pongo2.v3"
	"gopkg.in/lxc/go-lxc.v2"
	"gopkg.in/yaml.v2"

	"github.com/lxc/lxd/shared"

	log "gopkg.in/inconshreveable/log15.v2"
)

// ExtractInterfaceFromConfigName returns "eth0" from "volatile.eth0.hwaddr",
// or an error if the key does not match this pattern.
func extractInterfaceFromConfigName(k string) (string, error) {
	re := regexp.MustCompile("volatile\\.([^.]*)\\.hwaddr")
	m := re.FindStringSubmatch(k)
	if m != nil && len(m) > 1 {
		return m[1], nil
	}

	return "", fmt.Errorf("%s did not match", k)
}

// validateRawLxc rejects raw.lxc payloads that try to set lxc.logfile
// (LXD owns the log file location).
func validateRawLxc(rawLxc string) error {
	for _, line := range strings.Split(rawLxc, "\n") {
		membs := strings.SplitN(line, "=", 2)
		if strings.ToLower(strings.Trim(membs[0], " \t")) == "lxc.logfile" {
			return fmt.Errorf("setting lxc.logfile is not allowed")
		}
	}

	return nil
}

// GenerateMacAddr generates a mac address from a string template:
// e.g. "00:11:22:xx:xx:xx" -> "00:11:22:af:3e:51"
func generateMacAddr(template string) (string, error) {
	ret := bytes.Buffer{}

	for _, c := range template {
		if c == 'x' {
			// Each 'x' becomes one random hex nibble.
			c, err := rand.Int(rand.Reader, big.NewInt(16))
			if err != nil {
				return "", err
			}
			ret.WriteString(fmt.Sprintf("%x", c.Int64()))
		} else {
			ret.WriteString(string(c))
		}
	}

	return ret.String(), nil
}

// containerPathGet returns the storage path for a container or snapshot name.
func containerPathGet(name string, isSnapshot bool) string {
	if isSnapshot {
		return shared.VarPath("snapshots", name)
	}
	return shared.VarPath("containers", name)
}

// containerLXDArgs contains every argument needed to create an LXD Container
type containerLXDArgs struct {
	ID           int // Leave it empty when you create one.
	Ctype        containerType
	Config       map[string]string
	Profiles     []string
	Ephemeral    bool
	BaseImage    string
	Architecture int
	Devices      shared.Devices
}

// containerLXD is the concrete container implementation backed by liblxc.
type containerLXD struct {
	c            *lxc.Container
	daemon       *Daemon
	id           int
	name         string
	config       map[string]string // expanded config (profiles + base)
	profiles     []string
	devices      shared.Devices // expanded devices (profiles + base)
	architecture int
	ephemeral    bool
	idmapset     *shared.IdmapSet
	cType        containerType

	// base* hold the container's own values before profile expansion.
	baseConfig  map[string]string
	baseDevices shared.Devices

	Storage storage
}

// container is the interface implemented by containerLXD.
// NOTE(review): this declaration is cut off at the end of the visible
// chunk — it continues past this view.
type container interface {
	RenderState() (*shared.ContainerState, error)
	Reboot() error
	Freeze() error
	Shutdown(timeout time.Duration) error
	Start() error
	Stop() error
	Unfreeze() error
	Delete() error
	Restore(sourceContainer container) error
	Rename(newName string) error
	ConfigReplace(newConfig containerLXDArgs) error
	StorageStart() error
	StorageStop() error
	StorageGet() storage
	IsPrivileged() bool
	IsRunning() bool
	IsEphemeral() bool
	IsSnapshot() bool
	IDGet() int
	NameGet() string
	ArchitectureGet() int
	ConfigGet() map[string]string
	ConfigKeySet(key string, value string) error
	DevicesGet() shared.Devices
	ProfilesGet() []string
	PathGet(newName string) string
	RootfsPathGet() string
	TemplatesPathGet() string
	StateDirGet() string
	LogFilePathGet() string
	LogPathGet() string
	InitPidGet() int
	StateGet() string
	IdmapSetGet() *shared.IdmapSet
	LastIdmapSetGet() (*shared.IdmapSet, error)
	TemplateApply(trigger string) error
	ExportToTar(snap string, w io.Writer) error
	Checkpoint(opts lxc.CheckpointOptions) error
	StartFromMigration(imagesDir string) error

	// TODO: Remove every use of this and remove it.
LXContainerGet() *lxc.Container DetachMount(m shared.Device) error AttachMount(m shared.Device) error } func containerLXDCreateAsEmpty(d *Daemon, name string, args containerLXDArgs) (container, error) { // Create the container c, err := containerLXDCreateInternal(d, name, args) if err != nil { return nil, err } // Now create the empty storage if err := c.Storage.ContainerCreate(c); err != nil { c.Delete() return nil, err } return c, nil } func containerLXDCreateFromImage(d *Daemon, name string, args containerLXDArgs, hash string) (container, error) { // Create the container c, err := containerLXDCreateInternal(d, name, args) if err != nil { return nil, err } if err := dbImageLastAccessUpdate(d.db, hash); err != nil { return nil, fmt.Errorf("Error updating image last use date: %s", err) } // Now create the storage from an image if err := c.Storage.ContainerCreateFromImage(c, hash); err != nil { c.Delete() return nil, err } return c, nil } func containerLXDCreateAsCopy(d *Daemon, name string, args containerLXDArgs, sourceContainer container) (container, error) { c, err := containerLXDCreateInternal(d, name, args) if err != nil { return nil, err } if err := c.ConfigReplace(args); err != nil { c.Delete() return nil, err } if err := c.Storage.ContainerCopy(c, sourceContainer); err != nil { c.Delete() return nil, err } return c, nil } func containerLXDCreateAsSnapshot(d *Daemon, name string, args containerLXDArgs, sourceContainer container, stateful bool) (container, error) { c, err := containerLXDCreateInternal(d, name, args) if err != nil { return nil, err } c.Storage = sourceContainer.StorageGet() if err := c.Storage.ContainerSnapshotCreate(c, sourceContainer); err != nil { c.Delete() return nil, err } if stateful { stateDir := c.StateDirGet() err = os.MkdirAll(stateDir, 0700) if err != nil { c.Delete() return nil, err } // TODO - shouldn't we freeze for the duration of rootfs snapshot below? 
if !sourceContainer.IsRunning() { c.Delete() return nil, fmt.Errorf("Container not running\n") } opts := lxc.CheckpointOptions{Directory: stateDir, Stop: true, Verbose: true} err = sourceContainer.Checkpoint(opts) err2 := CollectCRIULogFile(sourceContainer, stateDir, "snapshot", "dump") if err2 != nil { shared.Log.Warn("failed to collect criu log file", log.Ctx{"error": err2}) } if err != nil { c.Delete() return nil, err } } return c, nil } func validContainerName(name string) error { if strings.Contains(name, shared.SnapshotDelimiter) { return fmt.Errorf( "The character '%s' is reserved for snapshots.", shared.SnapshotDelimiter) } return nil } func containerLXDCreateInternal( d *Daemon, name string, args containerLXDArgs) (*containerLXD, error) { shared.Log.Info( "Container create", log.Ctx{ "container": name, "isSnapshot": args.Ctype == cTypeSnapshot}) if args.Ctype != cTypeSnapshot { if err := validContainerName(name); err != nil { return nil, err } } path := containerPathGet(name, args.Ctype == cTypeSnapshot) if shared.PathExists(path) { shared.Log.Error( "The container already exists on disk", log.Ctx{ "container": name, "path": path}) return nil, fmt.Errorf( "The container already exists on disk, container: '%s', path: '%s'", name, path) } if args.Profiles == nil { args.Profiles = []string{"default"} } if args.Config == nil { args.Config = map[string]string{} } if args.BaseImage != "" { args.Config["volatile.base_image"] = args.BaseImage } if args.Devices == nil { args.Devices = shared.Devices{} } profiles, err := dbProfilesGet(d.db) if err != nil { return nil, err } for _, profile := range args.Profiles { if !shared.StringInSlice(profile, profiles) { return nil, fmt.Errorf("Requested profile '%s' doesn't exist", profile) } } id, err := dbContainerCreate(d.db, name, args) if err != nil { return nil, err } shared.Log.Debug( "Container created in the DB", log.Ctx{"container": name, "id": id}) baseConfig := map[string]string{} if err := 
shared.DeepCopy(&args.Config, &baseConfig); err != nil { return nil, err } baseDevices := shared.Devices{} if err := shared.DeepCopy(&args.Devices, &baseDevices); err != nil { return nil, err } c := &containerLXD{ daemon: d, id: id, name: name, ephemeral: args.Ephemeral, architecture: args.Architecture, config: args.Config, profiles: args.Profiles, devices: args.Devices, cType: args.Ctype, baseConfig: baseConfig, baseDevices: baseDevices} // No need to detect storage here, its a new container. c.Storage = d.Storage if err := c.init(); err != nil { c.Delete() // Delete the container from the DB. return nil, err } idmap := c.IdmapSetGet() var jsonIdmap string if idmap != nil { idmapBytes, err := json.Marshal(idmap.Idmap) if err != nil { c.Delete() return nil, err } jsonIdmap = string(idmapBytes) } else { jsonIdmap = "[]" } err = c.ConfigKeySet("volatile.last_state.idmap", jsonIdmap) if err != nil { c.Delete() return nil, err } return c, nil } func containerLXDLoad(d *Daemon, name string) (container, error) { shared.Log.Debug("Container load", log.Ctx{"container": name}) args, err := dbContainerGet(d.db, name) if err != nil { return nil, err } baseConfig := map[string]string{} if err := shared.DeepCopy(&args.Config, &baseConfig); err != nil { return nil, err } baseDevices := shared.Devices{} if err := shared.DeepCopy(&args.Devices, &baseDevices); err != nil { return nil, err } c := &containerLXD{ daemon: d, id: args.ID, name: name, ephemeral: args.Ephemeral, architecture: args.Architecture, config: args.Config, profiles: args.Profiles, devices: args.Devices, cType: args.Ctype, baseConfig: baseConfig, baseDevices: baseDevices} s, err := storageForFilename(d, c.PathGet("")) if err != nil { shared.Log.Warn("Couldn't detect storage.", log.Ctx{"container": c.NameGet()}) c.Storage = d.Storage } else { c.Storage = s } if err := c.init(); err != nil { return nil, err } return c, nil } // init prepares the LXContainer for this LXD Container // TODO: This gets called on each 
load of the container, // we might be able to split this is up into c.Start(). func (c *containerLXD) init() error { templateConfBase := "ubuntu" templateConfDir := os.Getenv("LXD_LXC_TEMPLATE_CONFIG") if templateConfDir == "" { templateConfDir = "/usr/share/lxc/config" } cc, err := lxc.NewContainer(c.NameGet(), c.daemon.lxcpath) if err != nil { return err } c.c = cc logfile := c.LogFilePathGet() if err := os.MkdirAll(filepath.Dir(logfile), 0700); err != nil { return err } if err = c.c.SetLogFile(logfile); err != nil { return err } personality, err := shared.ArchitecturePersonality(c.architecture) if err == nil { if err := c.c.SetConfigItem("lxc.arch", personality); err != nil { return err } } err = c.c.SetConfigItem("lxc.include", fmt.Sprintf("%s/%s.common.conf", templateConfDir, templateConfBase)) if err != nil { return err } if err := c.c.SetConfigItem("lxc.rootfs", c.RootfsPathGet()); err != nil { return err } if err := c.c.SetConfigItem("lxc.loglevel", "0"); err != nil { return err } if err := c.c.SetConfigItem("lxc.utsname", c.NameGet()); err != nil { return err } if err := c.c.SetConfigItem("lxc.tty", "0"); err != nil { return err } if err := setupDevLxdMount(c.c); err != nil { return err } for _, p := range c.profiles { if err := c.applyProfile(p); err != nil { return err } } // base per-container config should override profile config, so we apply it second if err := c.applyConfig(c.baseConfig); err != nil { return err } if !c.IsPrivileged() { err = c.c.SetConfigItem("lxc.include", fmt.Sprintf("%s/%s.userns.conf", templateConfDir, templateConfBase)) if err != nil { return err } } if c.IsNesting() { shared.Debugf("Setting up %s for nesting", c.name) orig := c.c.ConfigItem("lxc.mount.auto") auto := "" if len(orig) == 1 { auto = orig[0] } if !strings.Contains(auto, "cgroup") { auto = fmt.Sprintf("%s %s", auto, "cgroup:mixed") err = c.c.SetConfigItem("lxc.mount.auto", auto) if err != nil { return err } } /* * mount extra /proc and /sys to work around kernel * 
restrictions on remounting them when covered */ err = c.c.SetConfigItem("lxc.mount.entry", "proc dev/.lxc/proc proc create=dir,optional") if err != nil { return err } err = c.c.SetConfigItem("lxc.mount.entry", "sys dev/.lxc/sys sysfs create=dir,optional") if err != nil { return err } } /* * Until stacked apparmor profiles are possible, we have to run nested * containers unconfined */ if aaEnabled { if aaConfined() { curProfile := aaProfile() shared.Debugf("Running %s in current profile %s (nested container)", c.name, curProfile) curProfile = strings.TrimSuffix(curProfile, " (enforce)") if err := c.c.SetConfigItem("lxc.aa_profile", curProfile); err != nil { return err } } else if err := c.c.SetConfigItem("lxc.aa_profile", AAProfileName(c)); err != nil { return err } } if err := c.c.SetConfigItem("lxc.seccomp", SeccompProfilePath(c)); err != nil { return err } if err := c.setupMacAddresses(); err != nil { return err } // Allow overwrites of devices for k, v := range c.baseDevices { c.devices[k] = v } /* now add the lxc.* entries for the configured devices */ if err := c.applyDevices(); err != nil { return err } if !c.IsPrivileged() { if c.daemon.IdmapSet == nil { return fmt.Errorf("user has no subuids") } c.idmapset = c.daemon.IdmapSet // TODO - per-tenant idmaps } if err := c.mountShared(); err != nil { return err } if err := c.applyIdmapSet(); err != nil { return err } if err := c.applyPostDeviceConfig(); err != nil { return err } return nil } func (c *containerLXD) RenderState() (*shared.ContainerState, error) { statusCode := shared.FromLXCState(int(c.c.State())) status := shared.ContainerStatus{ Status: statusCode.String(), StatusCode: statusCode, } if c.IsRunning() { pid := c.InitPidGet() status.Init = pid status.Ips = c.iPsGet() } return &shared.ContainerState{ Name: c.name, Profiles: c.profiles, Config: c.baseConfig, ExpandedConfig: c.config, Userdata: []byte{}, Status: status, Devices: c.baseDevices, ExpandedDevices: c.devices, Ephemeral: c.ephemeral, }, nil 
} func (c *containerLXD) Start() error { if c.IsRunning() { return fmt.Errorf("the container is already running") } // Start the storage for this container if err := c.StorageStart(); err != nil { return err } /* (Re)Load the AA profile; we set it in the container's config above * in init() */ if err := AALoadProfile(c); err != nil { c.StorageStop() return err } if err := SeccompCreateProfile(c); err != nil { c.StorageStop() return err } f, err := ioutil.TempFile("", "lxd_lxc_startconfig_") if err != nil { c.StorageStop() return err } configPath := f.Name() if err = f.Chmod(0600); err != nil { f.Close() os.Remove(configPath) c.StorageStop() return err } f.Close() err = c.c.SaveConfigFile(configPath) if err != nil { c.StorageStop() return err } err = c.TemplateApply("start") if err != nil { c.StorageStop() return err } /* Deal with idmap changes */ idmap := c.IdmapSetGet() lastIdmap, err := c.LastIdmapSetGet() if err != nil { return err } var jsonIdmap string if idmap != nil { idmapBytes, err := json.Marshal(idmap.Idmap) if err != nil { c.StorageStop() return err } jsonIdmap = string(idmapBytes) } else { jsonIdmap = "[]" } if !reflect.DeepEqual(idmap, lastIdmap) { shared.Debugf("Container idmap changed, remapping") if lastIdmap != nil { if err := lastIdmap.UnshiftRootfs(c.RootfsPathGet()); err != nil { c.StorageStop() return err } } if idmap != nil { if err := idmap.ShiftRootfs(c.RootfsPathGet()); err != nil { c.StorageStop() return err } } } err = c.ConfigKeySet("volatile.last_state.idmap", jsonIdmap) if err != nil { c.StorageStop() return err } /* Actually start the container */ err = exec.Command( os.Args[0], "forkstart", c.name, c.daemon.lxcpath, configPath).Run() if err != nil { c.StorageStop() err = fmt.Errorf( "Error calling 'lxd forkstart %s %s %s': err='%v'", c.name, c.daemon.lxcpath, path.Join(c.LogPathGet(), "lxc.conf"), err) } if err == nil && c.ephemeral == true { containerWatchEphemeral(c.daemon, c) } return err } func (c *containerLXD) Reboot() error 
{ return c.c.Reboot() } func (c *containerLXD) Freeze() error { return c.c.Freeze() } func (c *containerLXD) IsNesting() bool { switch strings.ToLower(c.config["security.nesting"]) { case "1": return true case "true": return true } return false } func (c *containerLXD) IsPrivileged() bool { switch strings.ToLower(c.config["security.privileged"]) { case "1": return true case "true": return true } return false } func (c *containerLXD) IsRunning() bool { return c.c.Running() } func (c *containerLXD) Shutdown(timeout time.Duration) error { if err := c.c.Shutdown(timeout); err != nil { // Still try to unload the storage. c.StorageStop() return err } // Stop the storage for this container if err := c.StorageStop(); err != nil { return err } if err := AAUnloadProfile(c); err != nil { return err } return nil } func (c *containerLXD) Stop() error { if err := c.c.Stop(); err != nil { // Still try to unload the storage. c.StorageStop() return err } // Stop the storage for this container if err := c.StorageStop(); err != nil { return err } if err := AAUnloadProfile(c); err != nil { return err } return nil } func (c *containerLXD) Unfreeze() error { return c.c.Unfreeze() } func (c *containerLXD) StorageFromImage(hash string) error { return c.Storage.ContainerCreateFromImage(c, hash) } func (c *containerLXD) StorageFromNone() error { return c.Storage.ContainerCreate(c) } func (c *containerLXD) StorageStart() error { return c.Storage.ContainerStart(c) } func (c *containerLXD) StorageStop() error { return c.Storage.ContainerStop(c) } func (c *containerLXD) StorageGet() storage { return c.Storage } func (c *containerLXD) Restore(sourceContainer container) error { /* * restore steps: * 1. stop container if already running * 2. copy snapshot rootfs to container * 3. overwrite existing config with snapshot config */ // Stop the container // TODO: stateful restore ? 
wasRunning := false if c.IsRunning() { wasRunning = true if err := c.Stop(); err != nil { shared.Log.Error( "RESTORE => could not stop container", log.Ctx{ "container": c.NameGet(), "err": err}) return err } shared.Log.Debug( "RESTORE => Stopped container", log.Ctx{"container": c.NameGet()}) } // Restore the FS. // TODO: I switched the FS and config restore, think thats the correct way // (pcdummy) err := c.Storage.ContainerRestore(c, sourceContainer) if err != nil { shared.Log.Error("RESTORE => Restoring the filesystem failed", log.Ctx{ "source": sourceContainer.NameGet(), "destination": c.NameGet()}) return err } args := containerLXDArgs{ Ctype: cTypeRegular, Config: sourceContainer.ConfigGet(), Profiles: sourceContainer.ProfilesGet(), Ephemeral: sourceContainer.IsEphemeral(), Architecture: sourceContainer.ArchitectureGet(), Devices: sourceContainer.DevicesGet(), } err = c.ConfigReplace(args) if err != nil { shared.Log.Error("RESTORE => Restore of the configuration failed", log.Ctx{ "source": sourceContainer.NameGet(), "destination": c.NameGet()}) return err } if wasRunning { c.Start() } return nil } func (c *containerLXD) Delete() error { shared.Log.Debug("containerLXD.Delete", log.Ctx{"c.name": c.NameGet(), "type": c.cType}) switch c.cType { case cTypeRegular: if err := containerDeleteSnapshots(c.daemon, c.NameGet()); err != nil { return err } if err := c.Storage.ContainerDelete(c); err != nil { return err } case cTypeSnapshot: if err := c.Storage.ContainerSnapshotDelete(c); err != nil { return err } default: return fmt.Errorf("Unknown cType: %d", c.cType) } if err := dbContainerRemove(c.daemon.db, c.NameGet()); err != nil { return err } AADeleteProfile(c) SeccompDeleteProfile(c) return nil } func (c *containerLXD) Rename(newName string) error { oldName := c.NameGet() if c.IsRunning() { return fmt.Errorf("renaming of running container not allowed") } if c.IsSnapshot() { if err := c.Storage.ContainerSnapshotRename(c, newName); err != nil { return err } } else { 
if err := c.Storage.ContainerRename(c, newName); err != nil { return err } } if err := dbContainerRename(c.daemon.db, oldName, newName); err != nil { return err } results, err := dbContainerGetSnapshots(c.daemon.db, oldName) if err != nil { return err } for _, sname := range results { sc, err := containerLXDLoad(c.daemon, sname) if err != nil { shared.Log.Error( "containerDeleteSnapshots: Failed to load the snapshotcontainer", log.Ctx{"container": oldName, "snapshot": sname}) continue } baseSnapName := filepath.Base(sname) newSnapshotName := newName + shared.SnapshotDelimiter + baseSnapName if err := sc.Rename(newSnapshotName); err != nil { shared.Log.Error( "containerDeleteSnapshots: Failed to rename a snapshotcontainer", log.Ctx{"container": oldName, "snapshot": sname, "err": err}) } } c.name = newName // Recreate the LX Container c.c = nil c.init() return nil } func (c *containerLXD) IsEphemeral() bool { return c.ephemeral } func (c *containerLXD) IsSnapshot() bool { return c.cType == cTypeSnapshot } func (c *containerLXD) IDGet() int { return c.id } func (c *containerLXD) NameGet() string { return c.name } func (c *containerLXD) ArchitectureGet() int { return c.architecture } func (c *containerLXD) PathGet(newName string) string { if newName != "" { return containerPathGet(newName, c.IsSnapshot()) } return containerPathGet(c.NameGet(), c.IsSnapshot()) } func (c *containerLXD) RootfsPathGet() string { return path.Join(c.PathGet(""), "rootfs") } func (c *containerLXD) TemplatesPathGet() string { return path.Join(c.PathGet(""), "templates") } func (c *containerLXD) StateDirGet() string { return path.Join(c.PathGet(""), "state") } func (c *containerLXD) LogPathGet() string { return shared.LogPath(c.NameGet()) } func (c *containerLXD) LogFilePathGet() string { return filepath.Join(c.LogPathGet(), "lxc.log") } func (c *containerLXD) InitPidGet() int { return c.c.InitPid() } func (c *containerLXD) StateGet() string { return c.c.State().String() } func (c 
*containerLXD) IdmapSetGet() *shared.IdmapSet { return c.idmapset } func (c *containerLXD) LastIdmapSetGet() (*shared.IdmapSet, error) { config := c.ConfigGet() lastJsonIdmap := config["volatile.last_state.idmap"] if lastJsonIdmap == "" { return c.IdmapSetGet(), nil } lastIdmap := new(shared.IdmapSet) err := json.Unmarshal([]byte(lastJsonIdmap), &lastIdmap.Idmap) if err != nil { return nil, err } if len(lastIdmap.Idmap) == 0 { return nil, nil } return lastIdmap, nil } func (c *containerLXD) ConfigKeySet(key string, value string) error { c.baseConfig[key] = value args := containerLXDArgs{ Ctype: c.cType, Config: c.baseConfig, Profiles: c.profiles, Ephemeral: c.ephemeral, Architecture: c.architecture, Devices: c.baseDevices, } return c.ConfigReplace(args) } func (c *containerLXD) LXContainerGet() *lxc.Container { return c.c } // ConfigReplace replaces the config of container and tries to live apply // the new configuration. func (c *containerLXD) ConfigReplace(newContainerArgs containerLXDArgs) error { /* check to see that the config actually applies to the container * successfully before saving it. in particular, raw.lxc and * raw.apparmor need to be parsed once to make sure they make sense. 
*/ preDevList := c.devices /* Validate devices */ if err := validateConfig(c, newContainerArgs.Devices); err != nil { return err } if err := c.applyConfig(newContainerArgs.Config); err != nil { return err } tx, err := dbBegin(c.daemon.db) if err != nil { return err } /* Update config or profiles */ if err = dbContainerConfigClear(tx, c.id); err != nil { shared.Log.Debug( "Error clearing configuration for container", log.Ctx{"name": c.NameGet()}) tx.Rollback() return err } if err = dbContainerConfigInsert(tx, c.id, newContainerArgs.Config); err != nil { shared.Debugf("Error inserting configuration for container %s", c.NameGet()) tx.Rollback() return err } /* handle profiles */ if emptyProfile(newContainerArgs.Profiles) { _, err := tx.Exec("DELETE from containers_profiles where container_id=?", c.id) if err != nil { tx.Rollback() return err } } else { if err := dbContainerProfilesInsert(tx, c.id, newContainerArgs.Profiles); err != nil { tx.Rollback() return err } } err = dbDevicesAdd(tx, "container", int64(c.id), newContainerArgs.Devices) if err != nil { tx.Rollback() return err } if err := c.applyPostDeviceConfig(); err != nil { return err } c.baseConfig = newContainerArgs.Config c.baseDevices = newContainerArgs.Devices /* Let's try to load the apparmor profile again, in case the * raw.apparmor config was changed (or deleted). Make sure we do this * before commit, in case it fails because the user screwed something * up so we can roll back and not hose their container. * * For containers that aren't running, we just want to parse the new * profile; this is because this code is called during the start * process after the profile is loaded but before the container starts, * which will cause a container start to fail. If the container is * running, we /do/ want to reload the profile, because we want the * changes to take effect immediately. 
*/ if !c.IsRunning() { AAParseProfile(c) return txCommit(tx) } if err := AALoadProfile(c); err != nil { tx.Rollback() return err } if err := txCommit(tx); err != nil { return err } // add devices from new profile list to the desired goal set for _, p := range c.profiles { profileDevs, err := dbDevicesGet(c.daemon.db, p, true) if err != nil { return fmt.Errorf("Error reading devices from profile '%s': %v", p, err) } newContainerArgs.Devices.ExtendFromProfile(preDevList, profileDevs) } tx, err = dbBegin(c.daemon.db) if err != nil { return err } if err := devicesApplyDeltaLive(tx, c, preDevList, newContainerArgs.Devices); err != nil { return err } if err := txCommit(tx); err != nil { return err } return nil } func (c *containerLXD) ConfigGet() map[string]string { return c.config } func (c *containerLXD) DevicesGet() shared.Devices { return c.devices } func (c *containerLXD) ProfilesGet() []string { return c.profiles } /* * Export the container to a unshifted tarfile containing: * dir/ * metadata.yaml * rootfs/ */ func (c *containerLXD) ExportToTar(snap string, w io.Writer) error { if snap == "" && c.IsRunning() { return fmt.Errorf("Cannot export a running container as image") } idmap, err := c.LastIdmapSetGet() if err != nil { return err } if idmap != nil { if err := idmap.UnshiftRootfs(c.RootfsPathGet()); err != nil { return err } defer idmap.ShiftRootfs(c.RootfsPathGet()) } tw := tar.NewWriter(w) // keep track of the first path we saw for each path with nlink>1 linkmap := map[uint64]string{} cDir := c.PathGet("") // Path inside the tar image is the pathname starting after cDir offset := len(cDir) + 1 writeToTar := func(path string, fi os.FileInfo, err error) error { if err := c.tarStoreFile(linkmap, offset, tw, path, fi); err != nil { shared.Debugf("Error tarring up %s: %s", path, err) return err } return nil } fnam := filepath.Join(cDir, "metadata.yaml") if shared.PathExists(fnam) { fi, err := os.Lstat(fnam) if err != nil { shared.Debugf("Error statting %s during 
exportToTar", fnam) tw.Close() return err } if err := c.tarStoreFile(linkmap, offset, tw, fnam, fi); err != nil { shared.Debugf("Error writing to tarfile: %s", err) tw.Close() return err } } fnam = filepath.Join(cDir, "rootfs") filepath.Walk(fnam, writeToTar) fnam = filepath.Join(cDir, "templates") if shared.PathExists(fnam) { filepath.Walk(fnam, writeToTar) } return tw.Close() } func (c *containerLXD) TemplateApply(trigger string) error { fname := path.Join(c.PathGet(""), "metadata.yaml") if !shared.PathExists(fname) { return nil } content, err := ioutil.ReadFile(fname) if err != nil { return err } metadata := new(imageMetadata) err = yaml.Unmarshal(content, &metadata) if err != nil { return fmt.Errorf("Could not parse %s: %v", fname, err) } for filepath, template := range metadata.Templates { var w *os.File found := false for _, tplTrigger := range template.When { if tplTrigger == trigger { found = true break } } if !found { continue } fullpath := shared.VarPath("containers", c.name, "rootfs", strings.TrimLeft(filepath, "/")) if shared.PathExists(fullpath) { w, err = os.Create(fullpath) if err != nil { return err } } else { uid := 0 gid := 0 if !c.IsPrivileged() { uid, gid = c.idmapset.ShiftIntoNs(0, 0) } shared.MkdirAllOwner(path.Dir(fullpath), 0755, uid, gid) w, err = os.Create(fullpath) if err != nil { return err } if !c.IsPrivileged() { w.Chown(uid, gid) } w.Chmod(0644) } tplString, err := ioutil.ReadFile(shared.VarPath("containers", c.name, "templates", template.Template)) if err != nil { return err } tpl, err := pongo2.FromString("{% autoescape off %}" + string(tplString) + "{% endautoescape %}") if err != nil { return err } containerMeta := make(map[string]string) containerMeta["name"] = c.name containerMeta["architecture"], _ = shared.ArchitectureName(c.architecture) if c.ephemeral { containerMeta["ephemeral"] = "true" } else { containerMeta["ephemeral"] = "false" } if c.IsPrivileged() { containerMeta["privileged"] = "true" } else { 
containerMeta["privileged"] = "false" } configGet := func(confKey, confDefault *pongo2.Value) *pongo2.Value { val, ok := c.config[confKey.String()] if !ok { return confDefault } return pongo2.AsValue(strings.TrimRight(val, "\r\n")) } tpl.ExecuteWriter(pongo2.Context{"trigger": trigger, "path": filepath, "container": containerMeta, "config": c.config, "devices": c.devices, "properties": template.Properties, "config_get": configGet}, w) } return nil } func (c *containerLXD) DetachMount(m shared.Device) error { // TODO - in case of reboot, we should remove the lxc.mount.entry. Trick // is, we can't d.c.ClearConfigItem bc that will clear all the keys. So // we should get the full list, clear, then reinsert all but the one we're // removing shared.Debugf("Mounts detach not yet implemented") pid := c.c.InitPid() if pid == -1 { // container not running return nil } pidstr := fmt.Sprintf("%d", pid) return exec.Command(os.Args[0], "forkumount", pidstr, m["path"]).Run() } func (c *containerLXD) AttachMount(m shared.Device) error { dest := m["path"] source := m["source"] opts := "" fstype := "none" flags := 0 sb, err := os.Stat(source) if err != nil { return err } if sb.IsDir() { flags |= syscall.MS_BIND opts = "bind,create=dir" } else { if !shared.IsBlockdev(sb.Mode()) { // Not sure if we want to try dealing with loopdevs, but // since we'd need to deal with partitions i think not. // We also might want to support file bind mounting, but // this doesn't do that. 
return fmt.Errorf("non-block device file not supported\n") } fstype, err = shared.BlockFsDetect(source) if err != nil { return fmt.Errorf("Unable to detect fstype for %s: %s\n", source, err) } } // add a lxc.mount.entry = souce destination, in case of reboot if m["readonly"] == "1" || m["readonly"] == "true" { if opts == "" { opts = "ro" } else { opts = opts + ",ro" } } optional := false if m["optional"] == "1" || m["optional"] == "true" { optional = true opts = opts + ",optional" } entry := fmt.Sprintf("%s %s %s %s 0 0", source, dest, fstype, opts) if err := c.c.SetConfigItem("lxc.mount.entry", entry); err != nil { return err } pid := c.c.InitPid() if pid == -1 { // container not running - we're done return nil } // now live-mount tmpMount, err := ioutil.TempDir(shared.VarPath("shmounts", c.name), "lxdmount_") if err != nil { return err } err = syscall.Mount(m["source"], tmpMount, fstype, uintptr(flags), "") if err != nil { return err } mntsrc := filepath.Join("/dev/.lxd-mounts", filepath.Base(tmpMount)) // finally we need to move-mount this in the container pidstr := fmt.Sprintf("%d", pid) err = exec.Command(os.Args[0], "forkmount", pidstr, mntsrc, m["path"]).Run() syscall.Unmount(tmpMount, syscall.MNT_DETACH) // in case forkmount failed os.Remove(tmpMount) if err != nil && !optional { return err } return nil } func (c *containerLXD) applyConfig(config map[string]string) error { var err error for k, v := range config { switch k { case "limits.cpus": // TODO - Come up with a way to choose cpus for multiple // containers var vint int count, err := fmt.Sscanf(v, "%d", &vint) if err != nil { return err } if count != 1 || vint < 0 || vint > 65000 { return fmt.Errorf("Bad cpu limit: %s\n", v) } cpuset := fmt.Sprintf("0-%d", vint-1) err = c.c.SetConfigItem("lxc.cgroup.cpuset.cpus", cpuset) case "limits.memory": err = c.c.SetConfigItem("lxc.cgroup.memory.limit_in_bytes", v) default: if strings.HasPrefix(k, "environment.") { c.c.SetConfigItem("lxc.environment", 
fmt.Sprintf("%s=%s", strings.TrimPrefix(k, "environment."), v)) } /* Things like security.privileged need to be propagated */ c.config[k] = v } if err != nil { shared.Debugf("Error setting %s: %q", k, err) return err } } return nil } func (c *containerLXD) applyPostDeviceConfig() error { // applies config that must be delayed until after devices are // instantiated, see bug #588 and fix #635 if lxcConfig, ok := c.config["raw.lxc"]; ok { if err := validateRawLxc(lxcConfig); err != nil { return err } f, err := ioutil.TempFile("", "lxd_config_") if err != nil { return err } err = shared.WriteAll(f, []byte(lxcConfig)) f.Close() defer os.Remove(f.Name()) if err != nil { return err } if err := c.c.LoadConfigFile(f.Name()); err != nil { return fmt.Errorf("problem applying raw.lxc, perhaps there is a syntax error?") } } return nil } func (c *containerLXD) applyProfile(p string) error { q := `SELECT key, value FROM profiles_config JOIN profiles ON profiles.id=profiles_config.profile_id WHERE profiles.name=?` var k, v string inargs := []interface{}{p} outfmt := []interface{}{k, v} result, err := dbQueryScan(c.daemon.db, q, inargs, outfmt) if err != nil { return err } config := map[string]string{} for _, r := range result { k = r[0].(string) v = r[1].(string) config[k] = v } newdevs, err := dbDevicesGet(c.daemon.db, p, true) if err != nil { return err } for k, v := range newdevs { c.devices[k] = v } return c.applyConfig(config) } func (c *containerLXD) updateContainerHWAddr(k, v string) { for name, d := range c.devices { if d["type"] != "nic" { continue } for key := range c.config { device, err := extractInterfaceFromConfigName(key) if err == nil && device == name { d["hwaddr"] = v c.config[key] = v return } } } } func (c *containerLXD) setupMacAddresses() error { newConfigEntries := map[string]string{} for name, d := range c.devices { if d["type"] != "nic" { continue } found := false for key, val := range c.config { device, err := extractInterfaceFromConfigName(key) if err 
== nil && device == name { found = true d["hwaddr"] = val } } if !found { var hwaddr string var err error if d["hwaddr"] != "" { hwaddr, err = generateMacAddr(d["hwaddr"]) if err != nil { return err } } else { hwaddr, err = generateMacAddr("00:16:3e:xx:xx:xx") if err != nil { return err } } if hwaddr != d["hwaddr"] { d["hwaddr"] = hwaddr key := fmt.Sprintf("volatile.%s.hwaddr", name) c.config[key] = hwaddr c.baseConfig[key] = hwaddr newConfigEntries[key] = hwaddr } } } if len(newConfigEntries) > 0 { tx, err := dbBegin(c.daemon.db) if err != nil { return err } /* * My logic may be flawed here, but it seems to me that one of * the following must be true: * 1. The current database entry equals what we had stored. * Our update akes precedence * 2. The current database entry is different from what we had * stored. Someone updated it since we last grabbed the * container configuration. So either * a. it contains 'x' and is a template. We have generated * a real mac, so our update takes precedence * b. it contains no 'x' and is an hwaddr, not template. We * defer to the racer's update since it may be actually * starting the container. */ str := "INSERT INTO containers_config (container_id, key, value) values (?, ?, ?)" stmt, err := tx.Prepare(str) if err != nil { tx.Rollback() return err } defer stmt.Close() ustr := "UPDATE containers_config SET value=? WHERE container_id=? AND key=?" ustmt, err := tx.Prepare(ustr) if err != nil { tx.Rollback() return err } defer ustmt.Close() qstr := "SELECT value FROM containers_config WHERE container_id=? AND key=?" 
qstmt, err := tx.Prepare(qstr) if err != nil { tx.Rollback() return err } defer qstmt.Close() for k, v := range newConfigEntries { var racer string err := qstmt.QueryRow(c.id, k).Scan(&racer) if err == sql.ErrNoRows { _, err = stmt.Exec(c.id, k, v) if err != nil { shared.Debugf("Error adding mac address to container") tx.Rollback() return err } } else if err != nil { tx.Rollback() return err } else if strings.Contains(racer, "x") { _, err = ustmt.Exec(v, c.id, k) if err != nil { shared.Debugf("Error updating mac address to container") tx.Rollback() return err } } else { // we accept the racing task's update c.updateContainerHWAddr(k, v) } } err = txCommit(tx) if err != nil { fmt.Printf("setupMacAddresses: (TxCommit) error %s\n", err) } return err } return nil } func (c *containerLXD) applyIdmapSet() error { if c.idmapset == nil { return nil } lines := c.idmapset.ToLxcString() for _, line := range lines { err := c.c.SetConfigItem("lxc.id_map", line+"\n") if err != nil { return err } } return nil } func (c *containerLXD) applyDevices() error { var keys []string for k := range c.devices { keys = append(keys, k) } sort.Strings(keys) for _, name := range keys { d := c.devices[name] if name == "type" { continue } configs, err := deviceToLxc(d) if err != nil { return fmt.Errorf("Failed configuring device %s: %s\n", name, err) } for _, line := range configs { err := c.c.SetConfigItem(line[0], line[1]) if err != nil { return fmt.Errorf("Failed configuring device %s: %s\n", name, err) } } } return nil } func (c *containerLXD) iPsGet() []shared.Ip { ips := []shared.Ip{} names, err := c.c.Interfaces() if err != nil { return ips } for _, n := range names { addresses, err := c.c.IPAddress(n) if err != nil { continue } veth := "" for i := 0; i < len(c.c.ConfigItem("lxc.network")); i++ { nicName := c.c.RunningConfigItem(fmt.Sprintf("lxc.network.%d.name", i))[0] if nicName != n { continue } interfaceType := c.c.RunningConfigItem(fmt.Sprintf("lxc.network.%d.type", i)) if 
interfaceType[0] != "veth" { continue } veth = c.c.RunningConfigItem(fmt.Sprintf("lxc.network.%d.veth.pair", i))[0] break } for _, a := range addresses { ip := shared.Ip{Interface: n, Address: a, HostVeth: veth} if net.ParseIP(a).To4() == nil { ip.Protocol = "IPV6" } else { ip.Protocol = "IPV4" } ips = append(ips, ip) } } return ips } func (c *containerLXD) tarStoreFile(linkmap map[uint64]string, offset int, tw *tar.Writer, path string, fi os.FileInfo) error { var err error var major, minor, nlink int var ino uint64 link := "" if fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err = os.Readlink(path) if err != nil { return err } } hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return err } hdr.Name = path[offset:] if fi.IsDir() || fi.Mode()&os.ModeSymlink == os.ModeSymlink { hdr.Size = 0 } else { hdr.Size = fi.Size() } hdr.Uid, hdr.Gid, major, minor, ino, nlink, err = shared.GetFileStat(path) if err != nil { return fmt.Errorf("error getting file info: %s\n", err) } // unshift the id under /rootfs/ for unpriv containers if !c.IsPrivileged() && strings.HasPrefix(hdr.Name, "/rootfs") { hdr.Uid, hdr.Gid = c.idmapset.ShiftFromNs(hdr.Uid, hdr.Gid) if hdr.Uid == -1 || hdr.Gid == -1 { return nil } } if major != -1 { hdr.Devmajor = int64(major) hdr.Devminor = int64(minor) } // If it's a hardlink we've already seen use the old name if fi.Mode().IsRegular() && nlink > 1 { if firstpath, found := linkmap[ino]; found { hdr.Typeflag = tar.TypeLink hdr.Linkname = firstpath hdr.Size = 0 } else { linkmap[ino] = hdr.Name } } // todo - handle xattrs if err := tw.WriteHeader(hdr); err != nil { return fmt.Errorf("error writing header: %s\n", err) } if hdr.Typeflag == tar.TypeReg { f, err := os.Open(path) if err != nil { return fmt.Errorf("tarStoreFile: error opening file: %s\n", err) } defer f.Close() if _, err := io.Copy(tw, f); err != nil { return fmt.Errorf("error copying file %s\n", err) } } return nil } func (c *containerLXD) mkdirAllContainerRoot(path string, perm 
os.FileMode) error { var uid int var gid int if !c.IsPrivileged() { uid, gid = c.idmapset.ShiftIntoNs(0, 0) if uid == -1 { uid = 0 } if gid == -1 { gid = 0 } } return shared.MkdirAllOwner(path, perm, uid, gid) } func (c *containerLXD) mountShared() error { source := shared.VarPath("shmounts", c.NameGet()) entry := fmt.Sprintf("%s dev/.lxd-mounts none bind,create=dir 0 0", source) if !shared.PathExists(source) { if err := c.mkdirAllContainerRoot(source, 0755); err != nil { return err } } return c.c.SetConfigItem("lxc.mount.entry", entry) } func (c *containerLXD) Checkpoint(opts lxc.CheckpointOptions) error { return c.c.Checkpoint(opts) } func (c *containerLXD) StartFromMigration(imagesDir string) error { f, err := ioutil.TempFile("", "lxd_lxc_migrateconfig_") if err != nil { return err } if err = f.Chmod(0600); err != nil { f.Close() os.Remove(f.Name()) return err } f.Close() os.Remove(f.Name()) if err := c.c.SaveConfigFile(f.Name()); err != nil { return err } /* (Re)Load the AA profile; we set it in the container's config above * in init() */ if err := AALoadProfile(c); err != nil { c.StorageStop() return err } if err := SeccompCreateProfile(c); err != nil { c.StorageStop() return err } cmd := exec.Command( os.Args[0], "forkmigrate", c.name, c.c.ConfigPath(), f.Name(), imagesDir, ) return cmd.Run() }
module.Module 不再对 json 等提供直接支持。
// Copyright 2015 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. package jsre import ( "fmt" "sort" "strconv" "strings" "github.com/fatih/color" "github.com/robertkrimen/otto" ) const ( maxPrettyPrintLevel = 3 indentString = " " ) var ( functionColor = color.New(color.FgMagenta) specialColor = color.New(color.Bold) numberColor = color.New(color.FgRed) stringColor = color.New(color.FgGreen) ) // these fields are hidden when printing objects. var boringKeys = map[string]bool{ "valueOf": true, "toString": true, "toLocaleString": true, "hasOwnProperty": true, "isPrototypeOf": true, "propertyIsEnumerable": true, "constructor": true, } // prettyPrint writes value to standard output. 
func prettyPrint(vm *otto.Otto, value otto.Value) { ppctx{vm}.printValue(value, 0, false) } func prettyPrintJS(call otto.FunctionCall) otto.Value { for _, v := range call.ArgumentList { prettyPrint(call.Otto, v) fmt.Println() } return otto.UndefinedValue() } type ppctx struct{ vm *otto.Otto } func (ctx ppctx) indent(level int) string { return strings.Repeat(indentString, level) } func (ctx ppctx) printValue(v otto.Value, level int, inArray bool) { switch { case v.IsObject(): ctx.printObject(v.Object(), level, inArray) case v.IsNull(): specialColor.Print("null") case v.IsUndefined(): specialColor.Print("undefined") case v.IsString(): s, _ := v.ToString() stringColor.Printf("%q", s) case v.IsBoolean(): b, _ := v.ToBoolean() specialColor.Printf("%t", b) case v.IsNaN(): numberColor.Printf("NaN") case v.IsNumber(): s, _ := v.ToString() numberColor.Printf("%s", s) default: fmt.Printf("<unprintable>") } } func (ctx ppctx) printObject(obj *otto.Object, level int, inArray bool) { switch obj.Class() { case "Array": lv, _ := obj.Get("length") len, _ := lv.ToInteger() if len == 0 { fmt.Printf("[]") return } if level > maxPrettyPrintLevel { fmt.Print("[...]") return } fmt.Print("[") for i := int64(0); i < len; i++ { el, err := obj.Get(strconv.FormatInt(i, 10)) if err == nil { ctx.printValue(el, level+1, true) } if i < len-1 { fmt.Printf(", ") } } fmt.Print("]") case "Object": // Print values from bignumber.js as regular numbers. if ctx.isBigNumber(obj) { numberColor.Print(toString(obj)) return } // Otherwise, print all fields indented, but stop if we're too deep. 
keys := ctx.fields(obj) if len(keys) == 0 { fmt.Print("{}") return } if level > maxPrettyPrintLevel { fmt.Print("{...}") return } fmt.Println("{") for i, k := range keys { v, _ := obj.Get(k) fmt.Printf("%s%s: ", ctx.indent(level+1), k) ctx.printValue(v, level+1, false) if i < len(keys)-1 { fmt.Printf(",") } fmt.Println() } if inArray { level-- } fmt.Printf("%s}", ctx.indent(level)) case "Function": // Use toString() to display the argument list if possible. if robj, err := obj.Call("toString"); err != nil { functionColor.Print("function()") } else { desc := strings.Trim(strings.Split(robj.String(), "{")[0], " \t\n") desc = strings.Replace(desc, " (", "(", 1) functionColor.Print(desc) } case "RegExp": stringColor.Print(toString(obj)) default: if v, _ := obj.Get("toString"); v.IsFunction() && level <= maxPrettyPrintLevel { s, _ := obj.Call("toString") fmt.Printf("<%s %s>", obj.Class(), s.String()) } else { fmt.Printf("<%s>", obj.Class()) } } } func (ctx ppctx) fields(obj *otto.Object) []string { var ( vals, methods []string seen = make(map[string]bool) ) add := func(k string) { if seen[k] || boringKeys[k] { return } seen[k] = true if v, _ := obj.Get(k); v.IsFunction() { methods = append(methods, k) } else { vals = append(vals, k) } } iterOwnAndConstructorKeys(ctx.vm, obj, add) sort.Strings(vals) sort.Strings(methods) return append(vals, methods...) 
} func iterOwnAndConstructorKeys(vm *otto.Otto, obj *otto.Object, f func(string)) { seen := make(map[string]bool) iterOwnKeys(vm, obj, func(prop string) { seen[prop] = true f(prop) }) if cp := constructorPrototype(obj); cp != nil { iterOwnKeys(vm, cp, func(prop string) { if !seen[prop] { f(prop) } }) } } func iterOwnKeys(vm *otto.Otto, obj *otto.Object, f func(string)) { Object, _ := vm.Object("Object") rv, _ := Object.Call("getOwnPropertyNames", obj.Value()) gv, _ := rv.Export() switch gv := gv.(type) { case []interface{}: for _, v := range gv { f(v.(string)) } case []string: for _, v := range gv { f(v) } default: panic(fmt.Errorf("Object.getOwnPropertyNames returned unexpected type %T", gv)) } } func (ctx ppctx) isBigNumber(v *otto.Object) bool { // Handle numbers with custom constructor. if v, _ := v.Get("constructor"); v.Object() != nil { if strings.HasPrefix(toString(v.Object()), "function BigNumber") { return true } } // Handle default constructor. BigNumber, _ := ctx.vm.Object("BigNumber.prototype") if BigNumber == nil { return false } bv, _ := BigNumber.Call("isPrototypeOf", v) b, _ := bv.ToBoolean() return b } func toString(obj *otto.Object) string { s, _ := obj.Call("toString") return s.String() } func constructorPrototype(obj *otto.Object) *otto.Object { if v, _ := obj.Get("constructor"); v.Object() != nil { if v, _ = v.Object().Get("prototype"); v.Object() != nil { return v.Object() } } return nil } jsre: hide fields with prefix _ when pretty-printing This makes web3 internals like _requestManager invisible. // Copyright 2015 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. 
// // The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. package jsre import ( "fmt" "sort" "strconv" "strings" "github.com/fatih/color" "github.com/robertkrimen/otto" ) const ( maxPrettyPrintLevel = 3 indentString = " " ) var ( functionColor = color.New(color.FgMagenta) specialColor = color.New(color.Bold) numberColor = color.New(color.FgRed) stringColor = color.New(color.FgGreen) ) // these fields are hidden when printing objects. var boringKeys = map[string]bool{ "valueOf": true, "toString": true, "toLocaleString": true, "hasOwnProperty": true, "isPrototypeOf": true, "propertyIsEnumerable": true, "constructor": true, } // prettyPrint writes value to standard output. 
func prettyPrint(vm *otto.Otto, value otto.Value) { ppctx{vm}.printValue(value, 0, false) } func prettyPrintJS(call otto.FunctionCall) otto.Value { for _, v := range call.ArgumentList { prettyPrint(call.Otto, v) fmt.Println() } return otto.UndefinedValue() } type ppctx struct{ vm *otto.Otto } func (ctx ppctx) indent(level int) string { return strings.Repeat(indentString, level) } func (ctx ppctx) printValue(v otto.Value, level int, inArray bool) { switch { case v.IsObject(): ctx.printObject(v.Object(), level, inArray) case v.IsNull(): specialColor.Print("null") case v.IsUndefined(): specialColor.Print("undefined") case v.IsString(): s, _ := v.ToString() stringColor.Printf("%q", s) case v.IsBoolean(): b, _ := v.ToBoolean() specialColor.Printf("%t", b) case v.IsNaN(): numberColor.Printf("NaN") case v.IsNumber(): s, _ := v.ToString() numberColor.Printf("%s", s) default: fmt.Printf("<unprintable>") } } func (ctx ppctx) printObject(obj *otto.Object, level int, inArray bool) { switch obj.Class() { case "Array": lv, _ := obj.Get("length") len, _ := lv.ToInteger() if len == 0 { fmt.Printf("[]") return } if level > maxPrettyPrintLevel { fmt.Print("[...]") return } fmt.Print("[") for i := int64(0); i < len; i++ { el, err := obj.Get(strconv.FormatInt(i, 10)) if err == nil { ctx.printValue(el, level+1, true) } if i < len-1 { fmt.Printf(", ") } } fmt.Print("]") case "Object": // Print values from bignumber.js as regular numbers. if ctx.isBigNumber(obj) { numberColor.Print(toString(obj)) return } // Otherwise, print all fields indented, but stop if we're too deep. 
keys := ctx.fields(obj) if len(keys) == 0 { fmt.Print("{}") return } if level > maxPrettyPrintLevel { fmt.Print("{...}") return } fmt.Println("{") for i, k := range keys { v, _ := obj.Get(k) fmt.Printf("%s%s: ", ctx.indent(level+1), k) ctx.printValue(v, level+1, false) if i < len(keys)-1 { fmt.Printf(",") } fmt.Println() } if inArray { level-- } fmt.Printf("%s}", ctx.indent(level)) case "Function": // Use toString() to display the argument list if possible. if robj, err := obj.Call("toString"); err != nil { functionColor.Print("function()") } else { desc := strings.Trim(strings.Split(robj.String(), "{")[0], " \t\n") desc = strings.Replace(desc, " (", "(", 1) functionColor.Print(desc) } case "RegExp": stringColor.Print(toString(obj)) default: if v, _ := obj.Get("toString"); v.IsFunction() && level <= maxPrettyPrintLevel { s, _ := obj.Call("toString") fmt.Printf("<%s %s>", obj.Class(), s.String()) } else { fmt.Printf("<%s>", obj.Class()) } } } func (ctx ppctx) fields(obj *otto.Object) []string { var ( vals, methods []string seen = make(map[string]bool) ) add := func(k string) { if seen[k] || boringKeys[k] || strings.HasPrefix(k, "_") { return } seen[k] = true if v, _ := obj.Get(k); v.IsFunction() { methods = append(methods, k) } else { vals = append(vals, k) } } iterOwnAndConstructorKeys(ctx.vm, obj, add) sort.Strings(vals) sort.Strings(methods) return append(vals, methods...) 
} func iterOwnAndConstructorKeys(vm *otto.Otto, obj *otto.Object, f func(string)) { seen := make(map[string]bool) iterOwnKeys(vm, obj, func(prop string) { seen[prop] = true f(prop) }) if cp := constructorPrototype(obj); cp != nil { iterOwnKeys(vm, cp, func(prop string) { if !seen[prop] { f(prop) } }) } } func iterOwnKeys(vm *otto.Otto, obj *otto.Object, f func(string)) { Object, _ := vm.Object("Object") rv, _ := Object.Call("getOwnPropertyNames", obj.Value()) gv, _ := rv.Export() switch gv := gv.(type) { case []interface{}: for _, v := range gv { f(v.(string)) } case []string: for _, v := range gv { f(v) } default: panic(fmt.Errorf("Object.getOwnPropertyNames returned unexpected type %T", gv)) } } func (ctx ppctx) isBigNumber(v *otto.Object) bool { // Handle numbers with custom constructor. if v, _ := v.Get("constructor"); v.Object() != nil { if strings.HasPrefix(toString(v.Object()), "function BigNumber") { return true } } // Handle default constructor. BigNumber, _ := ctx.vm.Object("BigNumber.prototype") if BigNumber == nil { return false } bv, _ := BigNumber.Call("isPrototypeOf", v) b, _ := bv.ToBoolean() return b } func toString(obj *otto.Object) string { s, _ := obj.Call("toString") return s.String() } func constructorPrototype(obj *otto.Object) *otto.Object { if v, _ := obj.Get("constructor"); v.Object() != nil { if v, _ = v.Object().Get("prototype"); v.Object() != nil { return v.Object() } } return nil }
package main import ( "fmt" "os" "testing" ) var exitCode int func Test_main(t *testing.T) { fmt.Println("aqui test_____main") go main() exitCode = <-exitCh } func TestMain(m *testing.M) { fmt.Println("maiiiiiii") m.Run() // can exit because cover profile is already written os.Exit(exitCode) } modified: main_test.go package main import ( "os" "testing" ) var exitCode int func Test_main(t *testing.T) { go main() exitCode = <-exitCh } func TestMain(m *testing.M) { m.Run() // can exit because cover profile is already written os.Exit(exitCode) }
package main import ( "encoding/json" "flag" "fmt" "log" "os" "strings" "time" "h12.me/kafka/broker" ) const ( clientID = "h12.me/kafka/kafpro" ) type Config struct { Broker string Meta MetaConfig Coord CoordConfig Offset OffsetConfig Commit CommitConfig Time TimeConfig Consume ConsumeConfig } type CoordConfig struct { GroupName string } type OffsetConfig struct { GroupName string Topic string Partition int } type TimeConfig struct { Topic string Partition int Time string } type ConsumeConfig struct { Topic string Partition int Offset int } type MetaConfig struct { Topics []string } type CommitConfig struct { GroupName string Topic string Partition int Offset int Retention int // millisecond } func main() { var cfg Config flag.StringVar(&cfg.Broker, "broker", "", "broker address") // get subcommand if len(os.Args) < 2 { usage() os.Exit(1) } subCmd := os.Args[1] os.Args = append(os.Args[0:1], os.Args[2:]...) switch subCmd { case "meta": var topicsArg string flag.StringVar(&topicsArg, "topics", "", "topic names seperated by comma") flag.Parse() br := broker.New(broker.DefaultConfig().WithAddr(cfg.Broker)) cfg.Meta.Topics = strings.Split(topicsArg, ",") if err := meta(br, &cfg.Meta); err != nil { log.Fatal(err) } case "coord": flag.StringVar(&cfg.Coord.GroupName, "group", "", "group name") flag.Parse() br := broker.New(broker.DefaultConfig().WithAddr(cfg.Broker)) if err := coord(br, &cfg.Coord); err != nil { log.Fatal(err) } case "offset": flag.StringVar(&cfg.Offset.GroupName, "group", "", "group name") flag.StringVar(&cfg.Offset.Topic, "topic", "", "topic name") flag.IntVar(&cfg.Offset.Partition, "partition", 0, "partition") flag.Parse() br := broker.New(broker.DefaultConfig().WithAddr(cfg.Broker)) if err := offset(br, &cfg.Offset); err != nil { log.Fatal(err) } case "commit": flag.StringVar(&cfg.Commit.GroupName, "group", "", "group name") flag.StringVar(&cfg.Commit.Topic, "topic", "", "topic name") flag.IntVar(&cfg.Commit.Partition, "partition", 0, "partition") 
flag.IntVar(&cfg.Commit.Offset, "offset", 0, "offset") flag.IntVar(&cfg.Commit.Retention, "retention", 0, "retention") flag.Parse() br := broker.New(broker.DefaultConfig().WithAddr(cfg.Broker)) if err := commit(br, &cfg.Commit); err != nil { log.Fatal(err) } case "time": flag.StringVar(&cfg.Time.Topic, "topic", "", "topic name") flag.IntVar(&cfg.Time.Partition, "partition", 0, "partition") flag.StringVar(&cfg.Time.Time, "time", "", "time") flag.Parse() br := broker.New(broker.DefaultConfig().WithAddr(cfg.Broker)) if err := timeOffset(br, &cfg.Time); err != nil { log.Fatal(err) } case "consume": flag.StringVar(&cfg.Consume.Topic, "topic", "", "topic name") flag.IntVar(&cfg.Consume.Partition, "partition", 0, "partition") flag.IntVar(&cfg.Consume.Offset, "offset", 0, "offset") flag.Parse() br := broker.New(broker.DefaultConfig().WithAddr(cfg.Broker)) if err := consume(br, &cfg.Consume); err != nil { log.Fatal(err) } default: log.Fatalf("invalid subcommand %s", subCmd) } } func usage() { fmt.Println(` kafpro is a command line tool for querying Kafka wire API Usage: kafpro command [arguments] The commands are: meta TopicMetadataRequest consume FetchRequest time OffsetRequest offset OffsetFetchRequestV1 commit OffsetCommitRequestV1 coord GroupCoordinatorRequest `) flag.PrintDefaults() } func meta(br *broker.B, cfg *MetaConfig) error { resp, err := br.TopicMetadata(cfg.Topics...) 
if err != nil { return err } fmt.Println(toJSON(resp)) return nil } func coord(br *broker.B, coord *CoordConfig) error { reqMsg := broker.GroupCoordinatorRequest(coord.GroupName) req := &broker.Request{ ClientID: clientID, RequestMessage: &reqMsg, } resp := &broker.GroupCoordinatorResponse{} if err := br.Do(req, resp); err != nil { return err } fmt.Println(toJSON(&resp)) if resp.ErrorCode.HasError() { return resp.ErrorCode } return nil } func offset(br *broker.B, cfg *OffsetConfig) error { req := &broker.Request{ ClientID: clientID, RequestMessage: &broker.OffsetFetchRequestV1{ ConsumerGroup: cfg.GroupName, PartitionInTopics: []broker.PartitionInTopic{ { TopicName: cfg.Topic, Partitions: []int32{int32(cfg.Partition)}, }, }, }, } resp := broker.OffsetFetchResponse{} if err := br.Do(req, &resp); err != nil { return err } fmt.Println(toJSON(&resp)) for i := range resp { t := &resp[i] if t.TopicName == cfg.Topic { for j := range resp[i].OffsetMetadataInPartitions { p := &t.OffsetMetadataInPartitions[j] if p.Partition == int32(cfg.Partition) { if p.ErrorCode.HasError() { return p.ErrorCode } } } } } return nil } func commit(br *broker.B, cfg *CommitConfig) error { req := &broker.Request{ ClientID: clientID, RequestMessage: &broker.OffsetCommitRequestV1{ ConsumerGroupID: cfg.GroupName, OffsetCommitInTopicV1s: []broker.OffsetCommitInTopicV1{ { TopicName: cfg.Topic, OffsetCommitInPartitionV1s: []broker.OffsetCommitInPartitionV1{ { Partition: int32(cfg.Partition), Offset: int64(cfg.Offset), // TimeStamp in milliseconds TimeStamp: time.Now().Add(time.Duration(cfg.Retention)*time.Millisecond).Unix() * 1000, }, }, }, }, }, } resp := broker.OffsetCommitResponse{} if err := br.Do(req, &resp); err != nil { return err } for i := range resp { t := &resp[i] if t.TopicName == cfg.Topic { for j := range t.ErrorInPartitions { p := &t.ErrorInPartitions[j] if int(p.Partition) == cfg.Partition { if p.ErrorCode.HasError() { return p.ErrorCode } } } } } return nil } func timeOffset(br 
*broker.B, cfg *TimeConfig) error { var t time.Time switch cfg.Time { case "latest": t = broker.Latest case "earliest": t = broker.Earliest default: var err error t, err = time.Parse("2006-01-02T15:04:05", cfg.Time) if err != nil { return err } } resp, err := br.OffsetByTime(cfg.Topic, int32(cfg.Partition), t) if err != nil { return err } fmt.Println(toJSON(&resp)) return nil } func consume(br *broker.B, cfg *ConsumeConfig) error { req := &broker.Request{ ClientID: clientID, RequestMessage: &broker.FetchRequest{ ReplicaID: -1, MaxWaitTime: int32(time.Second / time.Millisecond), MinBytes: int32(1024), FetchOffsetInTopics: []broker.FetchOffsetInTopic{ { TopicName: cfg.Topic, FetchOffsetInPartitions: []broker.FetchOffsetInPartition{ { Partition: int32(cfg.Partition), FetchOffset: int64(cfg.Offset), MaxBytes: int32(1000), }, }, }, }, }, } resp := broker.FetchResponse{} if err := br.Do(req, &resp); err != nil { return err } fmt.Println(toJSON(resp)) return nil } func toJSON(v interface{}) string { buf, _ := json.MarshalIndent(v, "", "\t") return string(buf) } use Flatten for consume package main import ( "encoding/json" "flag" "fmt" "log" "os" "strings" "time" "h12.me/kafka/broker" ) const ( clientID = "h12.me/kafka/kafpro" ) type Config struct { Broker string Meta MetaConfig Coord CoordConfig Offset OffsetConfig Commit CommitConfig Time TimeConfig Consume ConsumeConfig } type CoordConfig struct { GroupName string } type OffsetConfig struct { GroupName string Topic string Partition int } type TimeConfig struct { Topic string Partition int Time string } type ConsumeConfig struct { Topic string Partition int Offset int } type MetaConfig struct { Topics []string } type CommitConfig struct { GroupName string Topic string Partition int Offset int Retention int // millisecond } func main() { var cfg Config flag.StringVar(&cfg.Broker, "broker", "", "broker address") // get subcommand if len(os.Args) < 2 { usage() os.Exit(1) } subCmd := os.Args[1] os.Args = append(os.Args[0:1], 
os.Args[2:]...) switch subCmd { case "meta": var topicsArg string flag.StringVar(&topicsArg, "topics", "", "topic names seperated by comma") flag.Parse() br := broker.New(broker.DefaultConfig().WithAddr(cfg.Broker)) cfg.Meta.Topics = strings.Split(topicsArg, ",") if err := meta(br, &cfg.Meta); err != nil { log.Fatal(err) } case "coord": flag.StringVar(&cfg.Coord.GroupName, "group", "", "group name") flag.Parse() br := broker.New(broker.DefaultConfig().WithAddr(cfg.Broker)) if err := coord(br, &cfg.Coord); err != nil { log.Fatal(err) } case "offset": flag.StringVar(&cfg.Offset.GroupName, "group", "", "group name") flag.StringVar(&cfg.Offset.Topic, "topic", "", "topic name") flag.IntVar(&cfg.Offset.Partition, "partition", 0, "partition") flag.Parse() br := broker.New(broker.DefaultConfig().WithAddr(cfg.Broker)) if err := offset(br, &cfg.Offset); err != nil { log.Fatal(err) } case "commit": flag.StringVar(&cfg.Commit.GroupName, "group", "", "group name") flag.StringVar(&cfg.Commit.Topic, "topic", "", "topic name") flag.IntVar(&cfg.Commit.Partition, "partition", 0, "partition") flag.IntVar(&cfg.Commit.Offset, "offset", 0, "offset") flag.IntVar(&cfg.Commit.Retention, "retention", 0, "retention") flag.Parse() br := broker.New(broker.DefaultConfig().WithAddr(cfg.Broker)) if err := commit(br, &cfg.Commit); err != nil { log.Fatal(err) } case "time": flag.StringVar(&cfg.Time.Topic, "topic", "", "topic name") flag.IntVar(&cfg.Time.Partition, "partition", 0, "partition") flag.StringVar(&cfg.Time.Time, "time", "", "time") flag.Parse() br := broker.New(broker.DefaultConfig().WithAddr(cfg.Broker)) if err := timeOffset(br, &cfg.Time); err != nil { log.Fatal(err) } case "consume": flag.StringVar(&cfg.Consume.Topic, "topic", "", "topic name") flag.IntVar(&cfg.Consume.Partition, "partition", 0, "partition") flag.IntVar(&cfg.Consume.Offset, "offset", 0, "offset") flag.Parse() br := broker.New(broker.DefaultConfig().WithAddr(cfg.Broker)) if err := consume(br, &cfg.Consume); err != nil 
{ log.Fatal(err) } default: log.Fatalf("invalid subcommand %s", subCmd) } } func usage() { fmt.Println(` kafpro is a command line tool for querying Kafka wire API Usage: kafpro command [arguments] The commands are: meta TopicMetadataRequest consume FetchRequest time OffsetRequest offset OffsetFetchRequestV1 commit OffsetCommitRequestV1 coord GroupCoordinatorRequest `) flag.PrintDefaults() } func meta(br *broker.B, cfg *MetaConfig) error { resp, err := br.TopicMetadata(cfg.Topics...) if err != nil { return err } fmt.Println(toJSON(resp)) return nil } func coord(br *broker.B, coord *CoordConfig) error { reqMsg := broker.GroupCoordinatorRequest(coord.GroupName) req := &broker.Request{ ClientID: clientID, RequestMessage: &reqMsg, } resp := &broker.GroupCoordinatorResponse{} if err := br.Do(req, resp); err != nil { return err } fmt.Println(toJSON(&resp)) if resp.ErrorCode.HasError() { return resp.ErrorCode } return nil } func offset(br *broker.B, cfg *OffsetConfig) error { req := &broker.Request{ ClientID: clientID, RequestMessage: &broker.OffsetFetchRequestV1{ ConsumerGroup: cfg.GroupName, PartitionInTopics: []broker.PartitionInTopic{ { TopicName: cfg.Topic, Partitions: []int32{int32(cfg.Partition)}, }, }, }, } resp := broker.OffsetFetchResponse{} if err := br.Do(req, &resp); err != nil { return err } fmt.Println(toJSON(&resp)) for i := range resp { t := &resp[i] if t.TopicName == cfg.Topic { for j := range resp[i].OffsetMetadataInPartitions { p := &t.OffsetMetadataInPartitions[j] if p.Partition == int32(cfg.Partition) { if p.ErrorCode.HasError() { return p.ErrorCode } } } } } return nil } func commit(br *broker.B, cfg *CommitConfig) error { req := &broker.Request{ ClientID: clientID, RequestMessage: &broker.OffsetCommitRequestV1{ ConsumerGroupID: cfg.GroupName, OffsetCommitInTopicV1s: []broker.OffsetCommitInTopicV1{ { TopicName: cfg.Topic, OffsetCommitInPartitionV1s: []broker.OffsetCommitInPartitionV1{ { Partition: int32(cfg.Partition), Offset: int64(cfg.Offset), // 
TimeStamp in milliseconds TimeStamp: time.Now().Add(time.Duration(cfg.Retention)*time.Millisecond).Unix() * 1000, }, }, }, }, }, } resp := broker.OffsetCommitResponse{} if err := br.Do(req, &resp); err != nil { return err } for i := range resp { t := &resp[i] if t.TopicName == cfg.Topic { for j := range t.ErrorInPartitions { p := &t.ErrorInPartitions[j] if int(p.Partition) == cfg.Partition { if p.ErrorCode.HasError() { return p.ErrorCode } } } } } return nil } func timeOffset(br *broker.B, cfg *TimeConfig) error { var t time.Time switch cfg.Time { case "latest": t = broker.Latest case "earliest": t = broker.Earliest default: var err error t, err = time.Parse("2006-01-02T15:04:05", cfg.Time) if err != nil { return err } } resp, err := br.OffsetByTime(cfg.Topic, int32(cfg.Partition), t) if err != nil { return err } fmt.Println(toJSON(&resp)) return nil } func consume(br *broker.B, cfg *ConsumeConfig) error { req := &broker.Request{ ClientID: clientID, RequestMessage: &broker.FetchRequest{ ReplicaID: -1, MaxWaitTime: int32(time.Second / time.Millisecond), MinBytes: int32(1024), FetchOffsetInTopics: []broker.FetchOffsetInTopic{ { TopicName: cfg.Topic, FetchOffsetInPartitions: []broker.FetchOffsetInPartition{ { Partition: int32(cfg.Partition), FetchOffset: int64(cfg.Offset), MaxBytes: int32(1000), }, }, }, }, }, } resp := broker.FetchResponse{} if err := br.Do(req, &resp); err != nil { return err } fmt.Println(toJSON(resp)) for _, t := range resp { for _, p := range t.FetchMessageSetInPartitions { ms, err := p.MessageSet.Flatten() if err != nil { return err } fmt.Println(toJSON(ms)) } } return nil } func toJSON(v interface{}) string { buf, _ := json.MarshalIndent(v, "", "\t") return string(buf) }
Introduced constants for the item types
refactored stmts into two groups and completed for loop parsing
cleanup people/cypher.go
// Copyright 2018 by caixw, All rights reserved. // Use of this source code is governed by a MIT // license that can be found in the LICENSE file. package sqlbuilder import ( "context" "database/sql" ) var ( _ execPreparer = &DeleteStmt{} _ execPreparer = &UpdateStmt{} _ execPreparer = &InsertStmt{} _ execPreparer = &UpdateStmt{} _ queryer = &SelectStmt{} ) type execer interface { Exec() (sql.Result, error) ExecContext(ctx context.Context) (sql.Result, error) } type queryer interface { Query() (*sql.Rows, error) QueryContext(ctx context.Context) (*sql.Rows, error) } type preparer interface { Prepare() (*sql.Stmt, error) PrepareContext(ctx context.Context) (*sql.Stmt, error) } type execPreparer interface { execer preparer } type queryPreparer interface { queryer preparer } [sqlbuilder] 删除无用的代码
/* Taken from the Go Tour (Exercise: Web Crawler) * * In this exercise you'll use Go's concurrency features to parallelize a web crawler. * * Modify the Crawl function to fetch URLs in parallel without fetching the same URL twice. * */ package main import ( "fmt" ) type Fetcher interface { Fetch(url string) (body string, urls []string, err error) } func Crawl(url string, depth int, fetcher Fetcher, ch chan string, visited_ch chan map[string]bool) { defer close(ch) if depth <= 0 { return } /* This implements an atomic "test and set" for the visited map. It atomically fetches the * visited status for the URL and sets it. * * This is cleverly achieved by using a buffered channel with unitary capacity where worker * threads consume the map when they want to read and mutate it, and write it back to the * channel once they're done. * * Note that the channel must be buffered with a capacity of 1, otherwise we would deadlock * because unbuffered channels block readers and writers until the other end is ready. * * This is Go's philosophy of concurrency: * Don't communicate by sharing memory, share memory by communicating * * How brilliant is that? */ visited := <- visited_ch _, found := visited[url] visited[url] = true visited_ch <- visited if found { return } body, urls, err := fetcher.Fetch(url) if err != nil { ch <- fmt.Sprintln(err) return } ch <- fmt.Sprintf("found: %s %q\n", url, body) chans := make([]chan string, len(urls)) for i, u := range urls { chans[i] = make(chan string, 128) go Crawl(u, depth-1, fetcher, chans[i], visited_ch) } /* This is how we implement synchronization and wait for other threads to finish. * * Each Crawl() thread is assigned its own channel to write results to. Each thread closes * its channel once it's done, that is, after writing its own results into the channel and * the results of the goroutines it spawned. Thus, results flow from a set of channels down * the "channel tree" until they reach the main, primary channel. 
* * This clever mechanism allows goroutines to wait for other spawned routines to terminate * before returning and closing their own channel. * * Synchronization is implicitly achieved with the channels, because each thread defers * closing the channel, which is wonderful. */ for i := range chans { for s := range chans[i] { ch <- s } } return } func main() { ch := make(chan string, 128) visited_ch := make(chan map[string]bool, 1) visited_ch <- make(map[string]bool) go Crawl("http://golang.org/", 4, fetcher, ch, visited_ch) for s := range ch { fmt.Print(s) } } // fakeFetcher is a Fetcher that returns canned results. type fakeFetcher map[string]*fakeResult type fakeResult struct { body string urls []string } func (f fakeFetcher) Fetch(url string) (string, []string, error) { if res, ok := f[url]; ok { return res.body, res.urls, nil } return "", nil, fmt.Errorf("not found: %s", url) } // fetcher is a populated fakeFetcher. var fetcher = fakeFetcher{ "http://golang.org/": &fakeResult{ "The Go Programming Language", []string{ "http://golang.org/pkg/", "http://golang.org/cmd/", }, }, "http://golang.org/pkg/": &fakeResult{ "Packages", []string{ "http://golang.org/", "http://golang.org/cmd/", "http://golang.org/pkg/fmt/", "http://golang.org/pkg/os/", }, }, "http://golang.org/pkg/fmt/": &fakeResult{ "Package fmt", []string{ "http://golang.org/", "http://golang.org/pkg/", }, }, "http://golang.org/pkg/os/": &fakeResult{ "Package os", []string{ "http://golang.org/", "http://golang.org/pkg/", }, }, } Added explanation about buffered channels in the web crawler exercise /* Taken from the Go Tour (Exercise: Web Crawler) * * In this exercise you'll use Go's concurrency features to parallelize a web crawler. * * Modify the Crawl function to fetch URLs in parallel without fetching the same URL twice. 
* */ package main import ( "fmt" ) type Fetcher interface { Fetch(url string) (body string, urls []string, err error) } func Crawl(url string, depth int, fetcher Fetcher, ch chan string, visited_ch chan map[string]bool) { defer close(ch) if depth <= 0 { return } /* This implements an atomic "test and set" for the visited map. It atomically fetches the * visited status for the URL and sets it. * * This is cleverly achieved by using a buffered channel with unitary capacity where worker * threads consume the map when they want to read and mutate it, and write it back to the * channel once they're done. * * Note that the channel must be buffered with a capacity of 1, otherwise we would deadlock * because unbuffered channels block readers and writers until the other end is ready. * * This is Go's philosophy of concurrency: * Don't communicate by sharing memory, share memory by communicating * * How brilliant is that? */ visited := <- visited_ch _, found := visited[url] visited[url] = true visited_ch <- visited if found { return } body, urls, err := fetcher.Fetch(url) if err != nil { ch <- fmt.Sprintln(err) return } ch <- fmt.Sprintf("found: %s %q\n", url, body) chans := make([]chan string, len(urls)) for i, u := range urls { chans[i] = make(chan string, 128) go Crawl(u, depth-1, fetcher, chans[i], visited_ch) } /* This is how we implement synchronization and wait for other threads to finish. * * Each Crawl() thread is assigned its own channel to write results to. Each thread closes * its channel once it's done, that is, after writing its own results into the channel and * the results of the goroutines it spawned. Thus, results flow from a set of channels up * the "channel tree" until they reach the main, primary channel. * * This clever mechanism allows goroutines to wait for other spawned routines to terminate * before returning and closing their own channel. 
* * The channels are buffered (with a capacity of 128) because otherwise there is not much * parallelism, since each thread could only make progress after the parent thread fetched * the last result written. * * Synchronization is implicitly achieved with the channels, because each thread defers * closing the channel, which is wonderful. */ for i := range chans { for s := range chans[i] { ch <- s } } return } func main() { ch := make(chan string, 128) visited_ch := make(chan map[string]bool, 1) visited_ch <- make(map[string]bool) go Crawl("http://golang.org/", 4, fetcher, ch, visited_ch) for s := range ch { fmt.Print(s) } } // fakeFetcher is a Fetcher that returns canned results. type fakeFetcher map[string]*fakeResult type fakeResult struct { body string urls []string } func (f fakeFetcher) Fetch(url string) (string, []string, error) { if res, ok := f[url]; ok { return res.body, res.urls, nil } return "", nil, fmt.Errorf("not found: %s", url) } // fetcher is a populated fakeFetcher. var fetcher = fakeFetcher{ "http://golang.org/": &fakeResult{ "The Go Programming Language", []string{ "http://golang.org/pkg/", "http://golang.org/cmd/", }, }, "http://golang.org/pkg/": &fakeResult{ "Packages", []string{ "http://golang.org/", "http://golang.org/cmd/", "http://golang.org/pkg/fmt/", "http://golang.org/pkg/os/", }, }, "http://golang.org/pkg/fmt/": &fakeResult{ "Package fmt", []string{ "http://golang.org/", "http://golang.org/pkg/", }, }, "http://golang.org/pkg/os/": &fakeResult{ "Package os", []string{ "http://golang.org/", "http://golang.org/pkg/", }, }, }
package game import ( "image/color" "golang.org/x/image/font" "github.com/Bredgren/game1/game/camera" "github.com/Bredgren/game1/game/keymap" "github.com/Bredgren/game1/game/keymap/button" "github.com/Bredgren/geo" "github.com/hajimehoshi/ebiten" "github.com/hajimehoshi/ebiten/text" ) type keyLabel struct { action keymap.Action bounds geo.Rect face font.Face img map[bool]*ebiten.Image btnDown bool } func newKeyLabel(action keymap.Action, bounds geo.Rect, face font.Face) *keyLabel { // bounds, _ := font.BoundString(face, name) // width := (bounds.Max.X - bounds.Min.X).Ceil() + 4 // height := (bounds.Max.Y - bounds.Min.Y).Ceil() // offset := (face.Metrics().Height - face.Metrics().Descent).Floor() - 1 width := int(bounds.W) height := int(bounds.H) img1, _ := ebiten.NewImage(width, height, ebiten.FilterNearest) img1.Fill(color.RGBA{0, 0, 0, 50}) // text.Draw(img1, name, face, 2, offset, color.Black) img2, _ := ebiten.NewImage(width, height, ebiten.FilterNearest) img2.Fill(color.RGBA{0, 0, 0, 100}) // text.Draw(img2, name, face, 2, offset, color.White) k := &keyLabel{ action: action, // bounds: geo.RectWH(geo.I2F2(width, height)), bounds: bounds, face: face, img: map[bool]*ebiten.Image{ false: img1, true: img2, }, } return k } func (k *keyLabel) draw(dst *ebiten.Image, cam *camera.Camera) { mouseOver := k.bounds.CollidePoint(geo.I2F2(ebiten.CursorPosition())) opts := ebiten.DrawImageOptions{} opts.GeoM.Translate(k.bounds.TopLeft()) dst.DrawImage(k.img[mouseOver], &opts) c := color.Black if k.btnDown { c = color.White } x, y := k.bounds.TopLeft() x += 4 y += 14 text.Draw(dst, string(k.action), k.face, int(x), int(y), c) } func (k *keyLabel) handleBtn(down bool) bool { k.btnDown = down return false } // func (k *keyLabel) handleAxis(val float64) bool { // k.axisMove = val != 0 // return false // } type keyButton struct { btn button.KeyMouse onClick func() } func (k *keyButton) draw(dst *ebiten.Image, cam *camera.Camera) { // opts := ebiten.DrawImageOptions{} // 
opts.GeoM.Translate(k.bounds.TopLeft()) // dst.DrawImage(k.img[k.btnDown || k.axisMove], &opts) } type axisButton struct { axis int } type axisState struct { } Remove old keylabel
package game import ( "fmt" "image/color" "log" "time" "golang.org/x/image/font/basicfont" "github.com/Bredgren/game1/game/camera" "github.com/Bredgren/game1/game/keymap" "github.com/Bredgren/game1/game/keymap/button" "github.com/Bredgren/game1/game/ui" "github.com/Bredgren/geo" "github.com/hajimehoshi/ebiten" "github.com/hajimehoshi/ebiten/ebitenutil" ) const ( buttonWidth = 350 buttonHeight = 20 ) type mainMenuState struct { p *player screenHeight int cam *camera.Camera bg *background keymap keymap.Layers remapAction keymap.Action remap bool remapText *ui.Text menu ui.Drawer btns map[keymap.Action]*ui.Button actionText map[keymap.Action]*ui.Text keyText map[keymap.Action]*ui.Text gamepadText map[keymap.Action]*ui.Text canClickButton bool } func newMainMenu(p *player, screenHeight int, cam *camera.Camera, bg *background, km keymap.Layers) *mainMenuState { m := &mainMenuState{ p: p, screenHeight: screenHeight, cam: cam, bg: bg, keymap: km, btns: map[keymap.Action]*ui.Button{}, actionText: map[keymap.Action]*ui.Text{}, keyText: map[keymap.Action]*ui.Text{}, gamepadText: map[keymap.Action]*ui.Text{}, canClickButton: true, } m.setupMenu() m.setupKeymap() return m } func (m *mainMenuState) setupMenu() { idleImg, _ := ebiten.NewImage(buttonWidth, buttonHeight, ebiten.FilterNearest) idleImg.Fill(color.NRGBA{200, 200, 200, 50}) hoverImg, _ := ebiten.NewImage(buttonWidth, buttonHeight, ebiten.FilterNearest) hoverImg.Fill(color.NRGBA{100, 100, 100, 50}) var elements []ui.WeightedDrawer m.remapText = &ui.Text{ Anchor: ui.AnchorCenter, Color: color.Black, Face: basicfont.Face7x13, Wt: 0.5, } elements = append(elements, m.remapText) actions := []keymap.Action{ left, right, move, jump, uppercut, slam, punch, launch, punchH, punchV, } for _, action := range actions { action := action m.keyText[action] = &ui.Text{ Anchor: ui.AnchorLeft, Color: color.Black, Face: basicfont.Face7x13, Wt: 1, } m.gamepadText[action] = &ui.Text{ Anchor: ui.AnchorLeft, Color: color.Black, Face: 
basicfont.Face7x13, Wt: 1, } m.actionText[action] = &ui.Text{ Text: string(action), Anchor: ui.Anchor{ Src: geo.VecXY(0, 0.5), Dst: geo.VecXY(0, 0.5), Offset: geo.VecXY(5, 0), }, Color: color.Black, Face: basicfont.Face7x13, Wt: 1.8, } m.btns[action] = &ui.Button{ IdleImg: idleImg, HoverImg: hoverImg, IdleAnchor: ui.AnchorCenter, HoverAnchor: ui.AnchorCenter, Element: &ui.HorizontalContainer{ Wt: 1, Elements: []ui.WeightedDrawer{ m.actionText[action], m.keyText[action], m.gamepadText[action], }, }, Wt: 1, OnClick: func() { m.remap = true m.remapAction = action m.remapText.Text = fmt.Sprintf("Remap action '%s'", action) }, } elements = append(elements, m.btns[action]) } m.menu = &ui.VerticalContainer{ Wt: 1, Elements: elements, } m.updateText() } func (m *mainMenuState) updateText() { actions := []keymap.Action{ left, right, move, jump, uppercut, slam, punch, launch, punchH, punchV, } for _, action := range actions { if btn, ok := m.keymap[playerLayer].KeyMouse.GetButton(action); ok { m.keyText[action].Text = btn.String() m.keyText[action].Color = color.Black } else { m.keyText[action].Text = "N/A" m.keyText[action].Color = color.NRGBA{0, 0, 0, 100} } if btn, ok := m.keymap[playerLayer].GamepadBtn.GetButton(action); ok { m.gamepadText[action].Text = fmt.Sprintf("Gampad %d", btn) m.gamepadText[action].Color = color.Black } else { m.gamepadText[action].Text = "N/A" m.gamepadText[action].Color = color.NRGBA{0, 0, 0, 100} } } } func (m *mainMenuState) setupKeymap() { //// Setup remap layer // Button handlers remapHandlers := keymap.ButtonHandlerMap{} for key := ebiten.Key0; key <= ebiten.KeyMax; key++ { action := keymap.Action(fmt.Sprintf("key%d", key)) remapHandlers[action] = m.keyRemapHandler(button.FromKey(key)) } remapHandlers[keymap.Action("mouse0")] = m.keyRemapHandler(button.FromMouse(ebiten.MouseButtonLeft)) remapHandlers[keymap.Action("mouse1")] = m.keyRemapHandler(button.FromMouse(ebiten.MouseButtonMiddle)) remapHandlers[keymap.Action("mouse2")] = 
m.keyRemapHandler(button.FromMouse(ebiten.MouseButtonRight)) // Gamepad handlers for btn := ebiten.GamepadButton0; btn < ebiten.GamepadButtonMax; btn++ { action := keymap.Action(fmt.Sprintf("btn%d", btn)) remapHandlers[action] = m.btnRemapHandler(btn) } // Axis handlers axisHandlers := keymap.AxisHandlerMap{} // // We don't know how many axes there will be at this point so just do alot :P // for axis := 0; axis < 100; axis++ { // action := keymap.Action(fmt.Sprintf("axis%d", axis)) // axisHandlers[action] = m.axisRemapHandler(axis) // } m.keymap[remapLayer] = keymap.New(remapHandlers, axisHandlers) // Button actions for key := ebiten.Key0; key <= ebiten.KeyMax; key++ { action := keymap.Action(fmt.Sprintf("key%d", key)) m.keymap[remapLayer].KeyMouse.Set(button.FromKey(key), action) } m.keymap[remapLayer].KeyMouse.Set(button.FromMouse(ebiten.MouseButtonLeft), "mouse0") m.keymap[remapLayer].KeyMouse.Set(button.FromMouse(ebiten.MouseButtonMiddle), "mouse1") m.keymap[remapLayer].KeyMouse.Set(button.FromMouse(ebiten.MouseButtonRight), "mouse2") // Gamepad actions for btn := ebiten.GamepadButton0; btn < ebiten.GamepadButtonMax; btn++ { action := keymap.Action(fmt.Sprintf("btn%d", btn)) m.keymap[remapLayer].GamepadBtn.Set(btn, action) } // Axis actions // for axis := 0; axis < 100; axis++ { // action := keymap.Action(fmt.Sprintf("axis%d", axis)) // m.keymap[remapLayer].GamepadAxis.Set(axis, action) // } //// Setup UI handlers leftClickHandlers := keymap.ButtonHandlerMap{ leftClick: m.leftMouseHandler, } m.keymap[leftClickLayer] = keymap.New(leftClickHandlers, nil) m.keymap[leftClickLayer].KeyMouse.Set(button.FromMouse(ebiten.MouseButtonLeft), leftClick) colorFn := func(action keymap.Action) keymap.ButtonHandler { return func(down bool) bool { if down { m.actionText[action].Color = color.White } else { m.actionText[action].Color = color.Black } return false } } // UI handlers uiHandlers := keymap.ButtonHandlerMap{ left: colorFn(left), right: colorFn(right), jump: 
colorFn(jump), uppercut: colorFn(uppercut), slam: colorFn(slam), punch: colorFn(punch), launch: colorFn(launch), } uiAxisHandlers := keymap.AxisHandlerMap{ // move: m.keyLabels[move].handleAxis, // punchH // punchV } m.keymap[uiLayer] = keymap.New(uiHandlers, uiAxisHandlers) setDefaultKeyMap(m.keymap[uiLayer]) } func (m *mainMenuState) keyRemapHandler(btn button.KeyMouse) keymap.ButtonHandler { return func(down bool) bool { if !m.canClickButton && btn.IsMouse() { // This prevents us from always immediately remapping to left mouse return false } if down && m.remap { log.Println("remap key to", btn) m.keymap[playerLayer].KeyMouse.Set(btn, m.remapAction) m.keymap[uiLayer].KeyMouse.Set(btn, m.remapAction) m.remap = false m.remapText.Text = "" m.updateText() if btn.IsMouse() { // This prevents us from clicking a button if remapping to left mouse while hover // over a button m.canClickButton = false } return true } // No reason to stop propagation here because either the button is up or is not // remappable return false } } func (m *mainMenuState) btnRemapHandler(btn ebiten.GamepadButton) keymap.ButtonHandler { return func(down bool) bool { if down && m.remap { log.Println("remap gamepad btn to", btn) m.keymap[playerLayer].GamepadBtn.Set(btn, m.remapAction) m.keymap[uiLayer].GamepadBtn.Set(btn, m.remapAction) m.remap = false m.updateText() } // No reason to stop propagation here because either the button is up or is not // remappable return false } } // func (m *mainMenuState) axisRemapHandler(axis int) keymap.AxisHandler { // return func(val float64) bool { // remap := m.remap // if val != 0 && remap { // log.Println("remap axis to", axis) // m.keymap[playerLayer].GamepadAxis.Set(axis, m.remapAction) // m.remap = false // } // return remap // } // } func (m *mainMenuState) begin(previousState gameStateName) { m.cam.Target = fixedCameraTarget{geo.VecXY(m.p.pos.X, -float64(m.screenHeight)*0.4)} } func (m *mainMenuState) end() { } func (m *mainMenuState) nextState() 
gameStateName { return mainMenu } func (m *mainMenuState) update(dt time.Duration) { m.p.update(dt) for _, b := range m.btns { b.Update() } } func (m *mainMenuState) draw(dst *ebiten.Image, cam *camera.Camera) { m.bg.Draw(dst, cam) m.p.draw(dst, cam) x, y := 120.0, 20.0 height := 220.0 ebitenutil.DrawRect(dst, x, y, buttonWidth, height, color.NRGBA{100, 100, 100, 50}) m.menu.Draw(dst, geo.RectXYWH(x, y, buttonWidth, height)) } func (m *mainMenuState) leftMouseHandler(down bool) bool { if m.canClickButton && down { for _, b := range m.btns { if b.Hover { b.OnClick() m.canClickButton = false return true } } } m.canClickButton = !down return false } Fix typo package game import ( "fmt" "image/color" "log" "time" "golang.org/x/image/font/basicfont" "github.com/Bredgren/game1/game/camera" "github.com/Bredgren/game1/game/keymap" "github.com/Bredgren/game1/game/keymap/button" "github.com/Bredgren/game1/game/ui" "github.com/Bredgren/geo" "github.com/hajimehoshi/ebiten" "github.com/hajimehoshi/ebiten/ebitenutil" ) const ( buttonWidth = 350 buttonHeight = 20 ) type mainMenuState struct { p *player screenHeight int cam *camera.Camera bg *background keymap keymap.Layers remapAction keymap.Action remap bool remapText *ui.Text menu ui.Drawer btns map[keymap.Action]*ui.Button actionText map[keymap.Action]*ui.Text keyText map[keymap.Action]*ui.Text gamepadText map[keymap.Action]*ui.Text canClickButton bool } func newMainMenu(p *player, screenHeight int, cam *camera.Camera, bg *background, km keymap.Layers) *mainMenuState { m := &mainMenuState{ p: p, screenHeight: screenHeight, cam: cam, bg: bg, keymap: km, btns: map[keymap.Action]*ui.Button{}, actionText: map[keymap.Action]*ui.Text{}, keyText: map[keymap.Action]*ui.Text{}, gamepadText: map[keymap.Action]*ui.Text{}, canClickButton: true, } m.setupMenu() m.setupKeymap() return m } func (m *mainMenuState) setupMenu() { idleImg, _ := ebiten.NewImage(buttonWidth, buttonHeight, ebiten.FilterNearest) idleImg.Fill(color.NRGBA{200, 200, 
200, 50}) hoverImg, _ := ebiten.NewImage(buttonWidth, buttonHeight, ebiten.FilterNearest) hoverImg.Fill(color.NRGBA{100, 100, 100, 50}) var elements []ui.WeightedDrawer m.remapText = &ui.Text{ Anchor: ui.AnchorCenter, Color: color.Black, Face: basicfont.Face7x13, Wt: 0.5, } elements = append(elements, m.remapText) actions := []keymap.Action{ left, right, move, jump, uppercut, slam, punch, launch, punchH, punchV, } for _, action := range actions { action := action m.keyText[action] = &ui.Text{ Anchor: ui.AnchorLeft, Color: color.Black, Face: basicfont.Face7x13, Wt: 1, } m.gamepadText[action] = &ui.Text{ Anchor: ui.AnchorLeft, Color: color.Black, Face: basicfont.Face7x13, Wt: 1, } m.actionText[action] = &ui.Text{ Text: string(action), Anchor: ui.Anchor{ Src: geo.VecXY(0, 0.5), Dst: geo.VecXY(0, 0.5), Offset: geo.VecXY(5, 0), }, Color: color.Black, Face: basicfont.Face7x13, Wt: 1.8, } m.btns[action] = &ui.Button{ IdleImg: idleImg, HoverImg: hoverImg, IdleAnchor: ui.AnchorCenter, HoverAnchor: ui.AnchorCenter, Element: &ui.HorizontalContainer{ Wt: 1, Elements: []ui.WeightedDrawer{ m.actionText[action], m.keyText[action], m.gamepadText[action], }, }, Wt: 1, OnClick: func() { m.remap = true m.remapAction = action m.remapText.Text = fmt.Sprintf("Remap action '%s'", action) }, } elements = append(elements, m.btns[action]) } m.menu = &ui.VerticalContainer{ Wt: 1, Elements: elements, } m.updateText() } func (m *mainMenuState) updateText() { actions := []keymap.Action{ left, right, move, jump, uppercut, slam, punch, launch, punchH, punchV, } for _, action := range actions { if btn, ok := m.keymap[playerLayer].KeyMouse.GetButton(action); ok { m.keyText[action].Text = btn.String() m.keyText[action].Color = color.Black } else { m.keyText[action].Text = "N/A" m.keyText[action].Color = color.NRGBA{0, 0, 0, 100} } if btn, ok := m.keymap[playerLayer].GamepadBtn.GetButton(action); ok { m.gamepadText[action].Text = fmt.Sprintf("Gamepad %d", btn) m.gamepadText[action].Color = 
color.Black } else { m.gamepadText[action].Text = "N/A" m.gamepadText[action].Color = color.NRGBA{0, 0, 0, 100} } } } func (m *mainMenuState) setupKeymap() { //// Setup remap layer // Button handlers remapHandlers := keymap.ButtonHandlerMap{} for key := ebiten.Key0; key <= ebiten.KeyMax; key++ { action := keymap.Action(fmt.Sprintf("key%d", key)) remapHandlers[action] = m.keyRemapHandler(button.FromKey(key)) } remapHandlers[keymap.Action("mouse0")] = m.keyRemapHandler(button.FromMouse(ebiten.MouseButtonLeft)) remapHandlers[keymap.Action("mouse1")] = m.keyRemapHandler(button.FromMouse(ebiten.MouseButtonMiddle)) remapHandlers[keymap.Action("mouse2")] = m.keyRemapHandler(button.FromMouse(ebiten.MouseButtonRight)) // Gamepad handlers for btn := ebiten.GamepadButton0; btn < ebiten.GamepadButtonMax; btn++ { action := keymap.Action(fmt.Sprintf("btn%d", btn)) remapHandlers[action] = m.btnRemapHandler(btn) } // Axis handlers axisHandlers := keymap.AxisHandlerMap{} // // We don't know how many axes there will be at this point so just do alot :P // for axis := 0; axis < 100; axis++ { // action := keymap.Action(fmt.Sprintf("axis%d", axis)) // axisHandlers[action] = m.axisRemapHandler(axis) // } m.keymap[remapLayer] = keymap.New(remapHandlers, axisHandlers) // Button actions for key := ebiten.Key0; key <= ebiten.KeyMax; key++ { action := keymap.Action(fmt.Sprintf("key%d", key)) m.keymap[remapLayer].KeyMouse.Set(button.FromKey(key), action) } m.keymap[remapLayer].KeyMouse.Set(button.FromMouse(ebiten.MouseButtonLeft), "mouse0") m.keymap[remapLayer].KeyMouse.Set(button.FromMouse(ebiten.MouseButtonMiddle), "mouse1") m.keymap[remapLayer].KeyMouse.Set(button.FromMouse(ebiten.MouseButtonRight), "mouse2") // Gamepad actions for btn := ebiten.GamepadButton0; btn < ebiten.GamepadButtonMax; btn++ { action := keymap.Action(fmt.Sprintf("btn%d", btn)) m.keymap[remapLayer].GamepadBtn.Set(btn, action) } // Axis actions // for axis := 0; axis < 100; axis++ { // action := 
keymap.Action(fmt.Sprintf("axis%d", axis)) // m.keymap[remapLayer].GamepadAxis.Set(axis, action) // } //// Setup UI handlers leftClickHandlers := keymap.ButtonHandlerMap{ leftClick: m.leftMouseHandler, } m.keymap[leftClickLayer] = keymap.New(leftClickHandlers, nil) m.keymap[leftClickLayer].KeyMouse.Set(button.FromMouse(ebiten.MouseButtonLeft), leftClick) colorFn := func(action keymap.Action) keymap.ButtonHandler { return func(down bool) bool { if down { m.actionText[action].Color = color.White } else { m.actionText[action].Color = color.Black } return false } } // UI handlers uiHandlers := keymap.ButtonHandlerMap{ left: colorFn(left), right: colorFn(right), jump: colorFn(jump), uppercut: colorFn(uppercut), slam: colorFn(slam), punch: colorFn(punch), launch: colorFn(launch), } uiAxisHandlers := keymap.AxisHandlerMap{ // move: m.keyLabels[move].handleAxis, // punchH // punchV } m.keymap[uiLayer] = keymap.New(uiHandlers, uiAxisHandlers) setDefaultKeyMap(m.keymap[uiLayer]) } func (m *mainMenuState) keyRemapHandler(btn button.KeyMouse) keymap.ButtonHandler { return func(down bool) bool { if !m.canClickButton && btn.IsMouse() { // This prevents us from always immediately remapping to left mouse return false } if down && m.remap { log.Println("remap key to", btn) m.keymap[playerLayer].KeyMouse.Set(btn, m.remapAction) m.keymap[uiLayer].KeyMouse.Set(btn, m.remapAction) m.remap = false m.remapText.Text = "" m.updateText() if btn.IsMouse() { // This prevents us from clicking a button if remapping to left mouse while hover // over a button m.canClickButton = false } return true } // No reason to stop propagation here because either the button is up or is not // remappable return false } } func (m *mainMenuState) btnRemapHandler(btn ebiten.GamepadButton) keymap.ButtonHandler { return func(down bool) bool { if down && m.remap { log.Println("remap gamepad btn to", btn) m.keymap[playerLayer].GamepadBtn.Set(btn, m.remapAction) m.keymap[uiLayer].GamepadBtn.Set(btn, m.remapAction) 
m.remap = false m.updateText() } // No reason to stop propagation here because either the button is up or is not // remappable return false } } // func (m *mainMenuState) axisRemapHandler(axis int) keymap.AxisHandler { // return func(val float64) bool { // remap := m.remap // if val != 0 && remap { // log.Println("remap axis to", axis) // m.keymap[playerLayer].GamepadAxis.Set(axis, m.remapAction) // m.remap = false // } // return remap // } // } func (m *mainMenuState) begin(previousState gameStateName) { m.cam.Target = fixedCameraTarget{geo.VecXY(m.p.pos.X, -float64(m.screenHeight)*0.4)} } func (m *mainMenuState) end() { } func (m *mainMenuState) nextState() gameStateName { return mainMenu } func (m *mainMenuState) update(dt time.Duration) { m.p.update(dt) for _, b := range m.btns { b.Update() } } func (m *mainMenuState) draw(dst *ebiten.Image, cam *camera.Camera) { m.bg.Draw(dst, cam) m.p.draw(dst, cam) x, y := 120.0, 20.0 height := 220.0 ebitenutil.DrawRect(dst, x, y, buttonWidth, height, color.NRGBA{100, 100, 100, 50}) m.menu.Draw(dst, geo.RectXYWH(x, y, buttonWidth, height)) } func (m *mainMenuState) leftMouseHandler(down bool) bool { if m.canClickButton && down { for _, b := range m.btns { if b.Hover { b.OnClick() m.canClickButton = false return true } } } m.canClickButton = !down return false }
package clusters import ( "fmt" "os" "strconv" "strings" "time" oclient "github.com/openshift/origin/pkg/client" ocon "github.com/radanalyticsio/oshinko-core/clusters/containers" odc "github.com/radanalyticsio/oshinko-core/clusters/deploymentconfigs" opt "github.com/radanalyticsio/oshinko-core/clusters/podtemplates" "github.com/radanalyticsio/oshinko-core/clusters/probes" osv "github.com/radanalyticsio/oshinko-core/clusters/services" kapi "k8s.io/kubernetes/pkg/api" kclient "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/sets" ) const clusterConfigMsg = "invalid cluster configuration" const missingConfigMsg = "unable to find spark configuration '%s'" const findDepConfigMsg = "unable to find deployment configs" const createDepConfigMsg = "unable to create deployment config '%s'" const replMsgWorker = "unable to find replication controller for spark workers" const replMsgMaster = "unable to find replication controller for spark master" const masterSrvMsg = "unable to create spark master service endpoint" const mastermsg = "unable to find spark masters" const updateReplMsg = "unable to update replication controller for spark workers" const noSuchClusterMsg = "no such cluster '%s'" const podListMsg = "unable to retrive pod list" const typeLabel = "oshinko-type" const clusterLabel = "oshinko-cluster" const workerType = "worker" const masterType = "master" const webuiType = "webui" const masterPortName = "spark-master" const masterPort = 7077 const webPortName = "spark-webui" const webPort = 8080 const sparkconfdir = "/etc/oshinko-spark-configs" // The suffix to add to the spark master hostname (clustername) for the web service const webServiceSuffix = "-ui" type SparkPod struct { IP string Status string Type string } type SparkCluster struct { Namespace string `json:"namespace,omitempty"` Name string `json:"name,omitempty"` Href string `json:"href"` Image string `json:"image"` MasterURL string `json:"masterUrl"` 
MasterWebURL string `json:"masterWebUrl"` Status string `json:"status"` WorkerCount int `json:"workerCount"` MasterCount int `json:"masterCount,omitempty"` Config ClusterConfig Pods []SparkPod } func generalErr(err error, msg string, code int) ClusterError { if err != nil { if msg == "" { msg = "error: " + err.Error() } else { msg = msg + ", error: " + err.Error() } } return NewClusterError(msg, code) } func makeSelector(otype string, clustername string) kapi.ListOptions { // Build a selector list based on type and/or cluster name ls := labels.NewSelector() if otype != "" { ot, _ := labels.NewRequirement(typeLabel, labels.EqualsOperator, sets.NewString(otype)) ls = ls.Add(*ot) } if clustername != "" { cname, _ := labels.NewRequirement(clusterLabel, labels.EqualsOperator, sets.NewString(clustername)) ls = ls.Add(*cname) } return kapi.ListOptions{LabelSelector: ls} } func retrieveServiceURL(client kclient.ServiceInterface, stype, clustername string) string { selectorlist := makeSelector(stype, clustername) srvs, err := client.List(selectorlist) if err == nil && len(srvs.Items) != 0 { srv := srvs.Items[0] scheme := "http://" if stype == masterType { scheme = "spark://" } return scheme + srv.Name + ":" + strconv.Itoa(srv.Spec.Ports[0].Port) } return "" } func checkForDeploymentConfigs(client oclient.DeploymentConfigInterface, clustername string) (bool, error) { selectorlist := makeSelector(masterType, clustername) dcs, err := client.List(selectorlist) if err != nil { return false, err } if len(dcs.Items) == 0 { return false, nil } selectorlist = makeSelector(workerType, clustername) dcs, err = client.List(selectorlist) if err != nil { return false, err } if len(dcs.Items) == 0 { return false, nil } return true, nil } func makeEnvVars(clustername, sparkconfdir string) []kapi.EnvVar { envs := []kapi.EnvVar{} envs = append(envs, kapi.EnvVar{Name: "OSHINKO_SPARK_CLUSTER", Value: clustername}) envs = append(envs, kapi.EnvVar{Name: "OSHINKO_REST_HOST", Value: 
os.Getenv("OSHINKO_REST_SERVICE_HOST")}) envs = append(envs, kapi.EnvVar{Name: "OSHINKO_REST_PORT", Value: os.Getenv("OSHINKO_REST_SERVICE_PORT")}) if sparkconfdir != "" { envs = append(envs, kapi.EnvVar{Name: "SPARK_CONF_DIR", Value: sparkconfdir}) } return envs } func makeWorkerEnvVars(clustername, sparkconfdir string) []kapi.EnvVar { envs := []kapi.EnvVar{} envs = makeEnvVars(clustername, sparkconfdir) envs = append(envs, kapi.EnvVar{ Name: "SPARK_MASTER_ADDRESS", Value: "spark://" + clustername + ":" + strconv.Itoa(masterPort)}) envs = append(envs, kapi.EnvVar{ Name: "SPARK_MASTER_UI_ADDRESS", Value: "http://" + clustername + webServiceSuffix + ":" + strconv.Itoa(webPort)}) return envs } func sparkWorker(namespace string, image string, replicas int, clustername, sparkconfdir, sparkworkerconfig string) *odc.ODeploymentConfig { // Create the basic deployment config // We will use a label and pod selector based on the cluster name. // Openshift will add additional labels and selectors to distinguish pods handled by // this deploymentconfig from pods beloning to another. dc := odc.DeploymentConfig(clustername+"-w", namespace). TriggerOnConfigChange().RollingStrategy().Label(clusterLabel, clustername). Label(typeLabel, workerType). PodSelector(clusterLabel, clustername).Replicas(replicas) // Create a pod template spec with the matching label pt := opt.PodTemplateSpec().Label(clusterLabel, clustername).Label(typeLabel, workerType) // Create a container with the correct ports and start command webport := 8081 webp := ocon.ContainerPort(webPortName, webport) cont := ocon.Container(dc.Name, image). Ports(webp). 
SetLivenessProbe(probes.NewHTTPGetProbe(webport)).EnvVars(makeWorkerEnvVars(clustername, sparkconfdir)) if sparkworkerconfig != "" { pt = pt.SetConfigMapVolume(sparkworkerconfig) cont = cont.SetVolumeMount(sparkworkerconfig, sparkconfdir, true) } // Finally, assign the container to the pod template spec and // assign the pod template spec to the deployment config return dc.PodTemplateSpec(pt.Containers(cont)) } func sparkMaster(namespace, image, clustername, sparkconfdir, sparkmasterconfig string) *odc.ODeploymentConfig { // Create the basic deployment config // We will use a label and pod selector based on the cluster name // Openshift will add additional labels and selectors to distinguish pods handled by // this deploymentconfig from pods beloning to another. dc := odc.DeploymentConfig(clustername+"-m", namespace). TriggerOnConfigChange().RollingStrategy().Label(clusterLabel, clustername). Label(typeLabel, masterType). PodSelector(clusterLabel, clustername) // Create a pod template spec with the matching label pt := opt.PodTemplateSpec().Label(clusterLabel, clustername). Label(typeLabel, masterType) // Create a container with the correct ports and start command httpProbe := probes.NewHTTPGetProbe(webPort) masterp := ocon.ContainerPort(masterPortName, masterPort) webp := ocon.ContainerPort(webPortName, webPort) cont := ocon.Container(dc.Name, image). Ports(masterp, webp). SetLivenessProbe(httpProbe). 
SetReadinessProbe(httpProbe).EnvVars(makeEnvVars(clustername, sparkconfdir))

	// If a named master config was supplied, mount it as a volume and point
	// the container at the mount path
	if sparkmasterconfig != "" {
		pt = pt.SetConfigMapVolume(sparkmasterconfig)
		cont = cont.SetVolumeMount(sparkmasterconfig, sparkconfdir, true)
	}

	// Finally, assign the container to the pod template spec and
	// assign the pod template spec to the deployment config
	return dc.PodTemplateSpec(pt.Containers(cont))
}

// service builds a service (and its single port) labeled with the cluster
// name and object type, selecting pods via the supplied selectors.
func service(name string, port int, clustername, otype string, podselectors map[string]string) (*osv.OService, *osv.OServicePort) {
	p := osv.ServicePort(port).TargetPort(port)
	return osv.Service(name).Label(clusterLabel, clustername).
		Label(typeLabel, otype).PodSelectors(podselectors).Ports(p), p
}

// checkForConfigMap verifies that the named ConfigMap exists, returning nil
// on success and a ClusterError describing the failure otherwise.
func checkForConfigMap(name string, cm kclient.ConfigMapsInterface) error {
	_, err := cm.Get(name)
	if err != nil {
		if strings.Contains(err.Error(), "not found") {
			// The config map simply does not exist; a config problem
			return generalErr(err, fmt.Sprintf(missingConfigMsg, name), ClusterConfigCode)
		}
		// Some other client failure. Preserve the underlying error instead
		// of dropping it (the original passed nil here, losing the cause).
		return generalErr(err, fmt.Sprintf(missingConfigMsg, name), ClientOperationCode)
	}
	return nil
}

func countWorkers(client kclient.PodInterface, clustername string) (int, *kapi.PodList, error) {
	// If we are unable to retrieve a list of worker pods, return -1 for count
	// This is an error case, different from a list of length 0. 
Let the caller // decide whether to report the error or the -1 count cnt := -1 selectorlist := makeSelector(workerType, clustername) pods, err := client.List(selectorlist) if pods != nil { cnt = len(pods.Items) } return cnt, pods, err } // CreateClusterResponse create a cluster and return the representation func CreateCluster(clustername, namespace, sparkimage string, config *ClusterConfig, osclient *oclient.Client, client *kclient.Client) (SparkCluster, error) { var masterconfdir string var workerconfdir string var result SparkCluster = SparkCluster{} createCode := func(err error) int { if err != nil && strings.Index(err.Error(), "already exists") != -1 { return ComponentExistsCode } return ClientOperationCode } masterhost := clustername // Copy any named config referenced and update it with any explicit config values finalconfig, err := GetClusterConfig(config, client.ConfigMaps(namespace)) if err != nil { return result, generalErr(err, clusterConfigMsg, ErrorCode(err)) } workercount := int(finalconfig.WorkerCount) // Check if finalconfig contains the names of ConfigMaps to use for spark // configuration. 
If so they must exist, and the SPARK_CONF_DIR env must be // set correctly cm := client.ConfigMaps(namespace) if finalconfig.SparkMasterConfig != "" { err := checkForConfigMap(finalconfig.SparkMasterConfig, cm) if err != nil { return result, err } masterconfdir = sparkconfdir } if finalconfig.SparkWorkerConfig != "" { err := checkForConfigMap(finalconfig.SparkWorkerConfig, cm) if err != nil { return result, err } workerconfdir = sparkconfdir } // Create the master deployment config masterdc := sparkMaster(namespace, sparkimage, clustername, masterconfdir, finalconfig.SparkMasterConfig) // Create the services that will be associated with the master pod // They will be created with selectors based on the pod labels mastersv, _ := service(masterhost, masterdc.FindPort(masterPortName), clustername, masterType, masterdc.GetPodTemplateSpecLabels()) websv, _ := service(masterhost+webServiceSuffix, masterdc.FindPort(webPortName), clustername, webuiType, masterdc.GetPodTemplateSpecLabels()) // Create the worker deployment config workerdc := sparkWorker(namespace, sparkimage, workercount, clustername, workerconfdir, finalconfig.SparkWorkerConfig) // Launch all of the objects dcc := osclient.DeploymentConfigs(namespace) _, err = dcc.Create(&masterdc.DeploymentConfig) if err != nil { return result, generalErr(err, fmt.Sprintf(createDepConfigMsg, masterdc.Name), createCode(err)) } _, err = dcc.Create(&workerdc.DeploymentConfig) if err != nil { // Since we created the master deployment config, try to clean up DeleteCluster(clustername, namespace, osclient, client) return result, generalErr(err, fmt.Sprintf(createDepConfigMsg, workerdc.Name), createCode(err)) } sc := client.Services(namespace) _, err = sc.Create(&mastersv.Service) if err != nil { // Since we create the master and workers, try to clean up DeleteCluster(clustername, namespace, osclient, client) return result, generalErr(err, masterSrvMsg, createCode(err)) } // Note, if spark webui service fails for some reason we 
can live without it // TODO ties into cluster status, make a note if the service is missing sc.Create(&websv.Service) // Wait for the replication controllers to exist before building the response. rcc := client.ReplicationControllers(namespace) { var mrepl, wrepl *kapi.ReplicationController mrepl = nil wrepl = nil for i := 0; i < 4; i++ { if mrepl == nil { mrepl, _ = getReplController(rcc, clustername, masterType) } if wrepl == nil { wrepl, _ = getReplController(rcc, clustername, workerType) } if wrepl != nil && mrepl != nil { break } time.Sleep(250 * time.Millisecond) } } result.Name = clustername result.Namespace = namespace result.Href = "/clusters/" + clustername result.Image = sparkimage result.MasterURL = retrieveServiceURL(sc, masterType, clustername) result.MasterWebURL = retrieveServiceURL(sc, webuiType, clustername) if result.MasterURL == "" { result.Status = "MasterServiceMissing" } else { result.Status = "Running" } result.Config = finalconfig result.MasterCount = 0 result.WorkerCount = 0 result.Pods = []SparkPod{} return result, nil } func waitForCount(client kclient.ReplicationControllerInterface, name string, count int) { for i := 0; i < 5; i++ { r, _ := client.Get(name) if r.Status.Replicas == count { return } time.Sleep(1 * time.Second) } } func DeleteCluster(clustername, namespace string, osclient *oclient.Client, client *kclient.Client) (string, error) { var foundSomething bool = false info := []string{} scalerepls := []string{} // Build a selector list for the "oshinko-cluster" label selectorlist := makeSelector("", clustername) // Delete all of the deployment configs dcc := osclient.DeploymentConfigs(namespace) deployments, err := dcc.List(selectorlist) if err != nil { info = append(info, "unable to find deployment configs ("+err.Error()+")") } else { foundSomething = len(deployments.Items) > 0 } for i := range deployments.Items { name := deployments.Items[i].Name err = dcc.Delete(name) if err != nil { info = append(info, "unable to delete 
deployment config "+name+" ("+err.Error()+")") } } // Get a list of all the replication controllers for the cluster // and set all of the replica values to 0 rcc := client.ReplicationControllers(namespace) repls, err := rcc.List(selectorlist) if err != nil { info = append(info, "unable to find replication controllers ("+err.Error()+")") } else { foundSomething = foundSomething || len(repls.Items) > 0 } for i := range repls.Items { name := repls.Items[i].Name repls.Items[i].Spec.Replicas = 0 _, err = rcc.Update(&repls.Items[i]) if err != nil { info = append(info, "unable to scale replication controller "+name+" ("+err.Error()+")") } else { scalerepls = append(scalerepls, name) } } // Wait for the replica count to drop to 0 for each one we scaled for i := range scalerepls { waitForCount(rcc, scalerepls[i], 0) } // Delete each replication controller for i := range repls.Items { name := repls.Items[i].Name err = rcc.Delete(name) if err != nil { info = append(info, "unable to delete replication controller "+name+" ("+err.Error()+")") } } // Delete the services sc := client.Services(namespace) srvs, err := sc.List(selectorlist) if err != nil { info = append(info, "unable to find services ("+err.Error()+")") } else { foundSomething = foundSomething || len(srvs.Items) > 0 } for i := range srvs.Items { name := srvs.Items[i].Name err = sc.Delete(name) if err != nil { info = append(info, "unable to delete service "+name+" ("+err.Error()+")") } } // If we found some part of a cluster, then there is no error // even though the cluster may not have been fully complete. 
// If we didn't find any trace of a cluster, then call it an error if !foundSomething { return "", generalErr(nil, fmt.Sprintf(noSuchClusterMsg, clustername), NoSuchClusterCode) } return strings.Join(info, ", "), nil } // FindSingleClusterResponse find a cluster and return its representation func FindSingleCluster(name, namespace string, osclient *oclient.Client, client *kclient.Client) (SparkCluster, error) { addpod := func(p kapi.Pod) SparkPod { return SparkPod{ IP: p.Status.PodIP, Status: string(p.Status.Phase), Type: p.Labels[typeLabel], } } clustername := name var result SparkCluster = SparkCluster{} // Before we do further checks, make sure that we have deploymentconfigs // If either the master or the worker deploymentconfig are missing, we // assume that the cluster is missing. These are the base objects that // we use to create a cluster ok, err := checkForDeploymentConfigs(osclient.DeploymentConfigs(namespace), clustername) if err != nil { return result, generalErr(err, findDepConfigMsg, ClientOperationCode) } if !ok { return result, generalErr(nil, fmt.Sprintf(noSuchClusterMsg, clustername), NoSuchClusterCode) } pc := client.Pods(namespace) sc := client.Services(namespace) rcc := client.ReplicationControllers(namespace) mrepl, err := getReplController(rcc, clustername, masterType) if err != nil { return result, generalErr(err, replMsgMaster, ClientOperationCode) } else if mrepl == nil { return result, generalErr(err, replMsgMaster, ClusterIncompleteCode) } wrepl, err := getReplController(rcc, clustername, workerType) if err != nil { return result, generalErr(err, replMsgWorker, ClientOperationCode) } else if wrepl == nil { return result, generalErr(err, replMsgWorker, ClusterIncompleteCode) } // TODO (tmckay) we should add the spark master and worker configuration values here. 
// the most likely thing to do is store them in an annotation

	result.Name = name
	result.Namespace = namespace
	result.Href = "/clusters/" + clustername
	result.WorkerCount, _, _ = countWorkers(pc, clustername)
	result.MasterCount = 1
	// Reported config counts come from the live replication controllers,
	// not from any stored configuration
	result.Config.WorkerCount = wrepl.Spec.Replicas
	result.Config.MasterCount = mrepl.Spec.Replicas
	result.MasterURL = retrieveServiceURL(sc, masterType, clustername)
	result.MasterWebURL = retrieveServiceURL(sc, webuiType, clustername)
	if result.MasterURL == "" {
		result.Status = "MasterServiceMissing"
	} else {
		result.Status = "Running"
	}

	// Report pods (master pods first, then workers)
	result.Pods = []SparkPod{}
	selectorlist := makeSelector(masterType, clustername)
	pods, err := pc.List(selectorlist)
	if err != nil {
		return result, generalErr(err, podListMsg, ClientOperationCode)
	}
	for i := range pods.Items {
		result.Pods = append(result.Pods, addpod(pods.Items[i]))
	}
	_, workers, err := countWorkers(pc, clustername)
	if err != nil {
		return result, generalErr(err, podListMsg, ClientOperationCode)
	}
	for i := range workers.Items {
		result.Pods = append(result.Pods, addpod(workers.Items[i]))
	}
	return result, nil
}

// FindClusters lists all spark clusters in the namespace and returns their representations
func FindClusters(namespace string, client *kclient.Client) ([]SparkCluster, error) {
	var result []SparkCluster = []SparkCluster{}

	pc := client.Pods(namespace)
	sc := client.Services(namespace)

	// Create a map so that we can track clusters by name while we
	// find out information about them
	clist := map[string]*SparkCluster{}

	// Get all of the master pods
	pods, err := pc.List(makeSelector(masterType, ""))
	if err != nil {
		return result, generalErr(err, mastermsg, ClientOperationCode)
	}

	// TODO should we do something else to find the clusters, like count deployment configs? 
// From the list of master pods, figure out which clusters we have for i := range pods.Items { // Build the cluster record if we don't already have it // (theoretically with HA we might have more than 1 master) clustername := pods.Items[i].Labels[clusterLabel] if citem, ok := clist[clustername]; !ok { clist[clustername] = new(SparkCluster) citem = clist[clustername] citem.Name = clustername citem.Href = "/clusters/" + clustername // Note, we do not report an error here since we are // reporting on multiple clusters. Instead cnt will be -1. cnt, _, _ := countWorkers(pc, clustername) // TODO we only want to count running pods (not terminating) citem.WorkerCount = cnt citem.MasterURL = retrieveServiceURL(sc, masterType, clustername) citem.MasterWebURL = retrieveServiceURL(sc, webuiType, clustername) // TODO make something real for status if citem.MasterURL == "" { citem.Status = "MasterServiceMissing" } else { citem.Status = "Running" } result = append(result, *citem) } } return result, nil } func getReplController(client kclient.ReplicationControllerInterface, clustername, otype string) (*kapi.ReplicationController, error) { selectorlist := makeSelector(otype, clustername) repls, err := client.List(selectorlist) if err != nil || len(repls.Items) == 0 { return nil, err } // Use the latest replication controller. 
There could be more than one // if the user did something like oc env to set a new env var on a deployment newestRepl := repls.Items[0] for i := 0; i < len(repls.Items); i++ { if repls.Items[i].CreationTimestamp.Unix() > newestRepl.CreationTimestamp.Unix() { newestRepl = repls.Items[i] } } return &newestRepl, nil } // UpdateSingleClusterResponse update a cluster and return the new representation func UpdateCluster(name, namespace string, config *ClusterConfig, osclient *oclient.Client, client *kclient.Client) (SparkCluster, error) { var result SparkCluster = SparkCluster{} clustername := name // Before we do further checks, make sure that we have deploymentconfigs // If either the master or the worker deploymentconfig are missing, we // assume that the cluster is missing. These are the base objects that // we use to create a cluster ok, err := checkForDeploymentConfigs(osclient.DeploymentConfigs(namespace), clustername) if err != nil { return result, generalErr(err, findDepConfigMsg, ClientOperationCode) } if !ok { return result, generalErr(nil, fmt.Sprintf(noSuchClusterMsg, clustername), NoSuchClusterCode) } // Copy any named config referenced and update it with any explicit config values finalconfig, err := GetClusterConfig(config, client.ConfigMaps(namespace)) if err != nil { return result, generalErr(err, clusterConfigMsg, ErrorCode(err)) } workercount := int(finalconfig.WorkerCount) // TODO(tmckay) we need some way to track the current spark config for a cluster, // maybe in annotations. 
If someone tries to change the spark config for a cluster, // that should be an error at this point (unless we spin all the pods down and // redeploy) rcc := client.ReplicationControllers(namespace) repl, err := getReplController(rcc, clustername, workerType) if err != nil { return result, generalErr(err, replMsgWorker, ClientOperationCode) } else if repl == nil { return result, generalErr(err, replMsgWorker, ClusterIncompleteCode) } // If the current replica count does not match the request, update the replication controller if repl.Spec.Replicas != workercount { repl.Spec.Replicas = workercount _, err = rcc.Update(repl) if err != nil { return result, generalErr(err, updateReplMsg, ClientOperationCode) } } result.Name = name result.Namespace = namespace result.Config = finalconfig return result, nil } Write path of spark config volume to UPDATE_SPARK_CONF_DIR Update oshinko-core in the vendor dir: This change puts the path of the spark config volume in UPDATE_SPARK_CONF_DIR instead of SPARK_CONF_DIR in spark cluster pods. This allows the startup script to check for presence of the directory and handle it rather than directly changing where spark looks for it's configuration. 
(commit 7b1bc96be2e2e67f4923531d4b3c53369046f0a3 in oshinko-core) package clusters import ( "fmt" "os" "strconv" "strings" "time" oclient "github.com/openshift/origin/pkg/client" ocon "github.com/radanalyticsio/oshinko-core/clusters/containers" odc "github.com/radanalyticsio/oshinko-core/clusters/deploymentconfigs" opt "github.com/radanalyticsio/oshinko-core/clusters/podtemplates" "github.com/radanalyticsio/oshinko-core/clusters/probes" osv "github.com/radanalyticsio/oshinko-core/clusters/services" kapi "k8s.io/kubernetes/pkg/api" kclient "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/sets" ) const clusterConfigMsg = "invalid cluster configuration" const missingConfigMsg = "unable to find spark configuration '%s'" const findDepConfigMsg = "unable to find deployment configs" const createDepConfigMsg = "unable to create deployment config '%s'" const replMsgWorker = "unable to find replication controller for spark workers" const replMsgMaster = "unable to find replication controller for spark master" const masterSrvMsg = "unable to create spark master service endpoint" const mastermsg = "unable to find spark masters" const updateReplMsg = "unable to update replication controller for spark workers" const noSuchClusterMsg = "no such cluster '%s'" const podListMsg = "unable to retrive pod list" const typeLabel = "oshinko-type" const clusterLabel = "oshinko-cluster" const workerType = "worker" const masterType = "master" const webuiType = "webui" const masterPortName = "spark-master" const masterPort = 7077 const webPortName = "spark-webui" const webPort = 8080 const sparkconfdir = "/etc/oshinko-spark-configs" // The suffix to add to the spark master hostname (clustername) for the web service const webServiceSuffix = "-ui" type SparkPod struct { IP string Status string Type string } type SparkCluster struct { Namespace string `json:"namespace,omitempty"` Name string `json:"name,omitempty"` Href string `json:"href"` 
Image string `json:"image"` MasterURL string `json:"masterUrl"` MasterWebURL string `json:"masterWebUrl"` Status string `json:"status"` WorkerCount int `json:"workerCount"` MasterCount int `json:"masterCount,omitempty"` Config ClusterConfig Pods []SparkPod } func generalErr(err error, msg string, code int) ClusterError { if err != nil { if msg == "" { msg = "error: " + err.Error() } else { msg = msg + ", error: " + err.Error() } } return NewClusterError(msg, code) } func makeSelector(otype string, clustername string) kapi.ListOptions { // Build a selector list based on type and/or cluster name ls := labels.NewSelector() if otype != "" { ot, _ := labels.NewRequirement(typeLabel, labels.EqualsOperator, sets.NewString(otype)) ls = ls.Add(*ot) } if clustername != "" { cname, _ := labels.NewRequirement(clusterLabel, labels.EqualsOperator, sets.NewString(clustername)) ls = ls.Add(*cname) } return kapi.ListOptions{LabelSelector: ls} } func retrieveServiceURL(client kclient.ServiceInterface, stype, clustername string) string { selectorlist := makeSelector(stype, clustername) srvs, err := client.List(selectorlist) if err == nil && len(srvs.Items) != 0 { srv := srvs.Items[0] scheme := "http://" if stype == masterType { scheme = "spark://" } return scheme + srv.Name + ":" + strconv.Itoa(srv.Spec.Ports[0].Port) } return "" } func checkForDeploymentConfigs(client oclient.DeploymentConfigInterface, clustername string) (bool, error) { selectorlist := makeSelector(masterType, clustername) dcs, err := client.List(selectorlist) if err != nil { return false, err } if len(dcs.Items) == 0 { return false, nil } selectorlist = makeSelector(workerType, clustername) dcs, err = client.List(selectorlist) if err != nil { return false, err } if len(dcs.Items) == 0 { return false, nil } return true, nil } func makeEnvVars(clustername, sparkconfdir string) []kapi.EnvVar { envs := []kapi.EnvVar{} envs = append(envs, kapi.EnvVar{Name: "OSHINKO_SPARK_CLUSTER", Value: clustername}) envs = 
append(envs, kapi.EnvVar{Name: "OSHINKO_REST_HOST", Value: os.Getenv("OSHINKO_REST_SERVICE_HOST")}) envs = append(envs, kapi.EnvVar{Name: "OSHINKO_REST_PORT", Value: os.Getenv("OSHINKO_REST_SERVICE_PORT")}) if sparkconfdir != "" { envs = append(envs, kapi.EnvVar{Name: "UPDATE_SPARK_CONF_DIR", Value: sparkconfdir}) } return envs } func makeWorkerEnvVars(clustername, sparkconfdir string) []kapi.EnvVar { envs := []kapi.EnvVar{} envs = makeEnvVars(clustername, sparkconfdir) envs = append(envs, kapi.EnvVar{ Name: "SPARK_MASTER_ADDRESS", Value: "spark://" + clustername + ":" + strconv.Itoa(masterPort)}) envs = append(envs, kapi.EnvVar{ Name: "SPARK_MASTER_UI_ADDRESS", Value: "http://" + clustername + webServiceSuffix + ":" + strconv.Itoa(webPort)}) return envs } func sparkWorker(namespace string, image string, replicas int, clustername, sparkconfdir, sparkworkerconfig string) *odc.ODeploymentConfig { // Create the basic deployment config // We will use a label and pod selector based on the cluster name. // Openshift will add additional labels and selectors to distinguish pods handled by // this deploymentconfig from pods beloning to another. dc := odc.DeploymentConfig(clustername+"-w", namespace). TriggerOnConfigChange().RollingStrategy().Label(clusterLabel, clustername). Label(typeLabel, workerType). PodSelector(clusterLabel, clustername).Replicas(replicas) // Create a pod template spec with the matching label pt := opt.PodTemplateSpec().Label(clusterLabel, clustername).Label(typeLabel, workerType) // Create a container with the correct ports and start command webport := 8081 webp := ocon.ContainerPort(webPortName, webport) cont := ocon.Container(dc.Name, image). Ports(webp). 
SetLivenessProbe(probes.NewHTTPGetProbe(webport)).EnvVars(makeWorkerEnvVars(clustername, sparkconfdir)) if sparkworkerconfig != "" { pt = pt.SetConfigMapVolume(sparkworkerconfig) cont = cont.SetVolumeMount(sparkworkerconfig, sparkconfdir, true) } // Finally, assign the container to the pod template spec and // assign the pod template spec to the deployment config return dc.PodTemplateSpec(pt.Containers(cont)) } func sparkMaster(namespace, image, clustername, sparkconfdir, sparkmasterconfig string) *odc.ODeploymentConfig { // Create the basic deployment config // We will use a label and pod selector based on the cluster name // Openshift will add additional labels and selectors to distinguish pods handled by // this deploymentconfig from pods beloning to another. dc := odc.DeploymentConfig(clustername+"-m", namespace). TriggerOnConfigChange().RollingStrategy().Label(clusterLabel, clustername). Label(typeLabel, masterType). PodSelector(clusterLabel, clustername) // Create a pod template spec with the matching label pt := opt.PodTemplateSpec().Label(clusterLabel, clustername). Label(typeLabel, masterType) // Create a container with the correct ports and start command httpProbe := probes.NewHTTPGetProbe(webPort) masterp := ocon.ContainerPort(masterPortName, masterPort) webp := ocon.ContainerPort(webPortName, webPort) cont := ocon.Container(dc.Name, image). Ports(masterp, webp). SetLivenessProbe(httpProbe). 
SetReadinessProbe(httpProbe).EnvVars(makeEnvVars(clustername, sparkconfdir)) if sparkmasterconfig != "" { pt = pt.SetConfigMapVolume(sparkmasterconfig) cont = cont.SetVolumeMount(sparkmasterconfig, sparkconfdir, true) } // Finally, assign the container to the pod template spec and // assign the pod template spec to the deployment config return dc.PodTemplateSpec(pt.Containers(cont)) } func service(name string, port int, clustername, otype string, podselectors map[string]string) (*osv.OService, *osv.OServicePort) { p := osv.ServicePort(port).TargetPort(port) return osv.Service(name).Label(clusterLabel, clustername). Label(typeLabel, otype).PodSelectors(podselectors).Ports(p), p } func checkForConfigMap(name string, cm kclient.ConfigMapsInterface) error { _, err := cm.Get(name) if err != nil { if strings.Index(err.Error(), "not found") != -1 { return generalErr(err, fmt.Sprintf(missingConfigMsg, name), ClusterConfigCode) } return generalErr(nil, fmt.Sprintf(missingConfigMsg, name), ClientOperationCode) } return nil } func countWorkers(client kclient.PodInterface, clustername string) (int, *kapi.PodList, error) { // If we are unable to retrieve a list of worker pods, return -1 for count // This is an error case, differnt from a list of length 0. 
Let the caller // decide whether to report the error or the -1 count cnt := -1 selectorlist := makeSelector(workerType, clustername) pods, err := client.List(selectorlist) if pods != nil { cnt = len(pods.Items) } return cnt, pods, err } // CreateClusterResponse create a cluster and return the representation func CreateCluster(clustername, namespace, sparkimage string, config *ClusterConfig, osclient *oclient.Client, client *kclient.Client) (SparkCluster, error) { var masterconfdir string var workerconfdir string var result SparkCluster = SparkCluster{} createCode := func(err error) int { if err != nil && strings.Index(err.Error(), "already exists") != -1 { return ComponentExistsCode } return ClientOperationCode } masterhost := clustername // Copy any named config referenced and update it with any explicit config values finalconfig, err := GetClusterConfig(config, client.ConfigMaps(namespace)) if err != nil { return result, generalErr(err, clusterConfigMsg, ErrorCode(err)) } workercount := int(finalconfig.WorkerCount) // Check if finalconfig contains the names of ConfigMaps to use for spark // configuration. If so they must exist. 
The ConfigMaps will be mounted // as volumes on spark pods and the path stored in the environment // variable UPDATE_SPARK_CONF_DIR cm := client.ConfigMaps(namespace) if finalconfig.SparkMasterConfig != "" { err := checkForConfigMap(finalconfig.SparkMasterConfig, cm) if err != nil { return result, err } masterconfdir = sparkconfdir } if finalconfig.SparkWorkerConfig != "" { err := checkForConfigMap(finalconfig.SparkWorkerConfig, cm) if err != nil { return result, err } workerconfdir = sparkconfdir } // Create the master deployment config masterdc := sparkMaster(namespace, sparkimage, clustername, masterconfdir, finalconfig.SparkMasterConfig) // Create the services that will be associated with the master pod // They will be created with selectors based on the pod labels mastersv, _ := service(masterhost, masterdc.FindPort(masterPortName), clustername, masterType, masterdc.GetPodTemplateSpecLabels()) websv, _ := service(masterhost+webServiceSuffix, masterdc.FindPort(webPortName), clustername, webuiType, masterdc.GetPodTemplateSpecLabels()) // Create the worker deployment config workerdc := sparkWorker(namespace, sparkimage, workercount, clustername, workerconfdir, finalconfig.SparkWorkerConfig) // Launch all of the objects dcc := osclient.DeploymentConfigs(namespace) _, err = dcc.Create(&masterdc.DeploymentConfig) if err != nil { return result, generalErr(err, fmt.Sprintf(createDepConfigMsg, masterdc.Name), createCode(err)) } _, err = dcc.Create(&workerdc.DeploymentConfig) if err != nil { // Since we created the master deployment config, try to clean up DeleteCluster(clustername, namespace, osclient, client) return result, generalErr(err, fmt.Sprintf(createDepConfigMsg, workerdc.Name), createCode(err)) } sc := client.Services(namespace) _, err = sc.Create(&mastersv.Service) if err != nil { // Since we create the master and workers, try to clean up DeleteCluster(clustername, namespace, osclient, client) return result, generalErr(err, masterSrvMsg, createCode(err)) } 
// Note, if spark webui service fails for some reason we can live without it // TODO ties into cluster status, make a note if the service is missing sc.Create(&websv.Service) // Wait for the replication controllers to exist before building the response. rcc := client.ReplicationControllers(namespace) { var mrepl, wrepl *kapi.ReplicationController mrepl = nil wrepl = nil for i := 0; i < 4; i++ { if mrepl == nil { mrepl, _ = getReplController(rcc, clustername, masterType) } if wrepl == nil { wrepl, _ = getReplController(rcc, clustername, workerType) } if wrepl != nil && mrepl != nil { break } time.Sleep(250 * time.Millisecond) } } result.Name = clustername result.Namespace = namespace result.Href = "/clusters/" + clustername result.Image = sparkimage result.MasterURL = retrieveServiceURL(sc, masterType, clustername) result.MasterWebURL = retrieveServiceURL(sc, webuiType, clustername) if result.MasterURL == "" { result.Status = "MasterServiceMissing" } else { result.Status = "Running" } result.Config = finalconfig result.MasterCount = 0 result.WorkerCount = 0 result.Pods = []SparkPod{} return result, nil } func waitForCount(client kclient.ReplicationControllerInterface, name string, count int) { for i := 0; i < 5; i++ { r, _ := client.Get(name) if r.Status.Replicas == count { return } time.Sleep(1 * time.Second) } } func DeleteCluster(clustername, namespace string, osclient *oclient.Client, client *kclient.Client) (string, error) { var foundSomething bool = false info := []string{} scalerepls := []string{} // Build a selector list for the "oshinko-cluster" label selectorlist := makeSelector("", clustername) // Delete all of the deployment configs dcc := osclient.DeploymentConfigs(namespace) deployments, err := dcc.List(selectorlist) if err != nil { info = append(info, "unable to find deployment configs ("+err.Error()+")") } else { foundSomething = len(deployments.Items) > 0 } for i := range deployments.Items { name := deployments.Items[i].Name err = dcc.Delete(name) 
if err != nil { info = append(info, "unable to delete deployment config "+name+" ("+err.Error()+")") } } // Get a list of all the replication controllers for the cluster // and set all of the replica values to 0 rcc := client.ReplicationControllers(namespace) repls, err := rcc.List(selectorlist) if err != nil { info = append(info, "unable to find replication controllers ("+err.Error()+")") } else { foundSomething = foundSomething || len(repls.Items) > 0 } for i := range repls.Items { name := repls.Items[i].Name repls.Items[i].Spec.Replicas = 0 _, err = rcc.Update(&repls.Items[i]) if err != nil { info = append(info, "unable to scale replication controller "+name+" ("+err.Error()+")") } else { scalerepls = append(scalerepls, name) } } // Wait for the replica count to drop to 0 for each one we scaled for i := range scalerepls { waitForCount(rcc, scalerepls[i], 0) } // Delete each replication controller for i := range repls.Items { name := repls.Items[i].Name err = rcc.Delete(name) if err != nil { info = append(info, "unable to delete replication controller "+name+" ("+err.Error()+")") } } // Delete the services sc := client.Services(namespace) srvs, err := sc.List(selectorlist) if err != nil { info = append(info, "unable to find services ("+err.Error()+")") } else { foundSomething = foundSomething || len(srvs.Items) > 0 } for i := range srvs.Items { name := srvs.Items[i].Name err = sc.Delete(name) if err != nil { info = append(info, "unable to delete service "+name+" ("+err.Error()+")") } } // If we found some part of a cluster, then there is no error // even though the cluster may not have been fully complete. 
// If we didn't find any trace of a cluster, then call it an error if !foundSomething { return "", generalErr(nil, fmt.Sprintf(noSuchClusterMsg, clustername), NoSuchClusterCode) } return strings.Join(info, ", "), nil } // FindSingleClusterResponse find a cluster and return its representation func FindSingleCluster(name, namespace string, osclient *oclient.Client, client *kclient.Client) (SparkCluster, error) { addpod := func(p kapi.Pod) SparkPod { return SparkPod{ IP: p.Status.PodIP, Status: string(p.Status.Phase), Type: p.Labels[typeLabel], } } clustername := name var result SparkCluster = SparkCluster{} // Before we do further checks, make sure that we have deploymentconfigs // If either the master or the worker deploymentconfig are missing, we // assume that the cluster is missing. These are the base objects that // we use to create a cluster ok, err := checkForDeploymentConfigs(osclient.DeploymentConfigs(namespace), clustername) if err != nil { return result, generalErr(err, findDepConfigMsg, ClientOperationCode) } if !ok { return result, generalErr(nil, fmt.Sprintf(noSuchClusterMsg, clustername), NoSuchClusterCode) } pc := client.Pods(namespace) sc := client.Services(namespace) rcc := client.ReplicationControllers(namespace) mrepl, err := getReplController(rcc, clustername, masterType) if err != nil { return result, generalErr(err, replMsgMaster, ClientOperationCode) } else if mrepl == nil { return result, generalErr(err, replMsgMaster, ClusterIncompleteCode) } wrepl, err := getReplController(rcc, clustername, workerType) if err != nil { return result, generalErr(err, replMsgWorker, ClientOperationCode) } else if wrepl == nil { return result, generalErr(err, replMsgWorker, ClusterIncompleteCode) } // TODO (tmckay) we should add the spark master and worker configuration values here. 
// the most likely thing to do is store them in an annotation result.Name = name result.Namespace = namespace result.Href = "/clusters/" + clustername result.WorkerCount, _, _ = countWorkers(pc, clustername) result.MasterCount = 1 result.Config.WorkerCount = wrepl.Spec.Replicas result.Config.MasterCount = mrepl.Spec.Replicas result.MasterURL = retrieveServiceURL(sc, masterType, clustername) result.MasterWebURL = retrieveServiceURL(sc, webuiType, clustername) if result.MasterURL == "" { result.Status = "MasterServiceMissing" } else { result.Status = "Running" } // Report pos result.Pods = []SparkPod{} selectorlist := makeSelector(masterType, clustername) pods, err := pc.List(selectorlist) if err != nil { return result, generalErr(err, podListMsg, ClientOperationCode) } for i := range pods.Items { result.Pods = append(result.Pods, addpod(pods.Items[i])) } _, workers, err := countWorkers(pc, clustername) if err != nil { return result, generalErr(err, podListMsg, ClientOperationCode) } for i := range workers.Items { result.Pods = append(result.Pods, addpod(workers.Items[i])) } return result, nil } // FindClusters find a cluster and return its representation func FindClusters(namespace string, client *kclient.Client) ([]SparkCluster, error) { var result []SparkCluster = []SparkCluster{} pc := client.Pods(namespace) sc := client.Services(namespace) // Create a map so that we can track clusters by name while we // find out information about them clist := map[string]*SparkCluster{} // Get all of the master pods pods, err := pc.List(makeSelector(masterType, "")) if err != nil { return result, generalErr(err, mastermsg, ClientOperationCode) } // TODO should we do something else to find the clusters, like count deployment configs? 
// From the list of master pods, figure out which clusters we have for i := range pods.Items { // Build the cluster record if we don't already have it // (theoretically with HA we might have more than 1 master) clustername := pods.Items[i].Labels[clusterLabel] if citem, ok := clist[clustername]; !ok { clist[clustername] = new(SparkCluster) citem = clist[clustername] citem.Name = clustername citem.Href = "/clusters/" + clustername // Note, we do not report an error here since we are // reporting on multiple clusters. Instead cnt will be -1. cnt, _, _ := countWorkers(pc, clustername) // TODO we only want to count running pods (not terminating) citem.WorkerCount = cnt citem.MasterURL = retrieveServiceURL(sc, masterType, clustername) citem.MasterWebURL = retrieveServiceURL(sc, webuiType, clustername) // TODO make something real for status if citem.MasterURL == "" { citem.Status = "MasterServiceMissing" } else { citem.Status = "Running" } result = append(result, *citem) } } return result, nil } func getReplController(client kclient.ReplicationControllerInterface, clustername, otype string) (*kapi.ReplicationController, error) { selectorlist := makeSelector(otype, clustername) repls, err := client.List(selectorlist) if err != nil || len(repls.Items) == 0 { return nil, err } // Use the latest replication controller. 
There could be more than one // if the user did something like oc env to set a new env var on a deployment newestRepl := repls.Items[0] for i := 0; i < len(repls.Items); i++ { if repls.Items[i].CreationTimestamp.Unix() > newestRepl.CreationTimestamp.Unix() { newestRepl = repls.Items[i] } } return &newestRepl, nil } // UpdateSingleClusterResponse update a cluster and return the new representation func UpdateCluster(name, namespace string, config *ClusterConfig, osclient *oclient.Client, client *kclient.Client) (SparkCluster, error) { var result SparkCluster = SparkCluster{} clustername := name // Before we do further checks, make sure that we have deploymentconfigs // If either the master or the worker deploymentconfig are missing, we // assume that the cluster is missing. These are the base objects that // we use to create a cluster ok, err := checkForDeploymentConfigs(osclient.DeploymentConfigs(namespace), clustername) if err != nil { return result, generalErr(err, findDepConfigMsg, ClientOperationCode) } if !ok { return result, generalErr(nil, fmt.Sprintf(noSuchClusterMsg, clustername), NoSuchClusterCode) } // Copy any named config referenced and update it with any explicit config values finalconfig, err := GetClusterConfig(config, client.ConfigMaps(namespace)) if err != nil { return result, generalErr(err, clusterConfigMsg, ErrorCode(err)) } workercount := int(finalconfig.WorkerCount) // TODO(tmckay) we need some way to track the current spark config for a cluster, // maybe in annotations. 
If someone tries to change the spark config for a cluster, // that should be an error at this point (unless we spin all the pods down and // redeploy) rcc := client.ReplicationControllers(namespace) repl, err := getReplController(rcc, clustername, workerType) if err != nil { return result, generalErr(err, replMsgWorker, ClientOperationCode) } else if repl == nil { return result, generalErr(err, replMsgWorker, ClusterIncompleteCode) } // If the current replica count does not match the request, update the replication controller if repl.Spec.Replicas != workercount { repl.Spec.Replicas = workercount _, err = rcc.Update(repl) if err != nil { return result, generalErr(err, updateReplMsg, ClientOperationCode) } } result.Name = name result.Namespace = namespace result.Config = finalconfig return result, nil }
package main import ( "context" "net/http" _ "net/http/pprof" "os" "runtime" "strings" "time" "cloud.google.com/go/logging" "github.com/cloudfoundry-community/go-cfclient" "github.com/cloudfoundry-community/stackdriver-tools/src/stackdriver-nozzle/cloudfoundry" "github.com/cloudfoundry-community/stackdriver-tools/src/stackdriver-nozzle/config" "github.com/cloudfoundry-community/stackdriver-tools/src/stackdriver-nozzle/filter" "github.com/cloudfoundry-community/stackdriver-tools/src/stackdriver-nozzle/heartbeat" "github.com/cloudfoundry-community/stackdriver-tools/src/stackdriver-nozzle/nozzle" "github.com/cloudfoundry-community/stackdriver-tools/src/stackdriver-nozzle/stackdriver" "github.com/cloudfoundry-community/stackdriver-tools/src/stackdriver-nozzle/version" "github.com/cloudfoundry/lager" ) func main() { a := newApp() ctx, cancel := context.WithCancel(context.Background()) if a.c.DebugNozzle { defer handleFatalError(a, cancel) go func() { a.logger.Info("pprof", lager.Data{ "http.ListenAndServe": http.ListenAndServe("localhost:6060", nil), }) }() } producer := a.newProducer() consumer := a.newConsumer(ctx) errs, fhErrs := consumer.Start(producer) defer consumer.Stop() go func() { for err := range errs { a.logger.Error("nozzle", err) } }() fatalErr := <-fhErrs if fatalErr != nil { cancel() t := time.NewTimer(5 * time.Second) for { select { case <-time.Tick(100 * time.Millisecond): if a.bufferEmpty() { break } case <-t.C: break } } a.logger.Fatal("firehose", fatalErr) } } func handleFatalError(a *app, cancel context.CancelFunc) { if e := recover(); e != nil { // Cancel the context cancel() stack := make([]byte, 1<<16) stackSize := runtime.Stack(stack, true) stackTrace := string(stack[:stackSize]) payload := map[string]interface{}{ "serviceContext": map[string]interface{}{ "service": version.Name, "version": version.Release(), }, "message": stackTrace, } log := &stackdriver.Log{ Payload: payload, Labels: map[string]string{}, Severity: logging.Error, } // 
Purposefully get a new log adapter here since there
		// were issues re-using the one that the nozzle uses.
		logAdapter, _ := a.newLogAdapter()
		logAdapter.PostLog(log)
		logAdapter.Flush()

		// Re-throw the error, we want to ensure it's logged directly to
		// stackdriver but we are not in a recoverable state.
		panic(e)
	}
}

// newApp builds the application context: logger, configuration, Stackdriver
// clients, heartbeaters, and the Cloud Foundry client.
func newApp() *app {
	logger := lager.NewLogger("stackdriver-nozzle")
	logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))
	logger.Info("version", lager.Data{"name": version.Name, "release": version.Release(), "user_agent": version.UserAgent()})

	c, err := config.NewConfig()
	if err != nil {
		logger.Fatal("config", err)
	}
	logger.Info("arguments", c.ToData())

	metricClient, err := stackdriver.NewMetricClient()
	if err != nil {
		logger.Fatal("metricClient", err)
	}

	// Create a metricAdapter that will be used by the heartbeater
	// to send heartbeat metrics to Stackdriver. This metricAdapter
	// has its own heartbeater (with its own trigger) that writes to a logger.
	trigger := time.NewTicker(time.Duration(c.HeartbeatRate) * time.Second).C
	adapterHeartbeater := heartbeat.NewHeartbeater(logger, trigger)
	adapterHeartbeater.Start()
	metricAdapter, err := stackdriver.NewMetricAdapter(c.ProjectID, metricClient, adapterHeartbeater)
	if err != nil {
		logger.Error("metricAdapter", err)
	}

	// Create a heartbeater that will write heartbeat events to Stackdriver
	// logging and monitoring. It uses the metricAdapter created previously
	// to write to Stackdriver.
	metricHandler := heartbeat.NewMetricHandler(metricAdapter, logger, c.NozzleId, c.NozzleZone)
	trigger2 := time.NewTicker(time.Duration(c.HeartbeatRate) * time.Second).C
	heartbeater := heartbeat.NewLoggerMetricHeartbeater(metricHandler, logger, trigger2)

	cfConfig := &cfclient.Config{
		ApiAddress:        c.APIEndpoint,
		Username:          c.Username,
		Password:          c.Password,
		SkipSslValidation: c.SkipSSL}
	cfClient := cfclient.NewClient(cfConfig)

	var appInfoRepository cloudfoundry.AppInfoRepository
	if c.ResolveAppMetadata {
		appInfoRepository = cloudfoundry.NewAppInfoRepository(cfClient)
	} else {
		appInfoRepository = cloudfoundry.NullAppInfoRepository()
	}
	labelMaker := nozzle.NewLabelMaker(appInfoRepository)

	return &app{
		logger:      logger,
		c:           c,
		cfConfig:    cfConfig,
		cfClient:    cfClient,
		labelMaker:  labelMaker,
		heartbeater: heartbeater,
	}
}

// app holds the shared dependencies used by the producer/consumer factories.
type app struct {
	logger      lager.Logger
	c           *config.Config
	cfConfig    *cfclient.Config
	cfClient    *cfclient.Client
	labelMaker  nozzle.LabelMaker
	heartbeater heartbeat.Heartbeater
	bufferEmpty func() bool
}

// newProducer returns the firehose wrapped in the configured event filter.
func (a *app) newProducer() cloudfoundry.Firehose {
	firehose := cloudfoundry.NewFirehose(a.cfConfig, a.cfClient, a.c.SubscriptionID)

	producer, err := filter.New(firehose, strings.Split(a.c.Events, ","), a.heartbeater)
	if err != nil {
		a.logger.Fatal("filter", err)
	}

	return producer
}

// newConsumer assembles the nozzle from its log and metric sinks.
func (a *app) newConsumer(ctx context.Context) *nozzle.Nozzle {
	return &nozzle.Nozzle{
		LogSink:     a.newLogSink(),
		MetricSink:  a.newMetricSink(ctx),
		Heartbeater: a.heartbeater,
	}
}

// newLogSink creates the Stackdriver logging sink and drains its error channel.
func (a *app) newLogSink() nozzle.Sink {
	logAdapter, logErrs := a.newLogAdapter()
	go func() {
		err := <-logErrs
		a.logger.Error("logAdapter", err)
	}()
	return nozzle.NewLogSink(a.labelMaker, logAdapter, a.c.NewlineToken)
}

// newLogAdapter builds a batched Stackdriver log adapter.
func (a *app) newLogAdapter() (stackdriver.LogAdapter, <-chan error) {
	return stackdriver.NewLogAdapter(
		a.c.ProjectID,
		a.c.BatchCount,
		time.Duration(a.c.BatchDuration)*time.Second,
		a.heartbeater,
	)
}

// newMetricSink creates the Stackdriver metric sink backed by an auto-culled buffer.
func (a *app) newMetricSink(ctx context.Context) nozzle.Sink {
	metricClient, err :=
stackdriver.NewMetricClient() if err != nil { a.logger.Fatal("metricClient", err) } metricAdapter, err := stackdriver.NewMetricAdapter(a.c.ProjectID, metricClient, a.heartbeater) if err != nil { a.logger.Error("metricAdapter", err) } metricBuffer, errs := stackdriver.NewAutoCulledMetricsBuffer(ctx, a.logger, time.Duration(a.c.MetricsBufferDuration)*time.Second, a.c.MetricsBufferSize, metricAdapter) a.bufferEmpty = metricBuffer.IsEmpty go func() { for err = range errs { a.logger.Error("metricsBuffer", err) } }() return nozzle.NewMetricSink(a.labelMaker, metricBuffer, nozzle.NewUnitParser()) } main: Fix shutdown bug If a fatal error occurred, the program would hanging trying to drain the log buffer. package main import ( "context" "net/http" _ "net/http/pprof" "os" "runtime" "strings" "time" "cloud.google.com/go/logging" "github.com/cloudfoundry-community/go-cfclient" "github.com/cloudfoundry-community/stackdriver-tools/src/stackdriver-nozzle/cloudfoundry" "github.com/cloudfoundry-community/stackdriver-tools/src/stackdriver-nozzle/config" "github.com/cloudfoundry-community/stackdriver-tools/src/stackdriver-nozzle/filter" "github.com/cloudfoundry-community/stackdriver-tools/src/stackdriver-nozzle/heartbeat" "github.com/cloudfoundry-community/stackdriver-tools/src/stackdriver-nozzle/nozzle" "github.com/cloudfoundry-community/stackdriver-tools/src/stackdriver-nozzle/stackdriver" "github.com/cloudfoundry-community/stackdriver-tools/src/stackdriver-nozzle/version" "github.com/cloudfoundry/lager" ) func main() { a := newApp() ctx, cancel := context.WithCancel(context.Background()) if a.c.DebugNozzle { defer handleFatalError(a, cancel) go func() { a.logger.Info("pprof", lager.Data{ "http.ListenAndServe": http.ListenAndServe("localhost:6060", nil), }) }() } producer := a.newProducer() consumer := a.newConsumer(ctx) errs, fhErrs := consumer.Start(producer) defer consumer.Stop() go func() { for err := range errs { a.logger.Error("nozzle", err) } }() fatalErr := <-fhErrs if 
fatalErr != nil { cancel() t := time.NewTimer(5 * time.Second) for { select { case <-time.Tick(100 * time.Millisecond): if a.bufferEmpty() { a.logger.Fatal("firehose", fatalErr, lager.Data{"cleanup": "The metrics buffer was successfully flushed before shutdown"}) } case <-t.C: a.logger.Fatal("firehose", fatalErr, lager.Data{"cleanup": "The metrics buffer could not be flushed before shutdown"}) } } } } func handleFatalError(a *app, cancel context.CancelFunc) { if e := recover(); e != nil { // Cancel the context cancel() stack := make([]byte, 1<<16) stackSize := runtime.Stack(stack, true) stackTrace := string(stack[:stackSize]) payload := map[string]interface{}{ "serviceContext": map[string]interface{}{ "service": version.Name, "version": version.Release(), }, "message": stackTrace, } log := &stackdriver.Log{ Payload: payload, Labels: map[string]string{}, Severity: logging.Error, } // Purposefully get a new log adapter here since there // were issues re-using the one that the nozzle uses. logAdapter, _ := a.newLogAdapter() logAdapter.PostLog(log) logAdapter.Flush() // Re-throw the error, we want to ensure it's logged directly to // stackdriver but we are not in a recoverable state. panic(e) } } func newApp() *app { logger := lager.NewLogger("stackdriver-nozzle") logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG)) logger.Info("version", lager.Data{"name": version.Name, "release": version.Release(), "user_agent": version.UserAgent()}) c, err := config.NewConfig() if err != nil { logger.Fatal("config", err) } logger.Info("arguments", c.ToData()) metricClient, err := stackdriver.NewMetricClient() if err != nil { logger.Fatal("metricClient", err) } // Create a metricAdapter that will be used by the heartbeater // to send heartbeat metrics to Stackdriver. This metricAdapter // has its own heartbeater (with its own trigger) that writes to a logger. 
trigger := time.NewTicker(time.Duration(c.HeartbeatRate) * time.Second).C adapterHeartbeater := heartbeat.NewHeartbeater(logger, trigger) adapterHeartbeater.Start() metricAdapter, err := stackdriver.NewMetricAdapter(c.ProjectID, metricClient, adapterHeartbeater) if err != nil { logger.Error("metricAdapter", err) } // Create a heartbeater that will write heartbeat events to Stackdriver // logging and monitoring. It uses the metricAdapter created previously // to write to Stackdriver. metricHandler := heartbeat.NewMetricHandler(metricAdapter, logger, c.NozzleId, c.NozzleZone) trigger2 := time.NewTicker(time.Duration(c.HeartbeatRate) * time.Second).C heartbeater := heartbeat.NewLoggerMetricHeartbeater(metricHandler, logger, trigger2) cfConfig := &cfclient.Config{ ApiAddress: c.APIEndpoint, Username: c.Username, Password: c.Password, SkipSslValidation: c.SkipSSL} cfClient := cfclient.NewClient(cfConfig) var appInfoRepository cloudfoundry.AppInfoRepository if c.ResolveAppMetadata { appInfoRepository = cloudfoundry.NewAppInfoRepository(cfClient) } else { appInfoRepository = cloudfoundry.NullAppInfoRepository() } labelMaker := nozzle.NewLabelMaker(appInfoRepository) return &app{ logger: logger, c: c, cfConfig: cfConfig, cfClient: cfClient, labelMaker: labelMaker, heartbeater: heartbeater, } } type app struct { logger lager.Logger c *config.Config cfConfig *cfclient.Config cfClient *cfclient.Client labelMaker nozzle.LabelMaker heartbeater heartbeat.Heartbeater bufferEmpty func() bool } func (a *app) newProducer() cloudfoundry.Firehose { firehose := cloudfoundry.NewFirehose(a.cfConfig, a.cfClient, a.c.SubscriptionID) producer, err := filter.New(firehose, strings.Split(a.c.Events, ","), a.heartbeater) if err != nil { a.logger.Fatal("filter", err) } return producer } func (a *app) newConsumer(ctx context.Context) *nozzle.Nozzle { return &nozzle.Nozzle{ LogSink: a.newLogSink(), MetricSink: a.newMetricSink(ctx), Heartbeater: a.heartbeater, } } func (a *app) newLogSink() 
nozzle.Sink { logAdapter, logErrs := a.newLogAdapter() go func() { err := <-logErrs a.logger.Error("logAdapter", err) }() return nozzle.NewLogSink(a.labelMaker, logAdapter, a.c.NewlineToken) } func (a *app) newLogAdapter() (stackdriver.LogAdapter, <-chan error) { return stackdriver.NewLogAdapter( a.c.ProjectID, a.c.BatchCount, time.Duration(a.c.BatchDuration)*time.Second, a.heartbeater, ) } func (a *app) newMetricSink(ctx context.Context) nozzle.Sink { metricClient, err := stackdriver.NewMetricClient() if err != nil { a.logger.Fatal("metricClient", err) } metricAdapter, err := stackdriver.NewMetricAdapter(a.c.ProjectID, metricClient, a.heartbeater) if err != nil { a.logger.Error("metricAdapter", err) } metricBuffer, errs := stackdriver.NewAutoCulledMetricsBuffer(ctx, a.logger, time.Duration(a.c.MetricsBufferDuration)*time.Second, a.c.MetricsBufferSize, metricAdapter) a.bufferEmpty = metricBuffer.IsEmpty go func() { for err = range errs { a.logger.Error("metricsBuffer", err) } }() return nozzle.NewMetricSink(a.labelMaker, metricBuffer, nozzle.NewUnitParser()) }
// Copyright 2015 SteelSeries ApS. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // This package implements a basic LISP interpretor for embedding in a go program for scripting. // This file contains the list set-like primitive functions. package golisp import ( "fmt" ) func RegisterListSetPrimitives() { MakePrimitiveFunction("union", -1, UnionImpl) MakePrimitiveFunction("intersection", -1, IntersectionImpl) } func memp(i *Data, l *Data) bool { for c := l; NotNilP(c); c = Cdr(c) { if IsEqual(i, Car(c)) { return true } } return false } func UnionImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) { var col *Data for a := args; NotNilP(a); a = Cdr(a) { col, err = Eval(Car(a), env) if err != nil { return } if !ListP(col) { err = ProcessError(fmt.Sprintf("union needs lists as its arguments, but got %s.", String(col)), env) return } for cell := col; NotNilP(cell); cell = Cdr(cell) { if !memp(Car(cell), result) { result = Append(result, Car(cell)) } } } return } func IntersectionImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) { var col *Data result = Car(args) for a := Cdr(args); NotNilP(a); a = Cdr(a) { col, err = Eval(Car(a), env) if err != nil { return } if !ListP(col) { err = ProcessError(fmt.Sprintf("union needs lists as its arguments, but got %s.", String(col)), env) return } for cell := result; NotNilP(cell); cell = Cdr(cell) { if !memp(Car(cell), col) { result = RemoveFromListBang(result, Car(cell)) } } } return } Fix bug in intersection // Copyright 2015 SteelSeries ApS. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // This package implements a basic LISP interpretor for embedding in a go program for scripting. // This file contains the list set-like primitive functions. 
package golisp import ( "fmt" ) func RegisterListSetPrimitives() { MakePrimitiveFunction("union", -1, UnionImpl) MakePrimitiveFunction("intersection", -1, IntersectionImpl) } func memp(i *Data, l *Data) bool { for c := l; NotNilP(c); c = Cdr(c) { if IsEqual(i, Car(c)) { return true } } return false } func UnionImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) { var col *Data for a := args; NotNilP(a); a = Cdr(a) { col, err = Eval(Car(a), env) if err != nil { return } if !ListP(col) { err = ProcessError(fmt.Sprintf("union needs lists as its arguments, but got %s.", String(col)), env) return } for cell := col; NotNilP(cell); cell = Cdr(cell) { if !memp(Car(cell), result) { result = Append(result, Car(cell)) } } } return } func IntersectionImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) { var col *Data result, err = Eval(Car(args), env) if err != nil { return } if !ListP(result) { err = ProcessError(fmt.Sprintf("intersection needs lists as its arguments, but got %s.", String(result)), env) return } for a := Cdr(args); NotNilP(a); a = Cdr(a) { col, err = Eval(Car(a), env) if err != nil { return } if !ListP(col) { err = ProcessError(fmt.Sprintf("intersection needs lists as its arguments, but got %s.", String(col)), env) return } for cell := result; NotNilP(cell); cell = Cdr(cell) { if !memp(Car(cell), col) { result = RemoveFromListBang(result, Car(cell)) } } } return }
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package persistentvolume import ( "fmt" "sort" "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/storage/etcd" coreinformers "k8s.io/client-go/informers/core/v1" storageinformers "k8s.io/client-go/informers/storage/v1" clientset "k8s.io/client-go/kubernetes" storagelisters "k8s.io/client-go/listers/storage/v1" "k8s.io/klog" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) // SchedulerVolumeBinder is used by the scheduler to handle PVC/PV binding // and dynamic provisioning. The binding decisions are integrated into the pod scheduling // workflow so that the PV NodeAffinity is also considered along with the pod's other // scheduling requirements. // // This integrates into the existing default scheduler workflow as follows: // 1. The scheduler takes a Pod off the scheduler queue and processes it serially: // a. Invokes all predicate functions, parallelized across nodes. FindPodVolumes() is invoked here. // b. Invokes all priority functions. Future/TBD // c. Selects the best node for the Pod. // d. Cache the node selection for the Pod. AssumePodVolumes() is invoked here. // i. If PVC binding is required, cache in-memory only: // * For manual binding: update PV objects for prebinding to the corresponding PVCs. 
// * For dynamic provisioning: update PVC object with a selected node from c) // * For the pod, which PVCs and PVs need API updates. // ii. Afterwards, the main scheduler caches the Pod->Node binding in the scheduler's pod cache, // This is handled in the scheduler and not here. // e. Asynchronously bind volumes and pod in a separate goroutine // i. BindPodVolumes() is called first. It makes all the necessary API updates and waits for // PV controller to fully bind and provision the PVCs. If binding fails, the Pod is sent // back through the scheduler. // ii. After BindPodVolumes() is complete, then the scheduler does the final Pod->Node binding. // 2. Once all the assume operations are done in d), the scheduler processes the next Pod in the scheduler queue // while the actual binding operation occurs in the background. type SchedulerVolumeBinder interface { // FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the node. // // If a PVC is bound, it checks if the PV's NodeAffinity matches the Node. // Otherwise, it tries to find an available PV to bind to the PVC. // // It returns true if all of the Pod's PVCs have matching PVs or can be dynamic provisioned, // and returns true if bound volumes satisfy the PV NodeAffinity. // // This function is called by the volume binding scheduler predicate and can be called in parallel FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisified, boundVolumesSatisfied bool, err error) // AssumePodVolumes will: // 1. Take the PV matches for unbound PVCs and update the PV cache assuming // that the PV is prebound to the PVC. // 2. Take the PVCs that need provisioning and update the PVC cache with related // annotations set. // // It returns true if all volumes are fully bound // // This function will modify assumedPod with the node name. // This function is called serially. AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, err error) // BindPodVolumes will: // 1. 
Initiate the volume binding by making the API call to prebind the PV // to its matching PVC. // 2. Trigger the volume provisioning by making the API call to set related // annotations on the PVC // 3. Wait for PVCs to be completely bound by the PV controller // // This function can be called in parallel. BindPodVolumes(assumedPod *v1.Pod) error // GetBindingsCache returns the cache used (if any) to store volume binding decisions. GetBindingsCache() PodBindingCache } type volumeBinder struct { kubeClient clientset.Interface classLister storagelisters.StorageClassLister nodeInformer coreinformers.NodeInformer pvcCache PVCAssumeCache pvCache PVAssumeCache // Stores binding decisions that were made in FindPodVolumes for use in AssumePodVolumes. // AssumePodVolumes modifies the bindings again for use in BindPodVolumes. podBindingCache PodBindingCache // Amount of time to wait for the bind operation to succeed bindTimeout time.Duration } // NewVolumeBinder sets up all the caches needed for the scheduler to make volume binding decisions. func NewVolumeBinder( kubeClient clientset.Interface, nodeInformer coreinformers.NodeInformer, pvcInformer coreinformers.PersistentVolumeClaimInformer, pvInformer coreinformers.PersistentVolumeInformer, storageClassInformer storageinformers.StorageClassInformer, bindTimeout time.Duration) SchedulerVolumeBinder { b := &volumeBinder{ kubeClient: kubeClient, classLister: storageClassInformer.Lister(), nodeInformer: nodeInformer, pvcCache: NewPVCAssumeCache(pvcInformer.Informer()), pvCache: NewPVAssumeCache(pvInformer.Informer()), podBindingCache: NewPodBindingCache(), bindTimeout: bindTimeout, } return b } func (b *volumeBinder) GetBindingsCache() PodBindingCache { return b.podBindingCache } // FindPodVolumes caches the matching PVs and PVCs to provision per node in podBindingCache. // This method intentionally takes in a *v1.Node object instead of using volumebinder.nodeInformer. 
// That's necessary because some operations will need to pass in to the predicate fake node objects. func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisfied, boundVolumesSatisfied bool, err error) { podName := getPodName(pod) // Warning: Below log needs high verbosity as it can be printed several times (#60933). klog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name) // Initialize to true for pods that don't have volumes unboundVolumesSatisfied = true boundVolumesSatisfied = true start := time.Now() defer func() { VolumeSchedulingStageLatency.WithLabelValues("predicate").Observe(time.Since(start).Seconds()) if err != nil { VolumeSchedulingStageFailed.WithLabelValues("predicate").Inc() } }() var ( matchedClaims []*bindingInfo provisionedClaims []*v1.PersistentVolumeClaim ) defer func() { // We recreate bindings for each new schedule loop. if len(matchedClaims) == 0 && len(provisionedClaims) == 0 { // Clear cache if no claims to bind or provision for this node. b.podBindingCache.ClearBindings(pod, node.Name) return } // Although we do not distinguish nil from empty in this function, for // easier testing, we normalize empty to nil. if len(matchedClaims) == 0 { matchedClaims = nil } if len(provisionedClaims) == 0 { provisionedClaims = nil } // Mark cache with all matched and provisioned claims for this node b.podBindingCache.UpdateBindings(pod, node.Name, matchedClaims, provisionedClaims) }() // The pod's volumes need to be processed in one call to avoid the race condition where // volumes can get bound/provisioned in between calls. 
boundClaims, claimsToBind, unboundClaimsImmediate, err := b.getPodVolumes(pod) if err != nil { return false, false, err } // Immediate claims should be bound if len(unboundClaimsImmediate) > 0 { return false, false, fmt.Errorf("pod has unbound immediate PersistentVolumeClaims") } // Check PV node affinity on bound volumes if len(boundClaims) > 0 { boundVolumesSatisfied, err = b.checkBoundClaims(boundClaims, node, podName) if err != nil { return false, false, err } } // Find matching volumes and node for unbound claims if len(claimsToBind) > 0 { var ( claimsToFindMatching []*v1.PersistentVolumeClaim claimsToProvision []*v1.PersistentVolumeClaim ) // Filter out claims to provision for _, claim := range claimsToBind { if selectedNode, ok := claim.Annotations[annSelectedNode]; ok { if selectedNode != node.Name { // Fast path, skip unmatched node return false, boundVolumesSatisfied, nil } claimsToProvision = append(claimsToProvision, claim) } else { claimsToFindMatching = append(claimsToFindMatching, claim) } } // Find matching volumes if len(claimsToFindMatching) > 0 { var unboundClaims []*v1.PersistentVolumeClaim unboundVolumesSatisfied, matchedClaims, unboundClaims, err = b.findMatchingVolumes(pod, claimsToFindMatching, node) if err != nil { return false, false, err } claimsToProvision = append(claimsToProvision, unboundClaims...) } // Check for claims to provision if len(claimsToProvision) > 0 { unboundVolumesSatisfied, provisionedClaims, err = b.checkVolumeProvisions(pod, claimsToProvision, node) if err != nil { return false, false, err } } } return unboundVolumesSatisfied, boundVolumesSatisfied, nil } // AssumePodVolumes will take the cached matching PVs and PVCs to provision // in podBindingCache for the chosen node, and: // 1. Update the pvCache with the new prebound PV. // 2. Update the pvcCache with the new PVCs with annotations set // 3. Update podBindingCache again with cached API updates for PVs and PVCs. 
func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, err error) { podName := getPodName(assumedPod) klog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName) start := time.Now() defer func() { VolumeSchedulingStageLatency.WithLabelValues("assume").Observe(time.Since(start).Seconds()) if err != nil { VolumeSchedulingStageFailed.WithLabelValues("assume").Inc() } }() if allBound := b.arePodVolumesBound(assumedPod); allBound { klog.V(4).Infof("AssumePodVolumes for pod %q, node %q: all PVCs bound and nothing to do", podName, nodeName) return true, nil } assumedPod.Spec.NodeName = nodeName claimsToBind := b.podBindingCache.GetBindings(assumedPod, nodeName) claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, nodeName) // Assume PV newBindings := []*bindingInfo{} for _, binding := range claimsToBind { newPV, dirty, err := GetBindVolumeToClaim(binding.pv, binding.pvc) klog.V(5).Infof("AssumePodVolumes: GetBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v", podName, binding.pv.Name, binding.pvc.Name, newPV, dirty, err) if err != nil { b.revertAssumedPVs(newBindings) return false, err } // TODO: can we assume everytime? if dirty { err = b.pvCache.Assume(newPV) if err != nil { b.revertAssumedPVs(newBindings) return false, err } } newBindings = append(newBindings, &bindingInfo{pv: newPV, pvc: binding.pvc}) } // Assume PVCs newProvisionedPVCs := []*v1.PersistentVolumeClaim{} for _, claim := range claimsToProvision { // The claims from method args can be pointing to watcher cache. We must not // modify these, therefore create a copy. 
claimClone := claim.DeepCopy() metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, annSelectedNode, nodeName) err = b.pvcCache.Assume(claimClone) if err != nil { b.revertAssumedPVs(newBindings) b.revertAssumedPVCs(newProvisionedPVCs) return } newProvisionedPVCs = append(newProvisionedPVCs, claimClone) } // Update cache with the assumed pvcs and pvs // Even if length is zero, update the cache with an empty slice to indicate that no // operations are needed b.podBindingCache.UpdateBindings(assumedPod, nodeName, newBindings, newProvisionedPVCs) return } // BindPodVolumes gets the cached bindings and PVCs to provision in podBindingCache, // makes the API update for those PVs/PVCs, and waits for the PVCs to be completely bound // by the PV controller. func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod) (err error) { podName := getPodName(assumedPod) klog.V(4).Infof("BindPodVolumes for pod %q, node %q", podName, assumedPod.Spec.NodeName) start := time.Now() defer func() { VolumeSchedulingStageLatency.WithLabelValues("bind").Observe(time.Since(start).Seconds()) if err != nil { VolumeSchedulingStageFailed.WithLabelValues("bind").Inc() } }() bindings := b.podBindingCache.GetBindings(assumedPod, assumedPod.Spec.NodeName) claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, assumedPod.Spec.NodeName) // Start API operations err = b.bindAPIUpdate(podName, bindings, claimsToProvision) if err != nil { return err } return wait.Poll(time.Second, b.bindTimeout, func() (bool, error) { b, err := b.checkBindings(assumedPod, bindings, claimsToProvision) return b, err }) } func getPodName(pod *v1.Pod) string { return pod.Namespace + "/" + pod.Name } func getPVCName(pvc *v1.PersistentVolumeClaim) string { return pvc.Namespace + "/" + pvc.Name } // bindAPIUpdate gets the cached bindings and PVCs to provision in podBindingCache // and makes the API update for those PVs/PVCs. 
func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) error { if bindings == nil { return fmt.Errorf("failed to get cached bindings for pod %q", podName) } if claimsToProvision == nil { return fmt.Errorf("failed to get cached claims to provision for pod %q", podName) } lastProcessedBinding := 0 lastProcessedProvisioning := 0 defer func() { // only revert assumed cached updates for volumes we haven't successfully bound if lastProcessedBinding < len(bindings) { b.revertAssumedPVs(bindings[lastProcessedBinding:]) } // only revert assumed cached updates for claims we haven't updated, if lastProcessedProvisioning < len(claimsToProvision) { b.revertAssumedPVCs(claimsToProvision[lastProcessedProvisioning:]) } }() var ( binding *bindingInfo i int claim *v1.PersistentVolumeClaim ) // Do the actual prebinding. Let the PV controller take care of the rest // There is no API rollback if the actual binding fails for _, binding = range bindings { klog.V(5).Infof("bindAPIUpdate: Pod %q, binding PV %q to PVC %q", podName, binding.pv.Name, binding.pvc.Name) // TODO: does it hurt if we make an api call and nothing needs to be updated? claimKey := claimToClaimKey(binding.pvc) klog.V(2).Infof("claim %q bound to volume %q", claimKey, binding.pv.Name) if newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(binding.pv); err != nil { klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", binding.pv.Name, claimKey, err) return err } else { klog.V(4).Infof("updating PersistentVolume[%s]: bound to %q", binding.pv.Name, claimKey) // Save updated object from apiserver for later checking. binding.pv = newPV } lastProcessedBinding++ } // Update claims objects to trigger volume provisioning. 
Let the PV controller take care of the rest // PV controller is expect to signal back by removing related annotations if actual provisioning fails for i, claim = range claimsToProvision { klog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim)) if newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim); err != nil { return err } else { // Save updated object from apiserver for later checking. claimsToProvision[i] = newClaim } lastProcessedProvisioning++ } return nil } var ( versioner = etcd.APIObjectVersioner{} ) // checkBindings runs through all the PVCs in the Pod and checks: // * if the PVC is fully bound // * if there are any conditions that require binding to fail and be retried // // It returns true when all of the Pod's PVCs are fully bound, and error if // binding (and scheduling) needs to be retried // Note that it checks on API objects not PV/PVC cache, this is because // PV/PVC cache can be assumed again in main scheduler loop, we must check // latest state in API server which are shared with PV controller and // provisioners func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*bindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) (bool, error) { podName := getPodName(pod) if bindings == nil { return false, fmt.Errorf("failed to get cached bindings for pod %q", podName) } if claimsToProvision == nil { return false, fmt.Errorf("failed to get cached claims to provision for pod %q", podName) } node, err := b.nodeInformer.Lister().Get(pod.Spec.NodeName) if err != nil { return false, fmt.Errorf("failed to get node %q: %v", pod.Spec.NodeName, err) } // Check for any conditions that might require scheduling retry // When pod is removed from scheduling queue because of deletion or any // other reasons, binding operation should be cancelled. There is no need // to check PV/PVC bindings any more. 
// We check pod binding cache here which will be cleared when pod is // removed from scheduling queue. if b.podBindingCache.GetDecisions(pod) == nil { return false, fmt.Errorf("pod %q does not exist any more", podName) } for _, binding := range bindings { pv, err := b.pvCache.GetAPIPV(binding.pv.Name) if err != nil { return false, fmt.Errorf("failed to check binding: %v", err) } pvc, err := b.pvcCache.GetAPIPVC(getPVCName(binding.pvc)) if err != nil { return false, fmt.Errorf("failed to check binding: %v", err) } // Because we updated PV in apiserver, skip if API object is older // and wait for new API object propagated from apiserver. if versioner.CompareResourceVersion(binding.pv, pv) > 0 { return false, nil } // Check PV's node affinity (the node might not have the proper label) if err := volumeutil.CheckNodeAffinity(pv, node.Labels); err != nil { return false, fmt.Errorf("pv %q node affinity doesn't match node %q: %v", pv.Name, node.Name, err) } // Check if pv.ClaimRef got dropped by unbindVolume() if pv.Spec.ClaimRef == nil || pv.Spec.ClaimRef.UID == "" { return false, fmt.Errorf("ClaimRef got reset for pv %q", pv.Name) } // Check if pvc is fully bound if !b.isPVCFullyBound(pvc) { return false, nil } } for _, claim := range claimsToProvision { pvc, err := b.pvcCache.GetAPIPVC(getPVCName(claim)) if err != nil { return false, fmt.Errorf("failed to check provisioning pvc: %v", err) } // Because we updated PVC in apiserver, skip if API object is older // and wait for new API object propagated from apiserver. 
if versioner.CompareResourceVersion(claim, pvc) > 0 { return false, nil } // Check if selectedNode annotation is still set if pvc.Annotations == nil { return false, fmt.Errorf("selectedNode annotation reset for PVC %q", pvc.Name) } selectedNode := pvc.Annotations[annSelectedNode] if selectedNode != pod.Spec.NodeName { return false, fmt.Errorf("selectedNode annotation value %q not set to scheduled node %q", selectedNode, pod.Spec.NodeName) } // If the PVC is bound to a PV, check its node affinity if pvc.Spec.VolumeName != "" { pv, err := b.pvCache.GetAPIPV(pvc.Spec.VolumeName) if err != nil { if _, ok := err.(*errNotFound); ok { // We tolerate NotFound error here, because PV is possibly // not found because of API delay, we can check next time. // And if PV does not exist because it's deleted, PVC will // be unbound eventually. return false, nil } else { return false, fmt.Errorf("failed to get pv %q from cache: %v", pvc.Spec.VolumeName, err) } } if err := volumeutil.CheckNodeAffinity(pv, node.Labels); err != nil { return false, fmt.Errorf("pv %q node affinity doesn't match node %q: %v", pv.Name, node.Name, err) } } // Check if pvc is fully bound if !b.isPVCFullyBound(pvc) { return false, nil } } // All pvs and pvcs that we operated on are bound klog.V(4).Infof("All PVCs for pod %q are bound", podName) return true, nil } func (b *volumeBinder) isVolumeBound(namespace string, vol *v1.Volume) (bool, *v1.PersistentVolumeClaim, error) { if vol.PersistentVolumeClaim == nil { return true, nil, nil } pvcName := vol.PersistentVolumeClaim.ClaimName return b.isPVCBound(namespace, pvcName) } func (b *volumeBinder) isPVCBound(namespace, pvcName string) (bool, *v1.PersistentVolumeClaim, error) { claim := &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: pvcName, Namespace: namespace, }, } pvcKey := getPVCName(claim) pvc, err := b.pvcCache.GetPVC(pvcKey) if err != nil || pvc == nil { return false, nil, fmt.Errorf("error getting PVC %q: %v", pvcKey, err) } fullyBound 
:= b.isPVCFullyBound(pvc) if fullyBound { klog.V(5).Infof("PVC %q is fully bound to PV %q", pvcKey, pvc.Spec.VolumeName) } else { if pvc.Spec.VolumeName != "" { klog.V(5).Infof("PVC %q is not fully bound to PV %q", pvcKey, pvc.Spec.VolumeName) } else { klog.V(5).Infof("PVC %q is not bound", pvcKey) } } return fullyBound, pvc, nil } func (b *volumeBinder) isPVCFullyBound(pvc *v1.PersistentVolumeClaim) bool { return pvc.Spec.VolumeName != "" && metav1.HasAnnotation(pvc.ObjectMeta, annBindCompleted) } // arePodVolumesBound returns true if all volumes are fully bound func (b *volumeBinder) arePodVolumesBound(pod *v1.Pod) bool { for _, vol := range pod.Spec.Volumes { if isBound, _, _ := b.isVolumeBound(pod.Namespace, &vol); !isBound { // Pod has at least one PVC that needs binding return false } } return true } // getPodVolumes returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning) // and unbound with immediate binding (including prebound) func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentVolumeClaim, unboundClaims []*v1.PersistentVolumeClaim, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error) { boundClaims = []*v1.PersistentVolumeClaim{} unboundClaimsImmediate = []*v1.PersistentVolumeClaim{} unboundClaims = []*v1.PersistentVolumeClaim{} for _, vol := range pod.Spec.Volumes { volumeBound, pvc, err := b.isVolumeBound(pod.Namespace, &vol) if err != nil { return nil, nil, nil, err } if pvc == nil { continue } if volumeBound { boundClaims = append(boundClaims, pvc) } else { delayBindingMode, err := IsDelayBindingMode(pvc, b.classLister) if err != nil { return nil, nil, nil, err } // Prebound PVCs are treated as unbound immediate binding if delayBindingMode && pvc.Spec.VolumeName == "" { // Scheduler path unboundClaims = append(unboundClaims, pvc) } else { // !delayBindingMode || pvc.Spec.VolumeName != "" // Immediate binding should have already been bound unboundClaimsImmediate = 
append(unboundClaimsImmediate, pvc) } } } return boundClaims, unboundClaims, unboundClaimsImmediate, nil } func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node *v1.Node, podName string) (bool, error) { for _, pvc := range claims { pvName := pvc.Spec.VolumeName pv, err := b.pvCache.GetPV(pvName) if err != nil { return false, err } err = volumeutil.CheckNodeAffinity(pv, node.Labels) if err != nil { klog.V(4).Infof("PersistentVolume %q, Node %q mismatch for Pod %q: %v", pvName, node.Name, podName, err) return false, nil } klog.V(5).Infof("PersistentVolume %q, Node %q matches for Pod %q", pvName, node.Name, podName) } klog.V(4).Infof("All bound volumes for Pod %q match with Node %q", podName, node.Name) return true, nil } // findMatchingVolumes tries to find matching volumes for given claims, // and return unbound claims for further provision. func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (foundMatches bool, matchedClaims []*bindingInfo, unboundClaims []*v1.PersistentVolumeClaim, err error) { podName := getPodName(pod) // Sort all the claims by increasing size request to get the smallest fits sort.Sort(byPVCSize(claimsToBind)) chosenPVs := map[string]*v1.PersistentVolume{} foundMatches = true matchedClaims = []*bindingInfo{} for _, pvc := range claimsToBind { // Get storage class name from each PVC storageClassName := "" storageClass := pvc.Spec.StorageClassName if storageClass != nil { storageClassName = *storageClass } allPVs := b.pvCache.ListPVs(storageClassName) pvcName := getPVCName(pvc) // Find a matching PV pv, err := findMatchingVolume(pvc, allPVs, node, chosenPVs, true) if err != nil { return false, nil, nil, err } if pv == nil { klog.V(4).Infof("No matching volumes for Pod %q, PVC %q on node %q", podName, pvcName, node.Name) unboundClaims = append(unboundClaims, pvc) foundMatches = false continue } // matching PV needs to be excluded so we don't select it again 
chosenPVs[pv.Name] = pv matchedClaims = append(matchedClaims, &bindingInfo{pv: pv, pvc: pvc}) klog.V(5).Infof("Found matching PV %q for PVC %q on node %q for pod %q", pv.Name, pvcName, node.Name, podName) } if foundMatches { klog.V(4).Infof("Found matching volumes for pod %q on node %q", podName, node.Name) } return } // checkVolumeProvisions checks given unbound claims (the claims have gone through func // findMatchingVolumes, and do not have matching volumes for binding), and return true // if all of the claims are eligible for dynamic provision. func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v1.PersistentVolumeClaim, node *v1.Node) (provisionSatisfied bool, provisionedClaims []*v1.PersistentVolumeClaim, err error) { podName := getPodName(pod) provisionedClaims = []*v1.PersistentVolumeClaim{} for _, claim := range claimsToProvision { pvcName := getPVCName(claim) className := v1helper.GetPersistentVolumeClaimClass(claim) if className == "" { return false, nil, fmt.Errorf("no class for claim %q", pvcName) } class, err := b.classLister.Get(className) if err != nil { return false, nil, fmt.Errorf("failed to find storage class %q", className) } provisioner := class.Provisioner if provisioner == "" || provisioner == notSupportedProvisioner { klog.V(4).Infof("storage class %q of claim %q does not support dynamic provisioning", className, pvcName) return false, nil, nil } // Check if the node can satisfy the topology requirement in the class if !v1helper.MatchTopologySelectorTerms(class.AllowedTopologies, labels.Set(node.Labels)) { klog.V(4).Infof("Node %q cannot satisfy provisioning topology requirements of claim %q", node.Name, pvcName) return false, nil, nil } // TODO: Check if capacity of the node domain in the storage class // can satisfy resource requirement of given claim provisionedClaims = append(provisionedClaims, claim) } klog.V(4).Infof("Provisioning for claims of pod %q that has no matching volumes on node %q ...", podName, 
node.Name) return true, provisionedClaims, nil } func (b *volumeBinder) revertAssumedPVs(bindings []*bindingInfo) { for _, bindingInfo := range bindings { b.pvCache.Restore(bindingInfo.pv.Name) } } func (b *volumeBinder) revertAssumedPVCs(claims []*v1.PersistentVolumeClaim) { for _, claim := range claims { b.pvcCache.Restore(getPVCName(claim)) } } type bindingInfo struct { // Claim that needs to be bound pvc *v1.PersistentVolumeClaim // Proposed PV to bind to this claim pv *v1.PersistentVolume } type byPVCSize []*v1.PersistentVolumeClaim func (a byPVCSize) Len() int { return len(a) } func (a byPVCSize) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byPVCSize) Less(i, j int) bool { iSize := a[i].Spec.Resources.Requests[v1.ResourceStorage] jSize := a[j].Spec.Resources.Requests[v1.ResourceStorage] // return true if iSize is less than jSize return iSize.Cmp(jSize) == -1 } Rename some varible and clean up codes in scheduler_binder.go /* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package persistentvolume import ( "fmt" "sort" "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/storage/etcd" coreinformers "k8s.io/client-go/informers/core/v1" storageinformers "k8s.io/client-go/informers/storage/v1" clientset "k8s.io/client-go/kubernetes" storagelisters "k8s.io/client-go/listers/storage/v1" "k8s.io/klog" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) // SchedulerVolumeBinder is used by the scheduler to handle PVC/PV binding // and dynamic provisioning. The binding decisions are integrated into the pod scheduling // workflow so that the PV NodeAffinity is also considered along with the pod's other // scheduling requirements. // // This integrates into the existing default scheduler workflow as follows: // 1. The scheduler takes a Pod off the scheduler queue and processes it serially: // a. Invokes all predicate functions, parallelized across nodes. FindPodVolumes() is invoked here. // b. Invokes all priority functions. Future/TBD // c. Selects the best node for the Pod. // d. Cache the node selection for the Pod. AssumePodVolumes() is invoked here. // i. If PVC binding is required, cache in-memory only: // * For manual binding: update PV objects for prebinding to the corresponding PVCs. // * For dynamic provisioning: update PVC object with a selected node from c) // * For the pod, which PVCs and PVs need API updates. // ii. Afterwards, the main scheduler caches the Pod->Node binding in the scheduler's pod cache, // This is handled in the scheduler and not here. // e. Asynchronously bind volumes and pod in a separate goroutine // i. BindPodVolumes() is called first. It makes all the necessary API updates and waits for // PV controller to fully bind and provision the PVCs. If binding fails, the Pod is sent // back through the scheduler. // ii. 
After BindPodVolumes() is complete, then the scheduler does the final Pod->Node binding. // 2. Once all the assume operations are done in d), the scheduler processes the next Pod in the scheduler queue // while the actual binding operation occurs in the background. type SchedulerVolumeBinder interface { // FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the node. // // If a PVC is bound, it checks if the PV's NodeAffinity matches the Node. // Otherwise, it tries to find an available PV to bind to the PVC. // // It returns true if all of the Pod's PVCs have matching PVs or can be dynamic provisioned, // and returns true if bound volumes satisfy the PV NodeAffinity. // // This function is called by the volume binding scheduler predicate and can be called in parallel FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisified, boundVolumesSatisfied bool, err error) // AssumePodVolumes will: // 1. Take the PV matches for unbound PVCs and update the PV cache assuming // that the PV is prebound to the PVC. // 2. Take the PVCs that need provisioning and update the PVC cache with related // annotations set. // // It returns true if all volumes are fully bound // // This function will modify assumedPod with the node name. // This function is called serially. AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, err error) // BindPodVolumes will: // 1. Initiate the volume binding by making the API call to prebind the PV // to its matching PVC. // 2. Trigger the volume provisioning by making the API call to set related // annotations on the PVC // 3. Wait for PVCs to be completely bound by the PV controller // // This function can be called in parallel. BindPodVolumes(assumedPod *v1.Pod) error // GetBindingsCache returns the cache used (if any) to store volume binding decisions. 
GetBindingsCache() PodBindingCache } type volumeBinder struct { kubeClient clientset.Interface classLister storagelisters.StorageClassLister nodeInformer coreinformers.NodeInformer pvcCache PVCAssumeCache pvCache PVAssumeCache // Stores binding decisions that were made in FindPodVolumes for use in AssumePodVolumes. // AssumePodVolumes modifies the bindings again for use in BindPodVolumes. podBindingCache PodBindingCache // Amount of time to wait for the bind operation to succeed bindTimeout time.Duration } // NewVolumeBinder sets up all the caches needed for the scheduler to make volume binding decisions. func NewVolumeBinder( kubeClient clientset.Interface, nodeInformer coreinformers.NodeInformer, pvcInformer coreinformers.PersistentVolumeClaimInformer, pvInformer coreinformers.PersistentVolumeInformer, storageClassInformer storageinformers.StorageClassInformer, bindTimeout time.Duration) SchedulerVolumeBinder { b := &volumeBinder{ kubeClient: kubeClient, classLister: storageClassInformer.Lister(), nodeInformer: nodeInformer, pvcCache: NewPVCAssumeCache(pvcInformer.Informer()), pvCache: NewPVAssumeCache(pvInformer.Informer()), podBindingCache: NewPodBindingCache(), bindTimeout: bindTimeout, } return b } func (b *volumeBinder) GetBindingsCache() PodBindingCache { return b.podBindingCache } // FindPodVolumes caches the matching PVs and PVCs to provision per node in podBindingCache. // This method intentionally takes in a *v1.Node object instead of using volumebinder.nodeInformer. // That's necessary because some operations will need to pass in to the predicate fake node objects. func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisfied, boundVolumesSatisfied bool, err error) { podName := getPodName(pod) // Warning: Below log needs high verbosity as it can be printed several times (#60933). 
klog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name) // Initialize to true for pods that don't have volumes unboundVolumesSatisfied = true boundVolumesSatisfied = true start := time.Now() defer func() { VolumeSchedulingStageLatency.WithLabelValues("predicate").Observe(time.Since(start).Seconds()) if err != nil { VolumeSchedulingStageFailed.WithLabelValues("predicate").Inc() } }() var ( matchedBindings []*bindingInfo provisionedClaims []*v1.PersistentVolumeClaim ) defer func() { // We recreate bindings for each new schedule loop. if len(matchedBindings) == 0 && len(provisionedClaims) == 0 { // Clear cache if no claims to bind or provision for this node. b.podBindingCache.ClearBindings(pod, node.Name) return } // Although we do not distinguish nil from empty in this function, for // easier testing, we normalize empty to nil. if len(matchedBindings) == 0 { matchedBindings = nil } if len(provisionedClaims) == 0 { provisionedClaims = nil } // Mark cache with all matched and provisioned claims for this node b.podBindingCache.UpdateBindings(pod, node.Name, matchedBindings, provisionedClaims) }() // The pod's volumes need to be processed in one call to avoid the race condition where // volumes can get bound/provisioned in between calls. 
boundClaims, claimsToBind, unboundClaimsImmediate, err := b.getPodVolumes(pod) if err != nil { return false, false, err } // Immediate claims should be bound if len(unboundClaimsImmediate) > 0 { return false, false, fmt.Errorf("pod has unbound immediate PersistentVolumeClaims") } // Check PV node affinity on bound volumes if len(boundClaims) > 0 { boundVolumesSatisfied, err = b.checkBoundClaims(boundClaims, node, podName) if err != nil { return false, false, err } } // Find matching volumes and node for unbound claims if len(claimsToBind) > 0 { var ( claimsToFindMatching []*v1.PersistentVolumeClaim claimsToProvision []*v1.PersistentVolumeClaim ) // Filter out claims to provision for _, claim := range claimsToBind { if selectedNode, ok := claim.Annotations[annSelectedNode]; ok { if selectedNode != node.Name { // Fast path, skip unmatched node return false, boundVolumesSatisfied, nil } claimsToProvision = append(claimsToProvision, claim) } else { claimsToFindMatching = append(claimsToFindMatching, claim) } } // Find matching volumes if len(claimsToFindMatching) > 0 { var unboundClaims []*v1.PersistentVolumeClaim unboundVolumesSatisfied, matchedBindings, unboundClaims, err = b.findMatchingVolumes(pod, claimsToFindMatching, node) if err != nil { return false, false, err } claimsToProvision = append(claimsToProvision, unboundClaims...) } // Check for claims to provision if len(claimsToProvision) > 0 { unboundVolumesSatisfied, provisionedClaims, err = b.checkVolumeProvisions(pod, claimsToProvision, node) if err != nil { return false, false, err } } } return unboundVolumesSatisfied, boundVolumesSatisfied, nil } // AssumePodVolumes will take the cached matching PVs and PVCs to provision // in podBindingCache for the chosen node, and: // 1. Update the pvCache with the new prebound PV. // 2. Update the pvcCache with the new PVCs with annotations set // 3. Update podBindingCache again with cached API updates for PVs and PVCs. 
func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, err error) { podName := getPodName(assumedPod) klog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName) start := time.Now() defer func() { VolumeSchedulingStageLatency.WithLabelValues("assume").Observe(time.Since(start).Seconds()) if err != nil { VolumeSchedulingStageFailed.WithLabelValues("assume").Inc() } }() if allBound := b.arePodVolumesBound(assumedPod); allBound { klog.V(4).Infof("AssumePodVolumes for pod %q, node %q: all PVCs bound and nothing to do", podName, nodeName) return true, nil } assumedPod.Spec.NodeName = nodeName claimsToBind := b.podBindingCache.GetBindings(assumedPod, nodeName) claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, nodeName) // Assume PV newBindings := []*bindingInfo{} for _, binding := range claimsToBind { newPV, dirty, err := GetBindVolumeToClaim(binding.pv, binding.pvc) klog.V(5).Infof("AssumePodVolumes: GetBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v", podName, binding.pv.Name, binding.pvc.Name, newPV, dirty, err) if err != nil { b.revertAssumedPVs(newBindings) return false, err } // TODO: can we assume everytime? if dirty { err = b.pvCache.Assume(newPV) if err != nil { b.revertAssumedPVs(newBindings) return false, err } } newBindings = append(newBindings, &bindingInfo{pv: newPV, pvc: binding.pvc}) } // Assume PVCs newProvisionedPVCs := []*v1.PersistentVolumeClaim{} for _, claim := range claimsToProvision { // The claims from method args can be pointing to watcher cache. We must not // modify these, therefore create a copy. 
claimClone := claim.DeepCopy() metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, annSelectedNode, nodeName) err = b.pvcCache.Assume(claimClone) if err != nil { b.revertAssumedPVs(newBindings) b.revertAssumedPVCs(newProvisionedPVCs) return } newProvisionedPVCs = append(newProvisionedPVCs, claimClone) } // Update cache with the assumed pvcs and pvs // Even if length is zero, update the cache with an empty slice to indicate that no // operations are needed b.podBindingCache.UpdateBindings(assumedPod, nodeName, newBindings, newProvisionedPVCs) return } // BindPodVolumes gets the cached bindings and PVCs to provision in podBindingCache, // makes the API update for those PVs/PVCs, and waits for the PVCs to be completely bound // by the PV controller. func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod) (err error) { podName := getPodName(assumedPod) klog.V(4).Infof("BindPodVolumes for pod %q, node %q", podName, assumedPod.Spec.NodeName) start := time.Now() defer func() { VolumeSchedulingStageLatency.WithLabelValues("bind").Observe(time.Since(start).Seconds()) if err != nil { VolumeSchedulingStageFailed.WithLabelValues("bind").Inc() } }() bindings := b.podBindingCache.GetBindings(assumedPod, assumedPod.Spec.NodeName) claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, assumedPod.Spec.NodeName) // Start API operations err = b.bindAPIUpdate(podName, bindings, claimsToProvision) if err != nil { return err } return wait.Poll(time.Second, b.bindTimeout, func() (bool, error) { b, err := b.checkBindings(assumedPod, bindings, claimsToProvision) return b, err }) } func getPodName(pod *v1.Pod) string { return pod.Namespace + "/" + pod.Name } func getPVCName(pvc *v1.PersistentVolumeClaim) string { return pvc.Namespace + "/" + pvc.Name } // bindAPIUpdate gets the cached bindings and PVCs to provision in podBindingCache // and makes the API update for those PVs/PVCs. 
func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) error {
	if bindings == nil {
		return fmt.Errorf("failed to get cached bindings for pod %q", podName)
	}
	if claimsToProvision == nil {
		return fmt.Errorf("failed to get cached claims to provision for pod %q", podName)
	}

	lastProcessedBinding := 0
	lastProcessedProvisioning := 0
	defer func() {
		// only revert assumed cached updates for volumes we haven't successfully bound
		if lastProcessedBinding < len(bindings) {
			b.revertAssumedPVs(bindings[lastProcessedBinding:])
		}
		// only revert assumed cached updates for claims we haven't updated
		if lastProcessedProvisioning < len(claimsToProvision) {
			b.revertAssumedPVCs(claimsToProvision[lastProcessedProvisioning:])
		}
	}()

	var (
		binding *bindingInfo
		i       int
		claim   *v1.PersistentVolumeClaim
	)

	// Do the actual prebinding. Let the PV controller take care of the rest
	// There is no API rollback if the actual binding fails
	for _, binding = range bindings {
		klog.V(5).Infof("bindAPIUpdate: Pod %q, binding PV %q to PVC %q", podName, binding.pv.Name, binding.pvc.Name)
		// TODO: does it hurt if we make an api call and nothing needs to be updated?
		claimKey := claimToClaimKey(binding.pvc)
		klog.V(2).Infof("claim %q bound to volume %q", claimKey, binding.pv.Name)
		if newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(binding.pv); err != nil {
			klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", binding.pv.Name, claimKey, err)
			return err
		} else {
			klog.V(4).Infof("updating PersistentVolume[%s]: bound to %q", binding.pv.Name, claimKey)
			// Save updated object from apiserver for later checking.
			binding.pv = newPV
		}
		lastProcessedBinding++
	}

	// Update claims objects to trigger volume provisioning. Let the PV controller take care of the rest
	// PV controller is expected to signal back by removing related annotations if actual provisioning fails
	for i, claim = range claimsToProvision {
		klog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim))
		if newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim); err != nil {
			return err
		} else {
			// Save updated object from apiserver for later checking.
			claimsToProvision[i] = newClaim
		}
		lastProcessedProvisioning++
	}

	return nil
}

// versioner compares resource versions of API objects so that stale cache
// reads can be detected and skipped.
var (
	versioner = etcd.APIObjectVersioner{}
)

// checkBindings runs through all the PVCs in the Pod and checks:
// * if the PVC is fully bound
// * if there are any conditions that require binding to fail and be retried
//
// It returns true when all of the Pod's PVCs are fully bound, and error if
// binding (and scheduling) needs to be retried
// Note that it checks on API objects not PV/PVC cache, this is because
// PV/PVC cache can be assumed again in main scheduler loop, we must check
// latest state in API server which are shared with PV controller and
// provisioners
func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*bindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) (bool, error) {
	podName := getPodName(pod)
	if bindings == nil {
		return false, fmt.Errorf("failed to get cached bindings for pod %q", podName)
	}
	if claimsToProvision == nil {
		return false, fmt.Errorf("failed to get cached claims to provision for pod %q", podName)
	}

	node, err := b.nodeInformer.Lister().Get(pod.Spec.NodeName)
	if err != nil {
		return false, fmt.Errorf("failed to get node %q: %v", pod.Spec.NodeName, err)
	}

	// Check for any conditions that might require scheduling retry

	// When pod is removed from scheduling queue because of deletion or any
	// other reasons, binding operation should be cancelled. There is no need
	// to check PV/PVC bindings any more.
	// We check pod binding cache here which will be cleared when pod is
	// removed from scheduling queue.
	if b.podBindingCache.GetDecisions(pod) == nil {
		return false, fmt.Errorf("pod %q does not exist any more", podName)
	}

	for _, binding := range bindings {
		pv, err := b.pvCache.GetAPIPV(binding.pv.Name)
		if err != nil {
			return false, fmt.Errorf("failed to check binding: %v", err)
		}

		pvc, err := b.pvcCache.GetAPIPVC(getPVCName(binding.pvc))
		if err != nil {
			return false, fmt.Errorf("failed to check binding: %v", err)
		}

		// Because we updated PV in apiserver, skip if API object is older
		// and wait for new API object propagated from apiserver.
		if versioner.CompareResourceVersion(binding.pv, pv) > 0 {
			return false, nil
		}

		// Check PV's node affinity (the node might not have the proper label)
		if err := volumeutil.CheckNodeAffinity(pv, node.Labels); err != nil {
			return false, fmt.Errorf("pv %q node affinity doesn't match node %q: %v", pv.Name, node.Name, err)
		}

		// Check if pv.ClaimRef got dropped by unbindVolume()
		if pv.Spec.ClaimRef == nil || pv.Spec.ClaimRef.UID == "" {
			return false, fmt.Errorf("ClaimRef got reset for pv %q", pv.Name)
		}

		// Check if pvc is fully bound
		if !b.isPVCFullyBound(pvc) {
			return false, nil
		}
	}

	for _, claim := range claimsToProvision {
		pvc, err := b.pvcCache.GetAPIPVC(getPVCName(claim))
		if err != nil {
			return false, fmt.Errorf("failed to check provisioning pvc: %v", err)
		}

		// Because we updated PVC in apiserver, skip if API object is older
		// and wait for new API object propagated from apiserver.
		if versioner.CompareResourceVersion(claim, pvc) > 0 {
			return false, nil
		}

		// Check if selectedNode annotation is still set
		if pvc.Annotations == nil {
			return false, fmt.Errorf("selectedNode annotation reset for PVC %q", pvc.Name)
		}
		selectedNode := pvc.Annotations[annSelectedNode]
		if selectedNode != pod.Spec.NodeName {
			return false, fmt.Errorf("selectedNode annotation value %q not set to scheduled node %q", selectedNode, pod.Spec.NodeName)
		}

		// If the PVC is bound to a PV, check its node affinity
		if pvc.Spec.VolumeName != "" {
			pv, err := b.pvCache.GetAPIPV(pvc.Spec.VolumeName)
			if err != nil {
				if _, ok := err.(*errNotFound); ok {
					// We tolerate NotFound error here, because PV is possibly
					// not found because of API delay, we can check next time.
					// And if PV does not exist because it's deleted, PVC will
					// be unbound eventually.
					return false, nil
				} else {
					return false, fmt.Errorf("failed to get pv %q from cache: %v", pvc.Spec.VolumeName, err)
				}
			}
			if err := volumeutil.CheckNodeAffinity(pv, node.Labels); err != nil {
				return false, fmt.Errorf("pv %q node affinity doesn't match node %q: %v", pv.Name, node.Name, err)
			}
		}

		// Check if pvc is fully bound
		if !b.isPVCFullyBound(pvc) {
			return false, nil
		}
	}

	// All pvs and pvcs that we operated on are bound
	klog.V(4).Infof("All PVCs for pod %q are bound", podName)
	return true, nil
}

// isVolumeBound reports whether the given pod volume is fully bound. Volumes
// not backed by a PersistentVolumeClaim are trivially "bound" and return a
// nil claim.
func (b *volumeBinder) isVolumeBound(namespace string, vol *v1.Volume) (bool, *v1.PersistentVolumeClaim, error) {
	if vol.PersistentVolumeClaim == nil {
		return true, nil, nil
	}

	pvcName := vol.PersistentVolumeClaim.ClaimName
	return b.isPVCBound(namespace, pvcName)
}

// isPVCBound fetches the named PVC from the PVC cache and reports whether it
// is fully bound, returning the cached claim object.
func (b *volumeBinder) isPVCBound(namespace, pvcName string) (bool, *v1.PersistentVolumeClaim, error) {
	// A skeleton claim is built only to compute the cache key.
	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      pvcName,
			Namespace: namespace,
		},
	}
	pvcKey := getPVCName(claim)
	pvc, err := b.pvcCache.GetPVC(pvcKey)
	if err != nil || pvc == nil {
		return false, nil, fmt.Errorf("error getting PVC %q: %v", pvcKey, err)
	}

	fullyBound := b.isPVCFullyBound(pvc)
	if fullyBound {
		klog.V(5).Infof("PVC %q is fully bound to PV %q", pvcKey, pvc.Spec.VolumeName)
	} else {
		if pvc.Spec.VolumeName != "" {
			klog.V(5).Infof("PVC %q is not fully bound to PV %q", pvcKey, pvc.Spec.VolumeName)
		} else {
			klog.V(5).Infof("PVC %q is not bound", pvcKey)
		}
	}
	return fullyBound, pvc, nil
}

// isPVCFullyBound reports whether the claim names a PV and carries the
// bind-completed annotation.
func (b *volumeBinder) isPVCFullyBound(pvc *v1.PersistentVolumeClaim) bool {
	return pvc.Spec.VolumeName != "" && metav1.HasAnnotation(pvc.ObjectMeta, annBindCompleted)
}

// arePodVolumesBound returns true if all volumes are fully bound
func (b *volumeBinder) arePodVolumesBound(pod *v1.Pod) bool {
	for _, vol := range pod.Spec.Volumes {
		if isBound, _, _ := b.isVolumeBound(pod.Namespace, &vol); !isBound {
			// Pod has at least one PVC that needs binding
			return false
		}
	}
	return true
}

// getPodVolumes returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning)
// and unbound with immediate binding (including prebound)
func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentVolumeClaim, unboundClaimsDelayBinding []*v1.PersistentVolumeClaim, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error) {
	boundClaims = []*v1.PersistentVolumeClaim{}
	unboundClaimsImmediate = []*v1.PersistentVolumeClaim{}
	unboundClaimsDelayBinding = []*v1.PersistentVolumeClaim{}

	for _, vol := range pod.Spec.Volumes {
		volumeBound, pvc, err := b.isVolumeBound(pod.Namespace, &vol)
		if err != nil {
			return nil, nil, nil, err
		}
		// Non-PVC volumes return a nil claim; nothing to classify.
		if pvc == nil {
			continue
		}
		if volumeBound {
			boundClaims = append(boundClaims, pvc)
		} else {
			delayBindingMode, err := IsDelayBindingMode(pvc, b.classLister)
			if err != nil {
				return nil, nil, nil, err
			}
			// Prebound PVCs are treated as unbound immediate binding
			if delayBindingMode && pvc.Spec.VolumeName == "" {
				// Scheduler path
				unboundClaimsDelayBinding = append(unboundClaimsDelayBinding, pvc)
			} else {
				// !delayBindingMode || pvc.Spec.VolumeName != ""
				// Immediate binding should have already been bound
				unboundClaimsImmediate = append(unboundClaimsImmediate, pvc)
			}
		}
	}
	return boundClaims, unboundClaimsDelayBinding, unboundClaimsImmediate, nil
}

// checkBoundClaims verifies that every already-bound claim's PV satisfies the
// node's affinity labels. Affinity mismatch is reported as (false, nil), not
// as an error, so the pod can be retried on another node.
func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node *v1.Node, podName string) (bool, error) {
	for _, pvc := range claims {
		pvName := pvc.Spec.VolumeName
		pv, err := b.pvCache.GetPV(pvName)
		if err != nil {
			return false, err
		}

		err = volumeutil.CheckNodeAffinity(pv, node.Labels)
		if err != nil {
			klog.V(4).Infof("PersistentVolume %q, Node %q mismatch for Pod %q: %v", pvName, node.Name, podName, err)
			return false, nil
		}
		klog.V(5).Infof("PersistentVolume %q, Node %q matches for Pod %q", pvName, node.Name, podName)
	}

	klog.V(4).Infof("All bound volumes for Pod %q match with Node %q", podName, node.Name)
	return true, nil
}

// findMatchingVolumes tries to find matching volumes for given claims,
// and return unbound claims for further provision.
func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (foundMatches bool, bindings []*bindingInfo, unboundClaims []*v1.PersistentVolumeClaim, err error) {
	podName := getPodName(pod)
	// Sort all the claims by increasing size request to get the smallest fits
	sort.Sort(byPVCSize(claimsToBind))

	chosenPVs := map[string]*v1.PersistentVolume{}

	foundMatches = true

	for _, pvc := range claimsToBind {
		// Get storage class name from each PVC
		storageClassName := ""
		storageClass := pvc.Spec.StorageClassName
		if storageClass != nil {
			storageClassName = *storageClass
		}
		allPVs := b.pvCache.ListPVs(storageClassName)
		pvcName := getPVCName(pvc)

		// Find a matching PV
		pv, err := findMatchingVolume(pvc, allPVs, node, chosenPVs, true)
		if err != nil {
			return false, nil, nil, err
		}
		if pv == nil {
			klog.V(4).Infof("No matching volumes for Pod %q, PVC %q on node %q", podName, pvcName, node.Name)
			unboundClaims = append(unboundClaims, pvc)
			foundMatches = false
			continue
		}

		// matching PV needs to be excluded so we don't select it again
		chosenPVs[pv.Name] = pv
		bindings = append(bindings, &bindingInfo{pv: pv, pvc: pvc})
		klog.V(5).Infof("Found matching PV %q for PVC %q on node %q for pod %q", pv.Name, pvcName, node.Name, podName)
	}

	if foundMatches {
		klog.V(4).Infof("Found matching volumes for pod %q on node %q", podName, node.Name)
	}

	return
}

// checkVolumeProvisions checks given unbound claims (the claims have gone through func
// findMatchingVolumes, and do not have matching volumes for binding), and return true
// if all of the claims are eligible for dynamic provision.
func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v1.PersistentVolumeClaim, node *v1.Node) (provisionSatisfied bool, provisionedClaims []*v1.PersistentVolumeClaim, err error) {
	podName := getPodName(pod)
	provisionedClaims = []*v1.PersistentVolumeClaim{}

	for _, claim := range claimsToProvision {
		pvcName := getPVCName(claim)
		className := v1helper.GetPersistentVolumeClaimClass(claim)
		if className == "" {
			return false, nil, fmt.Errorf("no class for claim %q", pvcName)
		}

		class, err := b.classLister.Get(className)
		if err != nil {
			return false, nil, fmt.Errorf("failed to find storage class %q", className)
		}
		provisioner := class.Provisioner
		if provisioner == "" || provisioner == notSupportedProvisioner {
			klog.V(4).Infof("storage class %q of claim %q does not support dynamic provisioning", className, pvcName)
			return false, nil, nil
		}

		// Check if the node can satisfy the topology requirement in the class
		if !v1helper.MatchTopologySelectorTerms(class.AllowedTopologies, labels.Set(node.Labels)) {
			klog.V(4).Infof("Node %q cannot satisfy provisioning topology requirements of claim %q", node.Name, pvcName)
			return false, nil, nil
		}

		// TODO: Check if capacity of the node domain in the storage class
		// can satisfy resource requirement of given claim

		provisionedClaims = append(provisionedClaims, claim)
	}
	klog.V(4).Infof("Provisioning for claims of pod %q that has no matching volumes on node %q ...", podName, node.Name)

	return true, provisionedClaims, nil
}

// revertAssumedPVs restores the PV cache entries for previously assumed
// bindings, undoing AssumePodVolumes/bindAPIUpdate side effects.
func (b *volumeBinder) revertAssumedPVs(bindings []*bindingInfo) {
	for _, bindingInfo := range bindings {
		b.pvCache.Restore(bindingInfo.pv.Name)
	}
}

// revertAssumedPVCs restores the PVC cache entries for previously assumed
// provisioned claims.
func (b *volumeBinder) revertAssumedPVCs(claims []*v1.PersistentVolumeClaim) {
	for _, claim := range claims {
		b.pvcCache.Restore(getPVCName(claim))
	}
}

// bindingInfo pairs a claim with the PV proposed for it.
type bindingInfo struct {
	// Claim that needs to be bound
	pvc *v1.PersistentVolumeClaim

	// Proposed PV to bind to this claim
	pv *v1.PersistentVolume
}

// byPVCSize sorts claims by increasing storage request so the smallest
// requests are matched first.
type byPVCSize []*v1.PersistentVolumeClaim

func (a byPVCSize) Len() int {
	return len(a)
}

func (a byPVCSize) Swap(i, j int) {
	a[i], a[j] = a[j], a[i]
}

func (a byPVCSize) Less(i, j int) bool {
	iSize := a[i].Spec.Resources.Requests[v1.ResourceStorage]
	jSize := a[j].Spec.Resources.Requests[v1.ResourceStorage]
	// return true if iSize is less than jSize
	return iSize.Cmp(jSize) == -1
}
/*
Copyright 2019 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package knativeserving

import (
	"context"
	"fmt"

	mf "github.com/manifestival/manifestival"
	"go.uber.org/zap"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/errors"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/client-go/tools/cache"
	"knative.dev/pkg/controller"
	servingv1alpha1 "knative.dev/serving-operator/pkg/apis/serving/v1alpha1"
	listers "knative.dev/serving-operator/pkg/client/listers/serving/v1alpha1"
	"knative.dev/serving-operator/pkg/reconciler"
	"knative.dev/serving-operator/pkg/reconciler/knativeserving/common"
	"knative.dev/serving-operator/version"
)

const (
	// finalizerName guards deletion until the installed manifest is removed.
	finalizerName = "delete-knative-serving-manifest"

	// Change kinds reported to the stats reporter.
	creationChange = "creation"
	editChange     = "edit"
	deletionChange = "deletion"
)

// Reconciler implements controller.Reconciler for Knativeserving resources.
type Reconciler struct {
	*reconciler.Base
	// Listers index properties about resources
	knativeServingLister listers.KnativeServingLister
	// config holds the release manifest to install.
	config mf.Manifest
	// servings tracks the last observed generation per resource key.
	servings map[string]int64
	// Platform-specific behavior to affect the transform
	platform common.Platforms
}

// Check that our Reconciler implements controller.Reconciler
var _ controller.Reconciler = (*Reconciler)(nil)

// Reconcile compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the Knativeserving resource
// with the current status of the resource.
func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
	// Convert the namespace/name string into a distinct namespace and name
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		r.Logger.Errorf("invalid resource key: %s", key)
		return nil
	}
	// Get the KnativeServing resource with this namespace/name.
	original, err := r.knativeServingLister.KnativeServings(namespace).Get(name)
	if apierrs.IsNotFound(err) {
		return nil
	} else if err != nil {
		r.Logger.Error(err, "Error getting KnativeServing")
		return err
	}

	if original.GetDeletionTimestamp() != nil {
		if _, ok := r.servings[key]; ok {
			delete(r.servings, key)
			r.StatsReporter.ReportKnativeservingChange(key, deletionChange)
		}
		return r.delete(original)
	}

	// Keep track of the number and generation of KnativeServings in the cluster.
	newGen := original.Generation
	if oldGen, ok := r.servings[key]; ok {
		if newGen > oldGen {
			r.StatsReporter.ReportKnativeservingChange(key, editChange)
		} else if newGen < oldGen {
			return fmt.Errorf("reconciling obsolete generation of KnativeServing %s: newGen = %d and oldGen = %d", key, newGen, oldGen)
		}
	} else {
		// No metrics are emitted when newGen > 1: the first reconciling of
		// a new operator on an existing KnativeServing resource.
		if newGen == 1 {
			r.StatsReporter.ReportKnativeservingChange(key, creationChange)
		}
	}
	r.servings[key] = newGen

	// Don't modify the informers copy.
	knativeServing := original.DeepCopy()

	// Reconcile this copy of the KnativeServing resource and then write back any status
	// updates regardless of whether the reconciliation errored out.
	reconcileErr := r.reconcile(ctx, knativeServing)
	if equality.Semantic.DeepEqual(original.Status, knativeServing.Status) {
		// If we didn't change anything then don't call updateStatus.
		// This is important because the copy we loaded from the informer's
		// cache may be stale and we don't want to overwrite a prior update
		// to status with this stale state.
	} else if err = r.updateStatus(knativeServing); err != nil {
		r.Logger.Warnw("Failed to update knativeServing status", zap.Error(err))
		r.Recorder.Eventf(knativeServing, corev1.EventTypeWarning, "UpdateFailed", "Failed to update status for KnativeServing %q: %v", knativeServing.Name, err)
		return err
	}
	if reconcileErr != nil {
		r.Recorder.Event(knativeServing, corev1.EventTypeWarning, "InternalError", reconcileErr.Error())
		return reconcileErr
	}
	return nil
}

// reconcile runs the installation stages in order, aborting on the first
// failure. Status is written back by the caller.
func (r *Reconciler) reconcile(ctx context.Context, ks *servingv1alpha1.KnativeServing) error {
	reqLogger := r.Logger.With(zap.String("Request.Namespace", ks.Namespace)).With("Request.Name", ks.Name)
	reqLogger.Infow("Reconciling KnativeServing", "status", ks.Status)

	stages := []func(*mf.Manifest, *servingv1alpha1.KnativeServing) error{
		r.ensureFinalizer,
		r.initStatus,
		r.install,
		r.checkDeployments,
		r.deleteObsoleteResources,
	}

	manifest, err := r.transform(ks)
	if err != nil {
		ks.Status.MarkInstallFailed(err.Error())
		return err
	}

	for _, stage := range stages {
		if err := stage(&manifest, ks); err != nil {
			return err
		}
	}
	reqLogger.Infow("Reconcile stages complete", "status", ks.Status)
	return nil
}

// Transform the resources
func (r *Reconciler) transform(instance *servingv1alpha1.KnativeServing) (mf.Manifest, error) {
	r.Logger.Debug("Transforming manifest")
	transforms, err := r.platform.Transformers(r.KubeClientSet, instance, r.Logger)
	if err != nil {
		return mf.Manifest{}, err
	}
	return r.config.Transform(transforms...)
}

// Update the status subresource
func (r *Reconciler) updateStatus(instance *servingv1alpha1.KnativeServing) error {
	afterUpdate, err := r.KnativeServingClientSet.OperatorV1alpha1().KnativeServings(instance.Namespace).UpdateStatus(instance)

	if err != nil {
		return err
	}
	// TODO: We shouldn't rely on mutability and return the updated entities from functions instead.
	afterUpdate.DeepCopyInto(instance)
	return nil
}

// Initialize status conditions
func (r *Reconciler) initStatus(_ *mf.Manifest, instance *servingv1alpha1.KnativeServing) error {
	r.Logger.Debug("Initializing status")
	if len(instance.Status.Conditions) == 0 {
		instance.Status.InitializeConditions()
		if err := r.updateStatus(instance); err != nil {
			return err
		}
	}
	return nil
}

// Apply the manifest resources
func (r *Reconciler) install(manifest *mf.Manifest, instance *servingv1alpha1.KnativeServing) error {
	r.Logger.Debug("Installing manifest")
	if err := manifest.Apply(); err != nil {
		instance.Status.MarkInstallFailed(err.Error())
		return err
	}
	instance.Status.MarkInstallSucceeded()
	instance.Status.Version = version.Version
	return nil
}

// Check for all deployments available
func (r *Reconciler) checkDeployments(manifest *mf.Manifest, instance *servingv1alpha1.KnativeServing) error {
	r.Logger.Debug("Checking deployments")
	available := func(d *appsv1.Deployment) bool {
		for _, c := range d.Status.Conditions {
			if c.Type == appsv1.DeploymentAvailable && c.Status == corev1.ConditionTrue {
				return true
			}
		}
		return false
	}
	for _, u := range manifest.Filter(mf.ByKind("Deployment")).Resources() {
		deployment, err := r.KubeClientSet.AppsV1().Deployments(u.GetNamespace()).Get(u.GetName(), metav1.GetOptions{})
		if err != nil {
			instance.Status.MarkDeploymentsNotReady()
			if errors.IsNotFound(err) {
				return nil
			}
			return err
		}
		if !available(deployment) {
			instance.Status.MarkDeploymentsNotReady()
			return nil
		}
	}
	instance.Status.MarkDeploymentsAvailable()
	return nil
}

// ensureFinalizer attaches a "delete manifest" finalizer to the instance
func (r *Reconciler) ensureFinalizer(manifest *mf.Manifest, instance *servingv1alpha1.KnativeServing) error {
	for _, finalizer := range instance.GetFinalizers() {
		if finalizer == finalizerName {
			return nil
		}
	}
	instance.SetFinalizers(append(instance.GetFinalizers(), finalizerName))
	// NOTE(review): the updated object assigned back into `instance` here is
	// never read again; only err is returned. Consider `_, err := ...`.
	instance, err := r.KnativeServingClientSet.OperatorV1alpha1().KnativeServings(instance.Namespace).Update(instance)
	return err
}

// delete all the resources in the release manifest
func (r *Reconciler) delete(instance *servingv1alpha1.KnativeServing) error {
	if len(instance.GetFinalizers()) == 0 || instance.GetFinalizers()[0] != finalizerName {
		return nil
	}
	// Only tear down cluster resources when no KnativeServing remains tracked.
	if len(r.servings) == 0 {
		if err := r.config.Filter(mf.ByKind("Deployment")).Delete(); err != nil {
			return err
		}
		if err := r.config.Filter(mf.NoCRDs).Delete(); err != nil {
			return err
		}
	}
	// The deletionTimestamp might've changed. Fetch the resource again.
	refetched, err := r.knativeServingLister.KnativeServings(instance.Namespace).Get(instance.Name)
	if err != nil {
		return err
	}
	// NOTE(review): this assumes our finalizer is still at index 0 — confirm
	// no other controller prepends finalizers to this resource.
	refetched.SetFinalizers(refetched.GetFinalizers()[1:])
	_, err = r.KnativeServingClientSet.OperatorV1alpha1().KnativeServings(refetched.Namespace).Update(refetched)
	return err
}

// Delete obsolete resources from previous versions
func (r *Reconciler) deleteObsoleteResources(manifest *mf.Manifest, instance *servingv1alpha1.KnativeServing) error {
	// istio-system resources from 0.3
	resource := &unstructured.Unstructured{}
	resource.SetNamespace("istio-system")
	resource.SetName("knative-ingressgateway")
	resource.SetAPIVersion("v1")
	resource.SetKind("Service")
	// NOTE(review): if manifest.Client.Delete does not tolerate NotFound,
	// already-removed obsolete resources will fail reconciliation forever —
	// confirm the manifestival client semantics.
	if err := manifest.Client.Delete(resource); err != nil {
		return err
	}
	resource.SetAPIVersion("apps/v1")
	resource.SetKind("Deployment")
	if err := manifest.Client.Delete(resource); err != nil {
		return err
	}
	resource.SetAPIVersion("autoscaling/v1")
	resource.SetKind("HorizontalPodAutoscaler")
	if err := manifest.Client.Delete(resource); err != nil {
		return err
	}
	// config-controller from 0.5
	resource.SetNamespace(instance.GetNamespace())
	resource.SetName("config-controller")
	resource.SetAPIVersion("v1")
	resource.SetKind("ConfigMap")
	if err := manifest.Client.Delete(resource); err != nil {
		return err
	}
	return nil
}

// NOTE(review): the following text is a stray commit message plus a second
// copy of this file — a concatenation artifact that makes the file invalid Go.
// It is preserved verbatim here; the duplication should be removed upstream.
Delete (Cluster)Role(Bindings) as a final cleanup step. (#359)

* Delete roles and rolebindings as the final step of cleanup.

This allows us to take advantage of the permissions granted to cluster
admins when performing cleanup. If the approach in
https://github.com/knative/serving-operator/pull/291 is approved, this
will become necessary.

Additionally, it makes sense to remove roles as the final step to allow
human Operators to modify any resources they may have permissions on as
a result of the Knative installation (that is, we should not remove any
access until we are 'almost done' cleaning up).

* Gofmt

* More golang idiomatic var names.

* Remove redundant All()

/*
Copyright 2019 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// NOTE(review): everything below duplicates the file above (concatenation
// artifact); content is preserved verbatim and truncates mid-statement.
package knativeserving

import (
	"context"
	"fmt"

	mf "github.com/manifestival/manifestival"
	"go.uber.org/zap"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/errors"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/client-go/tools/cache"
	"knative.dev/pkg/controller"
	servingv1alpha1 "knative.dev/serving-operator/pkg/apis/serving/v1alpha1"
	listers "knative.dev/serving-operator/pkg/client/listers/serving/v1alpha1"
	"knative.dev/serving-operator/pkg/reconciler"
	"knative.dev/serving-operator/pkg/reconciler/knativeserving/common"
	"knative.dev/serving-operator/version"
)

const (
	// finalizerName guards deletion until the installed manifest is removed.
	finalizerName = "delete-knative-serving-manifest"

	creationChange = "creation"
	editChange     = "edit"
	deletionChange = "deletion"
)

// Reconciler implements controller.Reconciler for Knativeserving resources.
type Reconciler struct {
	*reconciler.Base
	// Listers index properties about resources
	knativeServingLister listers.KnativeServingLister
	config               mf.Manifest
	// servings tracks the last observed generation per resource key.
	servings map[string]int64
	// Platform-specific behavior to affect the transform
	platform common.Platforms
}

// Check that our Reconciler implements controller.Reconciler
var _ controller.Reconciler = (*Reconciler)(nil)

// Reconcile compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the Knativeserving resource
// with the current status of the resource.
func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
	// Convert the namespace/name string into a distinct namespace and name
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		r.Logger.Errorf("invalid resource key: %s", key)
		return nil
	}
	// Get the KnativeServing resource with this namespace/name.
	original, err := r.knativeServingLister.KnativeServings(namespace).Get(name)
	if apierrs.IsNotFound(err) {
		return nil
	} else if err != nil {
		r.Logger.Error(err, "Error getting KnativeServing")
		return err
	}

	if original.GetDeletionTimestamp() != nil {
		if _, ok := r.servings[key]; ok {
			delete(r.servings, key)
			r.StatsReporter.ReportKnativeservingChange(key, deletionChange)
		}
		return r.delete(original)
	}

	// Keep track of the number and generation of KnativeServings in the cluster.
	newGen := original.Generation
	if oldGen, ok := r.servings[key]; ok {
		if newGen > oldGen {
			r.StatsReporter.ReportKnativeservingChange(key, editChange)
		} else if newGen < oldGen {
			return fmt.Errorf("reconciling obsolete generation of KnativeServing %s: newGen = %d and oldGen = %d", key, newGen, oldGen)
		}
	} else {
		// No metrics are emitted when newGen > 1: the first reconciling of
		// a new operator on an existing KnativeServing resource.
		if newGen == 1 {
			r.StatsReporter.ReportKnativeservingChange(key, creationChange)
		}
	}
	r.servings[key] = newGen

	// Don't modify the informers copy.
	knativeServing := original.DeepCopy()

	// Reconcile this copy of the KnativeServing resource and then write back any status
	// updates regardless of whether the reconciliation errored out.
	reconcileErr := r.reconcile(ctx, knativeServing)
	if equality.Semantic.DeepEqual(original.Status, knativeServing.Status) {
		// If we didn't change anything then don't call updateStatus.
		// This is important because the copy we loaded from the informer's
		// cache may be stale and we don't want to overwrite a prior update
		// to status with this stale state.
	} else if err = r.updateStatus(knativeServing); err != nil {
		r.Logger.Warnw("Failed to update knativeServing status", zap.Error(err))
		r.Recorder.Eventf(knativeServing, corev1.EventTypeWarning, "UpdateFailed", "Failed to update status for KnativeServing %q: %v", knativeServing.Name, err)
		return err
	}
	if reconcileErr != nil {
		r.Recorder.Event(knativeServing, corev1.EventTypeWarning, "InternalError", reconcileErr.Error())
		return reconcileErr
	}
	return nil
}

// reconcile runs the installation stages in order, aborting on first failure.
func (r *Reconciler) reconcile(ctx context.Context, ks *servingv1alpha1.KnativeServing) error {
	reqLogger := r.Logger.With(zap.String("Request.Namespace", ks.Namespace)).With("Request.Name", ks.Name)
	reqLogger.Infow("Reconciling KnativeServing", "status", ks.Status)

	stages := []func(*mf.Manifest, *servingv1alpha1.KnativeServing) error{
		r.ensureFinalizer,
		r.initStatus,
		r.install,
		r.checkDeployments,
		r.deleteObsoleteResources,
	}

	manifest, err := r.transform(ks)
	if err != nil {
		ks.Status.MarkInstallFailed(err.Error())
		return err
	}

	for _, stage := range stages {
		if err := stage(&manifest, ks); err != nil {
			return err
		}
	}
	reqLogger.Infow("Reconcile stages complete", "status", ks.Status)
	return nil
}

// Transform the resources
func (r *Reconciler) transform(instance *servingv1alpha1.KnativeServing) (mf.Manifest, error) {
	r.Logger.Debug("Transforming manifest")
	transforms, err := r.platform.Transformers(r.KubeClientSet, instance, r.Logger)
	if err != nil {
		return mf.Manifest{}, err
	}
	return r.config.Transform(transforms...)
}

// Update the status subresource
func (r *Reconciler) updateStatus(instance *servingv1alpha1.KnativeServing) error {
	afterUpdate, err := r.KnativeServingClientSet.OperatorV1alpha1().KnativeServings(instance.Namespace).UpdateStatus(instance)

	if err != nil {
		return err
	}
	// TODO: We shouldn't rely on mutability and return the updated entities from functions instead.
	afterUpdate.DeepCopyInto(instance)
	return nil
}

// Initialize status conditions
func (r *Reconciler) initStatus(_ *mf.Manifest, instance *servingv1alpha1.KnativeServing) error {
	r.Logger.Debug("Initializing status")
	if len(instance.Status.Conditions) == 0 {
		instance.Status.InitializeConditions()
		if err := r.updateStatus(instance); err != nil {
			return err
		}
	}
	return nil
}

// Apply the manifest resources
func (r *Reconciler) install(manifest *mf.Manifest, instance *servingv1alpha1.KnativeServing) error {
	r.Logger.Debug("Installing manifest")
	if err := manifest.Apply(); err != nil {
		instance.Status.MarkInstallFailed(err.Error())
		return err
	}
	instance.Status.MarkInstallSucceeded()
	instance.Status.Version = version.Version
	return nil
}

// Check for all deployments available
func (r *Reconciler) checkDeployments(manifest *mf.Manifest, instance *servingv1alpha1.KnativeServing) error {
	r.Logger.Debug("Checking deployments")
	available := func(d *appsv1.Deployment) bool {
		for _, c := range d.Status.Conditions {
			if c.Type == appsv1.DeploymentAvailable && c.Status == corev1.ConditionTrue {
				return true
			}
		}
		return false
	}
	for _, u := range manifest.Filter(mf.ByKind("Deployment")).Resources() {
		deployment, err := r.KubeClientSet.AppsV1().Deployments(u.GetNamespace()).Get(u.GetName(), metav1.GetOptions{})
		if err != nil {
			instance.Status.MarkDeploymentsNotReady()
			if errors.IsNotFound(err) {
				return nil
			}
			return err
		}
		if !available(deployment) {
			instance.Status.MarkDeploymentsNotReady()
			return nil
		}
	}
	instance.Status.MarkDeploymentsAvailable()
	return nil
}

// ensureFinalizer attaches a "delete manifest" finalizer to the instance
func (r *Reconciler) ensureFinalizer(manifest *mf.Manifest, instance *servingv1alpha1.KnativeServing) error {
	for _, finalizer := range instance.GetFinalizers() {
		if finalizer == finalizerName {
			return nil
		}
	}
	instance.SetFinalizers(append(instance.GetFinalizers(), finalizerName))
	// NOTE(review): truncated here in this chunk; continuation is outside view.
	instance, err :=
r.KnativeServingClientSet.OperatorV1alpha1().KnativeServings(instance.Namespace).Update(instance) return err } // delete all the resources in the release manifest func (r *Reconciler) delete(instance *servingv1alpha1.KnativeServing) error { if len(instance.GetFinalizers()) == 0 || instance.GetFinalizers()[0] != finalizerName { return nil } var RBAC = mf.Any(mf.ByKind("Role"), mf.ByKind("ClusterRole"), mf.ByKind("RoleBinding"), mf.ByKind("ClusterRoleBinding")) if len(r.servings) == 0 { if err := r.config.Filter(mf.ByKind("Deployment")).Delete(); err != nil { return err } if err := r.config.Filter(mf.NoCRDs, mf.None(RBAC)).Delete(); err != nil { return err } // Delete Roles last, as they may be useful for human operators to clean up. if err := r.config.Filter(RBAC).Delete(); err != nil { return err } } // The deletionTimestamp might've changed. Fetch the resource again. refetched, err := r.knativeServingLister.KnativeServings(instance.Namespace).Get(instance.Name) if err != nil { return err } refetched.SetFinalizers(refetched.GetFinalizers()[1:]) _, err = r.KnativeServingClientSet.OperatorV1alpha1().KnativeServings(refetched.Namespace).Update(refetched) return err } // Delete obsolete resources from previous versions func (r *Reconciler) deleteObsoleteResources(manifest *mf.Manifest, instance *servingv1alpha1.KnativeServing) error { // istio-system resources from 0.3 resource := &unstructured.Unstructured{} resource.SetNamespace("istio-system") resource.SetName("knative-ingressgateway") resource.SetAPIVersion("v1") resource.SetKind("Service") if err := manifest.Client.Delete(resource); err != nil { return err } resource.SetAPIVersion("apps/v1") resource.SetKind("Deployment") if err := manifest.Client.Delete(resource); err != nil { return err } resource.SetAPIVersion("autoscaling/v1") resource.SetKind("HorizontalPodAutoscaler") if err := manifest.Client.Delete(resource); err != nil { return err } // config-controller from 0.5 
resource.SetNamespace(instance.GetNamespace()) resource.SetName("config-controller") resource.SetAPIVersion("v1") resource.SetKind("ConfigMap") if err := manifest.Client.Delete(resource); err != nil { return err } return nil }
package migrations

import . "github.com/grafana/grafana/pkg/services/sqlstore/migrator"

func addDashboardSnapshotMigrations(mg *Migrator) {
	snapshotV4 := Table{
		Name: "dashboard_snapshot",
		Columns: []*Column{
			{Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true},
			{Name: "name", Type: DB_NVarchar, Length: 255, Nullable: false},
			{Name: "key", Type: DB_NVarchar, Length: 255, Nullable: false},
			{Name: "dashboard", Type: DB_Text, Nullable: false},
			{Name: "expires", Type: DB_DateTime, Nullable: false},
			{Name: "created", Type: DB_DateTime, Nullable: false},
			{Name: "updated", Type: DB_DateTime, Nullable: false},
		},
		Indices: []*Index{
			{Cols: []string{"key"}, Type: UniqueIndex},
		},
	}

	// add v4
	mg.AddMigration("create dashboard_snapshot table v4", NewAddTableMigration(snapshotV4))
	mg.AddMigration("drop table dashboard_snapshot_v4 #1", NewDropTableMigration("dashboard_snapshot"))

	snapshotV5 := Table{
		Name: "dashboard_snapshot",
		Columns: []*Column{
			{Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true},
			{Name: "name", Type: DB_NVarchar, Length: 255, Nullable: false},
			{Name: "key", Type: DB_NVarchar, Length: 255, Nullable: false},
			{Name: "delete_key", Type: DB_NVarchar, Length: 255, Nullable: false},
			{Name: "org_id", Type: DB_BigInt, Nullable: false},
			{Name: "user_id", Type: DB_BigInt, Nullable: false},
			{Name: "external", Type: DB_Bool, Nullable: false},
			{Name: "external_url", Type: DB_NVarchar, Length: 255, Nullable: false},
			{Name: "dashboard", Type: DB_Text, Nullable: false},
			{Name: "expires", Type: DB_DateTime, Nullable: false},
			{Name: "created", Type: DB_DateTime, Nullable: false},
			{Name: "updated", Type: DB_DateTime, Nullable: false},
		},
		Indices: []*Index{
			{Cols: []string{"key"}, Type: UniqueIndex},
			{Cols: []string{"delete_key"}, Type: UniqueIndex},
			{Cols: []string{"user_id"}},
		},
	}

	mg.AddMigration("create dashboard_snapshot table v5 #2", NewAddTableMigration(snapshotV5))
	addTableIndicesMigrations(mg, "v5", snapshotV5)

	// Increase data
type mg.AddMigration("alter dashboard_snapshot.data to mediumtext v1", new(RawSqlMigration). Sqlite("SELECT 0 WHERE 0;"). Postgres("SELECT 0;"). Mysql("ALTER TABLE dashboard_snapshot.data MODIFY data MEDIUMTEXT;")) } fixes #1880 correct mysql statement for modifying column data type package migrations import . "github.com/grafana/grafana/pkg/services/sqlstore/migrator" func addDashboardSnapshotMigrations(mg *Migrator) { snapshotV4 := Table{ Name: "dashboard_snapshot", Columns: []*Column{ {Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true}, {Name: "name", Type: DB_NVarchar, Length: 255, Nullable: false}, {Name: "key", Type: DB_NVarchar, Length: 255, Nullable: false}, {Name: "dashboard", Type: DB_Text, Nullable: false}, {Name: "expires", Type: DB_DateTime, Nullable: false}, {Name: "created", Type: DB_DateTime, Nullable: false}, {Name: "updated", Type: DB_DateTime, Nullable: false}, }, Indices: []*Index{ {Cols: []string{"key"}, Type: UniqueIndex}, }, } // add v4 mg.AddMigration("create dashboard_snapshot table v4", NewAddTableMigration(snapshotV4)) mg.AddMigration("drop table dashboard_snapshot_v4 #1", NewDropTableMigration("dashboard_snapshot")) snapshotV5 := Table{ Name: "dashboard_snapshot", Columns: []*Column{ {Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true}, {Name: "name", Type: DB_NVarchar, Length: 255, Nullable: false}, {Name: "key", Type: DB_NVarchar, Length: 255, Nullable: false}, {Name: "delete_key", Type: DB_NVarchar, Length: 255, Nullable: false}, {Name: "org_id", Type: DB_BigInt, Nullable: false}, {Name: "user_id", Type: DB_BigInt, Nullable: false}, {Name: "external", Type: DB_Bool, Nullable: false}, {Name: "external_url", Type: DB_NVarchar, Length: 255, Nullable: false}, {Name: "dashboard", Type: DB_Text, Nullable: false}, {Name: "expires", Type: DB_DateTime, Nullable: false}, {Name: "created", Type: DB_DateTime, Nullable: false}, {Name: "updated", Type: DB_DateTime, Nullable: false}, }, Indices: []*Index{ 
		{Cols: []string{"key"}, Type: UniqueIndex},
			{Cols: []string{"delete_key"}, Type: UniqueIndex},
			{Cols: []string{"user_id"}},
		},
	}

	mg.AddMigration("create dashboard_snapshot table v5 #2", NewAddTableMigration(snapshotV5))
	addTableIndicesMigrations(mg, "v5", snapshotV5)

	// Increase data type
	mg.AddMigration("alter dashboard_snapshot.data to mediumtext v1", new(RawSqlMigration).
		Sqlite("SELECT 0 WHERE 0;").
		Postgres("SELECT 0;").
		Mysql("ALTER TABLE dashboard_snapshot MODIFY data MEDIUMTEXT;"))
}
package component import ( "github.com/jcelliott/lumber" "github.com/nanobox-io/golang-docker-client" "github.com/nanobox-io/nanobox/models" "github.com/nanobox-io/nanobox/util" "github.com/nanobox-io/nanobox/util/display" ) // Clean purges any components in a dirty or incomplete state func Clean(appModel *models.App) error { // fetch all of the app components components, err := appModel.Components() if err != nil { lumber.Error("component:Clean:models.App{ID:%s}.Components(): %s", appModel.ID, err.Error()) return util.ErrorAppend(err, "failed to fetch app component collection") } if !areComponentsDirty(components) { return nil } display.OpenContext("Cleaning dirty components") defer display.CloseContext() // iterate through the components and clean them for _, componentModel := range components { if err := cleanComponent(appModel, componentModel); err != nil { return util.ErrorAppend(err, "failed to clean component") } } return nil } // cleanComponent will clean a component if it was left in a bad state func cleanComponent(appModel *models.App, componentModel *models.Component) error { // short-circuit if the component is not dirty if !isComponentDirty(componentModel) { return nil } if err := Destroy(appModel, componentModel); err != nil { return util.ErrorAppend(err, "failed to remove component") } return nil } // areComponentsDirty checks to see if any of the components are dirty func areComponentsDirty(componentModels []*models.Component) bool { for _, componentModel := range componentModels { if isComponentDirty(componentModel) { return true } } return false } // isComponentDirty returns true if the container is removed or in a bad state func isComponentDirty(componentModel *models.Component) bool { // short-circuit if this service never made it to active if componentModel.State != "active" { return true } // check to see if the container exists _, err := docker.GetContainer(componentModel.ID) return err != nil } server down will no longer trigger a dirty 
component package component import ( "strings" "github.com/jcelliott/lumber" "github.com/nanobox-io/golang-docker-client" "github.com/nanobox-io/nanobox/models" "github.com/nanobox-io/nanobox/util" "github.com/nanobox-io/nanobox/util/display" ) // Clean purges any components in a dirty or incomplete state func Clean(appModel *models.App) error { // fetch all of the app components components, err := appModel.Components() if err != nil { lumber.Error("component:Clean:models.App{ID:%s}.Components(): %s", appModel.ID, err.Error()) return util.ErrorAppend(err, "failed to fetch app component collection") } if !areComponentsDirty(components) { return nil } display.OpenContext("Cleaning dirty components") defer display.CloseContext() // iterate through the components and clean them for _, componentModel := range components { if err := cleanComponent(appModel, componentModel); err != nil { return util.ErrorAppend(err, "failed to clean component") } } return nil } // cleanComponent will clean a component if it was left in a bad state func cleanComponent(appModel *models.App, componentModel *models.Component) error { // short-circuit if the component is not dirty if !isComponentDirty(componentModel) { return nil } if err := Destroy(appModel, componentModel); err != nil { return util.ErrorAppend(err, "failed to remove component") } return nil } // areComponentsDirty checks to see if any of the components are dirty func areComponentsDirty(componentModels []*models.Component) bool { for _, componentModel := range componentModels { if isComponentDirty(componentModel) { return true } } return false } // isComponentDirty returns true if the container is removed or in a bad state func isComponentDirty(componentModel *models.Component) bool { // short-circuit if this service never made it to active if componentModel.State != "active" { return true } // check to see if the container exists _, err := docker.GetContainer(componentModel.ID) if err != nil { if 
strings.Contains(err.Error(), "host is down") { return false } return true } return false }
// Copyright (c) Microsoft Open Technologies, Inc. // All Rights Reserved. // Licensed under the Apache License, Version 2.0. // See License.txt in the project root for license information. package driver_restapi import ( "bytes" "crypto/md5" "encoding/base64" "encoding/xml" "fmt" "github.com/MSOpenTech/packer-azure/packer/builder/azure/driver_restapi/constants" "io/ioutil" "log" "os" "os/exec" "os/user" "path/filepath" "runtime" "strings" ) type SubscriptionInfo struct { Id string CertData []byte } type publishData struct { PublishProfile publishProfile `xml:"PublishProfile"` } type publishProfile struct { SchemaVersion string `xml:",attr"` PublishMethod string `xml:",attr"` Url string `xml:",attr"` ManagementCertificate string `xml:",attr"` Subscriptions []subscription `xml:"Subscription"` } type subscription struct { ServiceManagementUrl string `xml:",attr"` Id string `xml:",attr"` Name string `xml:",attr"` ManagementCertificate string `xml:",attr"` } func ParsePublishSettings(path string, subscriptionName string) (*SubscriptionInfo, error) { var err error if _, err = os.Stat(path); err != nil { err = fmt.Errorf("ParsePublishSettings: '%v' check the path is correct.", path) return nil, err } if len(subscriptionName) == 0 { err = fmt.Errorf("ParsePublishSettings: '%v' subscriptionName is empty.", subscriptionName) return nil, err } log.Println(fmt.Sprintf("Reading file %s", path)) var fileData []byte fileData, err = ioutil.ReadFile(path) if err != nil { return nil, err } log.Println(fmt.Sprintf("culculating md5...")) fileSumMd5Digest := fmt.Sprintf("%x", md5.Sum(fileData)) publishData := publishData{} log.Println(fmt.Sprintf("parsing public settings...")) err = xml.Unmarshal(fileData, &publishData) if err != nil { return nil, err } if len(publishData.PublishProfile.Subscriptions) == 0 { err = fmt.Errorf("ParsePublishSettings: Subscriptions section is empty.") return nil, err } id := "none" certBase64 := publishData.PublishProfile.ManagementCertificate 
log.Println(fmt.Sprintf("looking for subscription info...")) for _, s := range publishData.PublishProfile.Subscriptions { if s.Name == subscriptionName { if len(s.Id) > 0 { id = s.Id } else { err = fmt.Errorf("ParsePublishSettings: subscription id is empty.") return nil, err } if len(s.ManagementCertificate) > 0 { certBase64 = s.ManagementCertificate } else if len(certBase64) == 0 { err = fmt.Errorf("ParsePublishSettings: ManagementCertificate is empty.") return nil, err } break } } if id == "none" { err = fmt.Errorf("ParsePublishSettings: Can't find subscriptionName '%v' in the file '%v'.", subscriptionName, path) return nil, err } log.Println(fmt.Sprintf("checking certificate...")) packerSubscriptionStoreDirName := ".packer_azure" var usrHome string if runtime.GOOS == constants.Windows { usrHome = os.TempDir() } else { log.Println(fmt.Sprintf("getting user home dir...")) // on Windows this operation takes too long (3+ minutes) usr, err := user.Current() if err != nil { return nil, err } usrHome = usr.HomeDir } log.Println(usrHome) packerSubscriptionStoreDirPath := filepath.Join(usrHome, packerSubscriptionStoreDirName) subscrPath := filepath.Join(packerSubscriptionStoreDirPath, id) tagFilePath := filepath.Join(subscrPath, ".tag") var modeDir os.FileMode = 0700 var modeFile os.FileMode = 0600 if _, err = os.Stat(packerSubscriptionStoreDirPath); err != nil { // create storage dir log.Println(fmt.Sprintf("creating packer folder...")) err = os.Mkdir(packerSubscriptionStoreDirPath, modeDir) if err != nil { return nil, err } } if _, err = os.Stat(subscrPath); err != nil { // create subscr dir log.Println(fmt.Sprintf("creating subscription folder...")) err = os.Mkdir(subscrPath, modeDir) if err != nil { return nil, err } } renewCert := false if _, err = os.Stat(tagFilePath); err != nil { renewCert = true } else { // read tag file log.Println(fmt.Sprintf("reading tag file...")) tagFileData, err := ioutil.ReadFile(tagFilePath) if err != nil { return nil, err } if 
string(tagFileData) != fileSumMd5Digest { renewCert = true } } certPemFilename := "cert.pem" certPemPath := filepath.Join(subscrPath, certPemFilename) if renewCert { log.Println("creating pemfile...") // put tag file here err := ioutil.WriteFile(tagFilePath, []byte(fileSumMd5Digest), modeFile) if err != nil { return nil, err } certPfxFilename := "cert.pfx" certPfxPath := filepath.Join(subscrPath, certPfxFilename) decBytes, err := base64.StdEncoding.DecodeString(certBase64) if err != nil { return nil, err } // Save data as pfx file err = ioutil.WriteFile(certPfxPath, decBytes, modeFile) if err != nil { return nil, err } // Find openssl progName := "openssl" binary, err := exec.LookPath(progName) if err != nil { err := fmt.Errorf("Can't find '%s' programm: %s", progName, err.Error()) return nil, err } if runtime.GOOS == constants.Linux { log.Println("executing openssl") err = Exec(binary, "pkcs12", "-in", certPfxPath, "-out", certPemPath, "-nodes", "-passin", "pass:") if err != nil { return nil, err } } else if runtime.GOOS == constants.Windows { var blockBuffer bytes.Buffer blockBuffer.WriteString("Invoke-Command -scriptblock {") blockBuffer.WriteString("$binary = '" + binary + "';") blockBuffer.WriteString("$cert_pfx = '" + certPfxPath + "';") blockBuffer.WriteString("$cert_pem = '" + certPemPath + "';") blockBuffer.WriteString("$args = \"pkcs12 -in $cert_pfx -out $cert_pem -nodes -passin pass:\";") blockBuffer.WriteString("Start-Process $binary -NoNewWindow -Wait -Argument $args;") blockBuffer.WriteString("}") err = Exec("powershell", blockBuffer.String()) if err != nil { return nil, err } } } log.Println("reading pemfile: " + certPemPath) var pemData []byte pemData, err = ioutil.ReadFile(certPemPath) if err != nil { return nil, err } si := &SubscriptionInfo{ Id: id, CertData: pemData, } return si, nil } func Exec(name string, arg ...string) error { log.Printf("Executing: %#v\n", arg) var stdout, stderr bytes.Buffer script := exec.Command(name, arg...) 
script.Stdout = &stdout script.Stderr = &stderr err := script.Run() if _, ok := err.(*exec.ExitError); ok { err = fmt.Errorf("Exec error: %s\n", err) } stderrString := strings.TrimSpace(stderr.String()) stdoutString := strings.TrimSpace(stdout.String()) log.Printf("Exec stdout: %s\n", stdoutString) log.Printf("Exec stderr: %s\n", stderrString) return err } replace os/user with github.com/mitchellh/go-homedir to avoid the use of cgo Otherwise when the binary copied to vanilla debian it fails withe following: $ packer build -var sn=mysubs -var psPath=./my.publishsettings packer-azure.json ==> azure: Preparing builder... azure: Getting subscr info... Build 'azure' errored: ParsePublishSettings error: user: Current not implemented on linux/amd64 // Copyright (c) Microsoft Open Technologies, Inc. // All Rights Reserved. // Licensed under the Apache License, Version 2.0. // See License.txt in the project root for license information. package driver_restapi import ( "bytes" "crypto/md5" "encoding/base64" "encoding/xml" "fmt" "github.com/MSOpenTech/packer-azure/packer/builder/azure/driver_restapi/constants" "github.com/mitchellh/go-homedir" "io/ioutil" "log" "os" "os/exec" "path/filepath" "runtime" "strings" ) type SubscriptionInfo struct { Id string CertData []byte } type publishData struct { PublishProfile publishProfile `xml:"PublishProfile"` } type publishProfile struct { SchemaVersion string `xml:",attr"` PublishMethod string `xml:",attr"` Url string `xml:",attr"` ManagementCertificate string `xml:",attr"` Subscriptions []subscription `xml:"Subscription"` } type subscription struct { ServiceManagementUrl string `xml:",attr"` Id string `xml:",attr"` Name string `xml:",attr"` ManagementCertificate string `xml:",attr"` } func ParsePublishSettings(path string, subscriptionName string) (*SubscriptionInfo, error) { var err error if _, err = os.Stat(path); err != nil { err = fmt.Errorf("ParsePublishSettings: '%v' check the path is correct.", path) return nil, err } if 
len(subscriptionName) == 0 { err = fmt.Errorf("ParsePublishSettings: '%v' subscriptionName is empty.", subscriptionName) return nil, err } log.Println(fmt.Sprintf("Reading file %s", path)) var fileData []byte fileData, err = ioutil.ReadFile(path) if err != nil { return nil, err } log.Println(fmt.Sprintf("culculating md5...")) fileSumMd5Digest := fmt.Sprintf("%x", md5.Sum(fileData)) publishData := publishData{} log.Println(fmt.Sprintf("parsing public settings...")) err = xml.Unmarshal(fileData, &publishData) if err != nil { return nil, err } if len(publishData.PublishProfile.Subscriptions) == 0 { err = fmt.Errorf("ParsePublishSettings: Subscriptions section is empty.") return nil, err } id := "none" certBase64 := publishData.PublishProfile.ManagementCertificate log.Println(fmt.Sprintf("looking for subscription info...")) for _, s := range publishData.PublishProfile.Subscriptions { if s.Name == subscriptionName { if len(s.Id) > 0 { id = s.Id } else { err = fmt.Errorf("ParsePublishSettings: subscription id is empty.") return nil, err } if len(s.ManagementCertificate) > 0 { certBase64 = s.ManagementCertificate } else if len(certBase64) == 0 { err = fmt.Errorf("ParsePublishSettings: ManagementCertificate is empty.") return nil, err } break } } if id == "none" { err = fmt.Errorf("ParsePublishSettings: Can't find subscriptionName '%v' in the file '%v'.", subscriptionName, path) return nil, err } log.Println(fmt.Sprintf("checking certificate...")) packerSubscriptionStoreDirName := ".packer_azure" usrHome, err := homedir.Dir() if err != nil { return nil, err } log.Println(usrHome) packerSubscriptionStoreDirPath := filepath.Join(usrHome, packerSubscriptionStoreDirName) subscrPath := filepath.Join(packerSubscriptionStoreDirPath, id) tagFilePath := filepath.Join(subscrPath, ".tag") var modeDir os.FileMode = 0700 var modeFile os.FileMode = 0600 if _, err = os.Stat(packerSubscriptionStoreDirPath); err != nil { // create storage dir log.Println(fmt.Sprintf("creating packer 
folder...")) err = os.Mkdir(packerSubscriptionStoreDirPath, modeDir) if err != nil { return nil, err } } if _, err = os.Stat(subscrPath); err != nil { // create subscr dir log.Println(fmt.Sprintf("creating subscription folder...")) err = os.Mkdir(subscrPath, modeDir) if err != nil { return nil, err } } renewCert := false if _, err = os.Stat(tagFilePath); err != nil { renewCert = true } else { // read tag file log.Println(fmt.Sprintf("reading tag file...")) tagFileData, err := ioutil.ReadFile(tagFilePath) if err != nil { return nil, err } if string(tagFileData) != fileSumMd5Digest { renewCert = true } } certPemFilename := "cert.pem" certPemPath := filepath.Join(subscrPath, certPemFilename) if renewCert { log.Println("creating pemfile...") // put tag file here err := ioutil.WriteFile(tagFilePath, []byte(fileSumMd5Digest), modeFile) if err != nil { return nil, err } certPfxFilename := "cert.pfx" certPfxPath := filepath.Join(subscrPath, certPfxFilename) decBytes, err := base64.StdEncoding.DecodeString(certBase64) if err != nil { return nil, err } // Save data as pfx file err = ioutil.WriteFile(certPfxPath, decBytes, modeFile) if err != nil { return nil, err } // Find openssl progName := "openssl" binary, err := exec.LookPath(progName) if err != nil { err := fmt.Errorf("Can't find '%s' programm: %s", progName, err.Error()) return nil, err } if runtime.GOOS == constants.Linux { log.Println("executing openssl") err = Exec(binary, "pkcs12", "-in", certPfxPath, "-out", certPemPath, "-nodes", "-passin", "pass:") if err != nil { return nil, err } } else if runtime.GOOS == constants.Windows { var blockBuffer bytes.Buffer blockBuffer.WriteString("Invoke-Command -scriptblock {") blockBuffer.WriteString("$binary = '" + binary + "';") blockBuffer.WriteString("$cert_pfx = '" + certPfxPath + "';") blockBuffer.WriteString("$cert_pem = '" + certPemPath + "';") blockBuffer.WriteString("$args = \"pkcs12 -in $cert_pfx -out $cert_pem -nodes -passin pass:\";") 
blockBuffer.WriteString("Start-Process $binary -NoNewWindow -Wait -Argument $args;") blockBuffer.WriteString("}") err = Exec("powershell", blockBuffer.String()) if err != nil { return nil, err } } } log.Println("reading pemfile: " + certPemPath) var pemData []byte pemData, err = ioutil.ReadFile(certPemPath) if err != nil { return nil, err } si := &SubscriptionInfo{ Id: id, CertData: pemData, } return si, nil } func Exec(name string, arg ...string) error { log.Printf("Executing: %#v\n", arg) var stdout, stderr bytes.Buffer script := exec.Command(name, arg...) script.Stdout = &stdout script.Stderr = &stderr err := script.Run() if _, ok := err.(*exec.ExitError); ok { err = fmt.Errorf("Exec error: %s\n", err) } stderrString := strings.TrimSpace(stderr.String()) stdoutString := strings.TrimSpace(stdout.String()) log.Printf("Exec stdout: %s\n", stdoutString) log.Printf("Exec stderr: %s\n", stderrString) return err }
// Copyright 2013 bee authors
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.

// Package swaggergen parses beego router/controller sources with go/ast and
// generates Swagger 2.0 documentation (swagger.json / swagger.yml).
package swaggergen

import (
	"encoding/json"
	"errors"
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"os"
	"path"
	"path/filepath"
	"reflect"
	"regexp"
	"runtime"
	"strconv"
	"strings"
	"unicode"

	"gopkg.in/yaml.v2"

	"github.com/astaxie/beego/swagger"
	"github.com/astaxie/beego/utils"
	beeLogger "github.com/beego/bee/logger"
	bu "github.com/beego/bee/utils"
)

// MIME types emitted for the @Accept comment directives.
const (
	ajson  = "application/json"
	axml   = "application/xml"
	aplain = "text/plain"
	ahtml  = "text/html"
)

// Package-level state shared by the generator passes. NOTE(review): this
// mutable global state makes the package single-use per process; it is reset
// only in init().
var pkgCache map[string]struct{} //pkg:controller:function:comments comments: key:value
// controllerComments maps a controller's qualified name to its doc comment.
var controllerComments map[string]string

// importlist maps a local import alias to the full package path.
var importlist map[string]string
var controllerList map[string]map[string]*swagger.Item //controllername Paths items
// modelsList maps pkgpath+controllerName to the schemas referenced by it.
var modelsList map[string]map[string]swagger.Schema

// rootapi is the Swagger document being accumulated.
var rootapi swagger.Swagger

// astPkgs holds every parsed package, keyed by package name.
var astPkgs map[string]*ast.Package

// basicTypes maps a Go builtin type to its "swaggerType:format" pair.
// refer to builtin.go
var basicTypes = map[string]string{
	"bool":       "boolean:",
	"uint":       "integer:int32",
	"uint8":      "integer:int32",
	"uint16":     "integer:int32",
	"uint32":     "integer:int32",
	"uint64":     "integer:int64",
	"int":        "integer:int64",
	"int8":       "integer:int32",
	"int16":      "integer:int32",
	"int32":      "integer:int32",
	"int64":      "integer:int64",
	"uintptr":    "integer:int64",
	"float32":    "number:float",
	"float64":    "number:double",
	"string":     "string:",
	"complex64":  "number:float",
	"complex128": "number:double",
	"byte":       "string:byte",
	"rune":       "string:byte",
	// builtin golang objects
	"time.Time": "string:string",
}

// (declaration continues on the next chunk line: stdlibObject map)
var
stdlibObject = map[string]string{ "&{time Time}": "time.Time", } func init() { pkgCache = make(map[string]struct{}) controllerComments = make(map[string]string) importlist = make(map[string]string) controllerList = make(map[string]map[string]*swagger.Item) modelsList = make(map[string]map[string]swagger.Schema) astPkgs = map[string]*ast.Package{} } func ParsePackagesFromDir(dirpath string) { c := make(chan error) go func() { filepath.Walk(dirpath, func(fpath string, fileInfo os.FileInfo, err error) error { if err != nil { return nil } if !fileInfo.IsDir() { return nil } if !strings.Contains(fpath, "vendor") && !strings.Contains(fpath, "tests") { err = parsePackageFromDir(fpath) if err != nil { // Send the error to through the channel and continue walking c <- fmt.Errorf("Error while parsing directory: %s", err.Error()) return nil } } return nil }) close(c) }() for err := range c { beeLogger.Log.Warnf("%s", err) } } func parsePackageFromDir(path string) error { fileSet := token.NewFileSet() folderPkgs, err := parser.ParseDir(fileSet, path, func(info os.FileInfo) bool { name := info.Name() return !info.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") }, parser.ParseComments) if err != nil { return err } for k, v := range folderPkgs { astPkgs[k] = v } return nil } func GenerateDocs(curpath string) { fset := token.NewFileSet() f, err := parser.ParseFile(fset, filepath.Join(curpath, "routers", "router.go"), nil, parser.ParseComments) if err != nil { beeLogger.Log.Fatalf("Error while parsing router.go: %s", err) } rootapi.Infos = swagger.Information{} rootapi.SwaggerVersion = "2.0" // Analyse API comments if f.Comments != nil { for _, c := range f.Comments { for _, s := range strings.Split(c.Text(), "\n") { if strings.HasPrefix(s, "@APIVersion") { rootapi.Infos.Version = strings.TrimSpace(s[len("@APIVersion"):]) } else if strings.HasPrefix(s, "@Title") { rootapi.Infos.Title = strings.TrimSpace(s[len("@Title"):]) } else if strings.HasPrefix(s, 
"@Description") { rootapi.Infos.Description = strings.TrimSpace(s[len("@Description"):]) } else if strings.HasPrefix(s, "@TermsOfServiceUrl") { rootapi.Infos.TermsOfService = strings.TrimSpace(s[len("@TermsOfServiceUrl"):]) } else if strings.HasPrefix(s, "@Contact") { rootapi.Infos.Contact.EMail = strings.TrimSpace(s[len("@Contact"):]) } else if strings.HasPrefix(s, "@Name") { rootapi.Infos.Contact.Name = strings.TrimSpace(s[len("@Name"):]) } else if strings.HasPrefix(s, "@URL") { rootapi.Infos.Contact.URL = strings.TrimSpace(s[len("@URL"):]) } else if strings.HasPrefix(s, "@LicenseUrl") { if rootapi.Infos.License == nil { rootapi.Infos.License = &swagger.License{URL: strings.TrimSpace(s[len("@LicenseUrl"):])} } else { rootapi.Infos.License.URL = strings.TrimSpace(s[len("@LicenseUrl"):]) } } else if strings.HasPrefix(s, "@License") { if rootapi.Infos.License == nil { rootapi.Infos.License = &swagger.License{Name: strings.TrimSpace(s[len("@License"):])} } else { rootapi.Infos.License.Name = strings.TrimSpace(s[len("@License"):]) } } else if strings.HasPrefix(s, "@Schemes") { rootapi.Schemes = strings.Split(strings.TrimSpace(s[len("@Schemes"):]), ",") } else if strings.HasPrefix(s, "@Host") { rootapi.Host = strings.TrimSpace(s[len("@Host"):]) } } } } // Analyse controller package for _, im := range f.Imports { localName := "" if im.Name != nil { localName = im.Name.Name } analyseControllerPkg(path.Join(curpath, "vendor"), localName, im.Path.Value) } for _, d := range f.Decls { switch specDecl := d.(type) { case *ast.FuncDecl: for _, l := range specDecl.Body.List { switch stmt := l.(type) { case *ast.AssignStmt: for _, l := range stmt.Rhs { if v, ok := l.(*ast.CallExpr); ok { // Analyse NewNamespace, it will return version and the subfunction if selName := v.Fun.(*ast.SelectorExpr).Sel.String(); selName != "NewNamespace" { continue } version, params := analyseNewNamespace(v) if rootapi.BasePath == "" && version != "" { rootapi.BasePath = version } for _, p := range 
params { switch pp := p.(type) { case *ast.CallExpr: controllerName := "" if selname := pp.Fun.(*ast.SelectorExpr).Sel.String(); selname == "NSNamespace" { s, params := analyseNewNamespace(pp) for _, sp := range params { switch pp := sp.(type) { case *ast.CallExpr: if pp.Fun.(*ast.SelectorExpr).Sel.String() == "NSInclude" { controllerName = analyseNSInclude(s, pp) if v, ok := controllerComments[controllerName]; ok { rootapi.Tags = append(rootapi.Tags, swagger.Tag{ Name: strings.Trim(s, "/"), Description: v, }) } } } } } else if selname == "NSInclude" { controllerName = analyseNSInclude("", pp) if v, ok := controllerComments[controllerName]; ok { rootapi.Tags = append(rootapi.Tags, swagger.Tag{ Name: controllerName, // if the NSInclude has no prefix, we use the controllername as the tag Description: v, }) } } } } } } } } } } os.Mkdir(path.Join(curpath, "swagger"), 0755) fd, err := os.Create(path.Join(curpath, "swagger", "swagger.json")) if err != nil { panic(err) } fdyml, err := os.Create(path.Join(curpath, "swagger", "swagger.yml")) if err != nil { panic(err) } defer fdyml.Close() defer fd.Close() dt, err := json.MarshalIndent(rootapi, "", " ") dtyml, erryml := yaml.Marshal(rootapi) if err != nil || erryml != nil { panic(err) } _, err = fd.Write(dt) _, erryml = fdyml.Write(dtyml) if err != nil || erryml != nil { panic(err) } } // analyseNewNamespace returns version and the others params func analyseNewNamespace(ce *ast.CallExpr) (first string, others []ast.Expr) { for i, p := range ce.Args { if i == 0 { switch pp := p.(type) { case *ast.BasicLit: first = strings.Trim(pp.Value, `"`) } continue } others = append(others, p) } return } func analyseNSInclude(baseurl string, ce *ast.CallExpr) string { cname := "" for _, p := range ce.Args { x := p.(*ast.UnaryExpr).X.(*ast.CompositeLit).Type.(*ast.SelectorExpr) if v, ok := importlist[fmt.Sprint(x.X)]; ok { cname = v + x.Sel.Name } if apis, ok := controllerList[cname]; ok { for rt, item := range apis { tag := "" if baseurl 
!= "" { rt = baseurl + rt tag = strings.Trim(baseurl, "/") } else { tag = cname } if item.Get != nil { item.Get.Tags = []string{tag} } if item.Post != nil { item.Post.Tags = []string{tag} } if item.Put != nil { item.Put.Tags = []string{tag} } if item.Patch != nil { item.Patch.Tags = []string{tag} } if item.Head != nil { item.Head.Tags = []string{tag} } if item.Delete != nil { item.Delete.Tags = []string{tag} } if item.Options != nil { item.Options.Tags = []string{tag} } if len(rootapi.Paths) == 0 { rootapi.Paths = make(map[string]*swagger.Item) } rt = urlReplace(rt) rootapi.Paths[rt] = item } } } return cname } func analyseControllerPkg(vendorPath, localName, pkgpath string) { pkgpath = strings.Trim(pkgpath, "\"") if isSystemPackage(pkgpath) { return } if pkgpath == "github.com/astaxie/beego" { return } if localName != "" { importlist[localName] = pkgpath } else { pps := strings.Split(pkgpath, "/") importlist[pps[len(pps)-1]] = pkgpath } gopaths := bu.GetGOPATHs() if len(gopaths) == 0 { beeLogger.Log.Fatal("GOPATH environment variable is not set or empty") } pkgRealpath := "" wg, _ := filepath.EvalSymlinks(filepath.Join(vendorPath, pkgpath)) if utils.FileExists(wg) { pkgRealpath = wg } else { wgopath := gopaths for _, wg := range wgopath { wg, _ = filepath.EvalSymlinks(filepath.Join(wg, "src", pkgpath)) if utils.FileExists(wg) { pkgRealpath = wg break } } } if pkgRealpath != "" { if _, ok := pkgCache[pkgpath]; ok { return } pkgCache[pkgpath] = struct{}{} } else { beeLogger.Log.Fatalf("Package '%s' does not exist in the GOPATH or vendor path", pkgpath) } fileSet := token.NewFileSet() astPkgs, err := parser.ParseDir(fileSet, pkgRealpath, func(info os.FileInfo) bool { name := info.Name() return !info.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") }, parser.ParseComments) if err != nil { beeLogger.Log.Fatalf("Error while parsing dir at '%s': %s", pkgpath, err) } for _, pkg := range astPkgs { for _, fl := range pkg.Files { for _, d := range 
fl.Decls { switch specDecl := d.(type) { case *ast.FuncDecl: if specDecl.Recv != nil && len(specDecl.Recv.List) > 0 { if t, ok := specDecl.Recv.List[0].Type.(*ast.StarExpr); ok { // Parse controller method parserComments(specDecl, fmt.Sprint(t.X), pkgpath) } } case *ast.GenDecl: if specDecl.Tok == token.TYPE { for _, s := range specDecl.Specs { switch tp := s.(*ast.TypeSpec).Type.(type) { case *ast.StructType: _ = tp.Struct // Parse controller definition comments if strings.TrimSpace(specDecl.Doc.Text()) != "" { controllerComments[pkgpath+s.(*ast.TypeSpec).Name.String()] = specDecl.Doc.Text() } } } } } } } } } func isSystemPackage(pkgpath string) bool { goroot := os.Getenv("GOROOT") if goroot == "" { goroot = runtime.GOROOT() } if goroot == "" { beeLogger.Log.Fatalf("GOROOT environment variable is not set or empty") } wg, _ := filepath.EvalSymlinks(filepath.Join(goroot, "src", "pkg", pkgpath)) if utils.FileExists(wg) { return true } //TODO(zh):support go1.4 wg, _ = filepath.EvalSymlinks(filepath.Join(goroot, "src", pkgpath)) return utils.FileExists(wg) } func peekNextSplitString(ss string) (s string, spacePos int) { spacePos = strings.IndexFunc(ss, unicode.IsSpace) if spacePos < 0 { s = ss spacePos = len(ss) } else { s = strings.TrimSpace(ss[:spacePos]) } return } // parse the func comments func parserComments(f *ast.FuncDecl, controllerName, pkgpath string) error { var routerPath string var HTTPMethod string opts := swagger.Operation{ Responses: make(map[string]swagger.Response), } funcName := f.Name.String() comments := f.Doc funcParamMap := buildParamMap(f.Type.Params) //TODO: resultMap := buildParamMap(f.Type.Results) if comments != nil && comments.List != nil { for _, c := range comments.List { t := strings.TrimSpace(strings.TrimLeft(c.Text, "//")) if strings.HasPrefix(t, "@router") { elements := strings.TrimSpace(t[len("@router"):]) e1 := strings.SplitN(elements, " ", 2) if len(e1) < 1 { return errors.New("you should has router infomation") } routerPath = 
e1[0] if len(e1) == 2 && e1[1] != "" { e1 = strings.SplitN(e1[1], " ", 2) HTTPMethod = strings.ToUpper(strings.Trim(e1[0], "[]")) } else { HTTPMethod = "GET" } } else if strings.HasPrefix(t, "@Title") { opts.OperationID = controllerName + "." + strings.TrimSpace(t[len("@Title"):]) } else if strings.HasPrefix(t, "@Description") { opts.Description = strings.TrimSpace(t[len("@Description"):]) } else if strings.HasPrefix(t, "@Summary") { opts.Summary = strings.TrimSpace(t[len("@Summary"):]) } else if strings.HasPrefix(t, "@Success") { ss := strings.TrimSpace(t[len("@Success"):]) rs := swagger.Response{} respCode, pos := peekNextSplitString(ss) ss = strings.TrimSpace(ss[pos:]) respType, pos := peekNextSplitString(ss) if respType == "{object}" || respType == "{array}" { isArray := respType == "{array}" ss = strings.TrimSpace(ss[pos:]) schemaName, pos := peekNextSplitString(ss) if schemaName == "" { beeLogger.Log.Fatalf("[%s.%s] Schema must follow {object} or {array}", controllerName, funcName) } if strings.HasPrefix(schemaName, "[]") { schemaName = schemaName[2:] isArray = true } schema := swagger.Schema{} if sType, ok := basicTypes[schemaName]; ok { typeFormat := strings.Split(sType, ":") schema.Type = typeFormat[0] schema.Format = typeFormat[1] } else { m, mod, realTypes := getModel(schemaName) schema.Ref = "#/definitions/" + m if _, ok := modelsList[pkgpath+controllerName]; !ok { modelsList[pkgpath+controllerName] = make(map[string]swagger.Schema) } modelsList[pkgpath+controllerName][schemaName] = mod appendModels(pkgpath, controllerName, realTypes) } if isArray { rs.Schema = &swagger.Schema{ Type: "array", Items: &schema, } } else { rs.Schema = &schema } rs.Description = strings.TrimSpace(ss[pos:]) } else { rs.Description = strings.TrimSpace(ss) } opts.Responses[respCode] = rs } else if strings.HasPrefix(t, "@Param") { para := swagger.Parameter{} p := getparams(strings.TrimSpace(t[len("@Param "):])) if len(p) < 4 { beeLogger.Log.Fatal(controllerName + "_" + funcName 
+ "'s comments @Param should have at least 4 params") } paramNames := strings.SplitN(p[0], "=>", 2) para.Name = paramNames[0] funcParamName := para.Name if len(paramNames) > 1 { funcParamName = paramNames[1] } paramType, ok := funcParamMap[funcParamName] if ok { delete(funcParamMap, funcParamName) } switch p[1] { case "query": fallthrough case "header": fallthrough case "path": fallthrough case "formData": fallthrough case "body": break default: beeLogger.Log.Warnf("[%s.%s] Unknown param location: %s. Possible values are `query`, `header`, `path`, `formData` or `body`.\n", controllerName, funcName, p[1]) } para.In = p[1] pp := strings.Split(p[2], ".") typ := pp[len(pp)-1] if len(pp) >= 2 { m, mod, realTypes := getModel(p[2]) para.Schema = &swagger.Schema{ Ref: "#/definitions/" + m, } if _, ok := modelsList[pkgpath+controllerName]; !ok { modelsList[pkgpath+controllerName] = make(map[string]swagger.Schema) } modelsList[pkgpath+controllerName][typ] = mod appendModels(pkgpath, controllerName, realTypes) } else { if typ == "auto" { typ = paramType } setParamType(&para, typ, pkgpath, controllerName) } switch len(p) { case 5: para.Required, _ = strconv.ParseBool(p[3]) para.Description = strings.Trim(p[4], `" `) case 6: para.Default = str2RealType(p[3], para.Type) para.Required, _ = strconv.ParseBool(p[4]) para.Description = strings.Trim(p[5], `" `) default: para.Description = strings.Trim(p[3], `" `) } opts.Parameters = append(opts.Parameters, para) } else if strings.HasPrefix(t, "@Failure") { rs := swagger.Response{} st := strings.TrimSpace(t[len("@Failure"):]) var cd []rune var start bool for i, s := range st { if unicode.IsSpace(s) { if start { rs.Description = strings.TrimSpace(st[i+1:]) break } else { continue } } start = true cd = append(cd, s) } opts.Responses[string(cd)] = rs } else if strings.HasPrefix(t, "@Deprecated") { opts.Deprecated, _ = strconv.ParseBool(strings.TrimSpace(t[len("@Deprecated"):])) } else if strings.HasPrefix(t, "@Accept") { accepts := 
strings.Split(strings.TrimSpace(strings.TrimSpace(t[len("@Accept"):])), ",") for _, a := range accepts { switch a { case "json": opts.Consumes = append(opts.Consumes, ajson) opts.Produces = append(opts.Produces, ajson) case "xml": opts.Consumes = append(opts.Consumes, axml) opts.Produces = append(opts.Produces, axml) case "plain": opts.Consumes = append(opts.Consumes, aplain) opts.Produces = append(opts.Produces, aplain) case "html": opts.Consumes = append(opts.Consumes, ahtml) opts.Produces = append(opts.Produces, ahtml) } } } } } if routerPath != "" { //Go over function parameters which were not mapped and create swagger params for them for name, typ := range funcParamMap { para := swagger.Parameter{} para.Name = name setParamType(&para, typ, pkgpath, controllerName) if paramInPath(name, routerPath) { para.In = "path" } else { para.In = "query" } opts.Parameters = append(opts.Parameters, para) } var item *swagger.Item if itemList, ok := controllerList[pkgpath+controllerName]; ok { if it, ok := itemList[routerPath]; !ok { item = &swagger.Item{} } else { item = it } } else { controllerList[pkgpath+controllerName] = make(map[string]*swagger.Item) item = &swagger.Item{} } switch HTTPMethod { case "GET": item.Get = &opts case "POST": item.Post = &opts case "PUT": item.Put = &opts case "PATCH": item.Patch = &opts case "DELETE": item.Delete = &opts case "HEAD": item.Head = &opts case "OPTIONS": item.Options = &opts } controllerList[pkgpath+controllerName][routerPath] = item } return nil } func setParamType(para *swagger.Parameter, typ string, pkgpath, controllerName string) { isArray := false paraType := "" paraFormat := "" if strings.HasPrefix(typ, "[]") { typ = typ[2:] isArray = true } if typ == "string" || typ == "number" || typ == "integer" || typ == "boolean" || typ == "array" || typ == "file" { paraType = typ } else if sType, ok := basicTypes[typ]; ok { typeFormat := strings.Split(sType, ":") paraType = typeFormat[0] paraFormat = typeFormat[1] } else { m, mod, 
realTypes := getModel(typ) para.Schema = &swagger.Schema{ Ref: "#/definitions/" + m, } if _, ok := modelsList[pkgpath+controllerName]; !ok { modelsList[pkgpath+controllerName] = make(map[string]swagger.Schema) } modelsList[pkgpath+controllerName][typ] = mod appendModels(pkgpath, controllerName, realTypes) } if isArray { para.Type = "array" para.Items = &swagger.ParameterItems{ Type: paraType, Format: paraFormat, } } else { para.Type = paraType para.Format = paraFormat } } func paramInPath(name, route string) bool { return strings.HasSuffix(route, ":"+name) || strings.Contains(route, ":"+name+"/") } func getFunctionParamType(t ast.Expr) string { switch paramType := t.(type) { case *ast.Ident: return paramType.Name // case *ast.Ellipsis: // result := getFunctionParamType(paramType.Elt) // result.array = true // return result case *ast.ArrayType: return "[]" + getFunctionParamType(paramType.Elt) case *ast.StarExpr: return getFunctionParamType(paramType.X) case *ast.SelectorExpr: return getFunctionParamType(paramType.X) + "." 
+ paramType.Sel.Name // tail of getFunctionParamType: qualify selector types as "pkg.Type"
	default:
		return ""
	}
}

// buildParamMap maps each field of an *ast.FieldList (function parameters or
// results) to the string form of its type, as produced by
// getFunctionParamType. Unnamed fields get their positional index ("0", "1",
// ...) as the key.
func buildParamMap(list *ast.FieldList) map[string]string {
	i := 0
	result := map[string]string{}
	if list != nil {
		funcParams := list.List
		for _, fparam := range funcParams {
			param := getFunctionParamType(fparam.Type)
			var paramName string
			if len(fparam.Names) > 0 {
				paramName = fparam.Names[0].Name
			} else {
				paramName = fmt.Sprint(i)
				i++
			}
			result[paramName] = param
		}
	}
	return result
}

// getparams splits an @Param comment body into whitespace-separated fields,
// honouring double quotes so that a quoted description stays a single field
// (quote characters themselves are stripped).
// e.g. `query form string true "The email for login"`
//   -> [query form string true "The email for login" (unquoted)]
// NOTE(review): j is incremented but never read — dead counter.
func getparams(str string) []string {
	var s []rune
	var j int
	var start bool
	var r []string
	var quoted int8
	for _, c := range []rune(str) {
		// A space terminates the current field only outside quotes.
		if unicode.IsSpace(c) && quoted == 0 {
			if !start {
				continue
			} else {
				start = false
				j++
				r = append(r, string(s))
				s = make([]rune, 0)
				continue
			}
		}
		start = true
		if c == '"' {
			quoted ^= 1 // toggle in/out of a quoted run
			continue
		}
		s = append(s, c)
	}
	if len(s) > 0 {
		r = append(r, string(s))
	}
	return r
}

// getModel resolves a type name such as "models.User" to a Swagger schema by
// scanning every parsed package for a matching type declaration, and records
// the schema under rootapi.Definitions. realTypes collects nested model type
// names discovered while parsing, for recursive processing by appendModels.
func getModel(str string) (objectname string, m swagger.Schema, realTypes []string) {
	strs := strings.Split(str, ".")
	objectname = strs[len(strs)-1]
	packageName := ""
	m.Type = "object"
	for _, pkg := range astPkgs {
		for _, fl := range pkg.Files {
			for k, d := range fl.Scope.Objects {
				if d.Kind == ast.Typ {
					if k != objectname {
						continue
					}
					packageName = pkg.Name
					parseObject(d, k, &m, &realTypes, astPkgs, pkg.Name)
				}
			}
		}
	}
	// An empty Title means parseObject never matched the name.
	if m.Title == "" {
		beeLogger.Log.Warnf("Cannot find the object: %s", str)
		// TODO remove when all type have been supported
		//os.Exit(1)
	}
	if len(rootapi.Definitions) == 0 {
		rootapi.Definitions = make(map[string]swagger.Schema)
	}
	// (statement continues on the next chunk line: "+ objectname")
	objectname = packageName + "."
+ objectname rootapi.Definitions[objectname] = m return } func parseObject(d *ast.Object, k string, m *swagger.Schema, realTypes *[]string, astPkgs map[string]*ast.Package, packageName string) { ts, ok := d.Decl.(*ast.TypeSpec) if !ok { beeLogger.Log.Fatalf("Unknown type without TypeSec: %v\n", d) } // TODO support other types, such as `ArrayType`, `MapType`, `InterfaceType` etc... st, ok := ts.Type.(*ast.StructType) if !ok { return } m.Title = k if st.Fields.List != nil { m.Properties = make(map[string]swagger.Propertie) for _, field := range st.Fields.List { realType := "" isSlice, realType, sType := typeAnalyser(field) if (isSlice && isBasicType(realType)) || sType == "object" { if len(strings.Split(realType, " ")) > 1 { realType = strings.Replace(realType, " ", ".", -1) realType = strings.Replace(realType, "&", "", -1) realType = strings.Replace(realType, "{", "", -1) realType = strings.Replace(realType, "}", "", -1) } else { realType = packageName + "." + realType } } *realTypes = append(*realTypes, realType) mp := swagger.Propertie{} if isSlice { mp.Type = "array" if isBasicType(realType) { typeFormat := strings.Split(sType, ":") mp.Items = &swagger.Propertie{ Type: typeFormat[0], Format: typeFormat[1], } } else { mp.Items = &swagger.Propertie{ Ref: "#/definitions/" + realType, } } } else { if sType == "object" { mp.Ref = "#/definitions/" + realType } else if isBasicType(realType) { typeFormat := strings.Split(sType, ":") mp.Type = typeFormat[0] mp.Format = typeFormat[1] } else if realType == "map" { typeFormat := strings.Split(sType, ":") mp.AdditionalProperties = &swagger.Propertie{ Type: typeFormat[0], Format: typeFormat[1], } } } if field.Names != nil { // set property name as field name var name = field.Names[0].Name // if no tag skip tag processing if field.Tag == nil { m.Properties[name] = mp continue } var tagValues []string stag := reflect.StructTag(strings.Trim(field.Tag.Value, "`")) defaultValue := stag.Get("doc") if defaultValue != "" { r, _ := 
regexp.Compile(`default\((.*)\)`) if r.MatchString(defaultValue) { res := r.FindStringSubmatch(defaultValue) mp.Default = str2RealType(res[1], realType) } else { beeLogger.Log.Warnf("Invalid default value: %s", defaultValue) } } tag := stag.Get("json") if tag != "" { tagValues = strings.Split(tag, ",") } // dont add property if json tag first value is "-" if len(tagValues) == 0 || tagValues[0] != "-" { // set property name to the left most json tag value only if is not omitempty if len(tagValues) > 0 && tagValues[0] != "omitempty" { name = tagValues[0] } if thrifttag := stag.Get("thrift"); thrifttag != "" { ts := strings.Split(thrifttag, ",") if ts[0] != "" { name = ts[0] } } if required := stag.Get("required"); required != "" { m.Required = append(m.Required, name) } if desc := stag.Get("description"); desc != "" { mp.Description = desc } m.Properties[name] = mp } if ignore := stag.Get("ignore"); ignore != "" { continue } } else { for _, pkg := range astPkgs { for _, fl := range pkg.Files { for nameOfObj, obj := range fl.Scope.Objects { if obj.Name == fmt.Sprint(field.Type) { parseObject(obj, nameOfObj, m, realTypes, astPkgs, pkg.Name) } } } } } } } } func typeAnalyser(f *ast.Field) (isSlice bool, realType, swaggerType string) { if arr, ok := f.Type.(*ast.ArrayType); ok { if isBasicType(fmt.Sprint(arr.Elt)) { return false, fmt.Sprintf("[]%v", arr.Elt), basicTypes[fmt.Sprint(arr.Elt)] } if mp, ok := arr.Elt.(*ast.MapType); ok { return false, fmt.Sprintf("map[%v][%v]", mp.Key, mp.Value), "object" } if star, ok := arr.Elt.(*ast.StarExpr); ok { return true, fmt.Sprint(star.X), "object" } return true, fmt.Sprint(arr.Elt), "object" } switch t := f.Type.(type) { case *ast.StarExpr: return false, fmt.Sprint(t.X), "object" case *ast.MapType: val := fmt.Sprintf("%v", t.Value) if isBasicType(val) { return false, "map", basicTypes[val] } return false, val, "object" } basicType := fmt.Sprint(f.Type) if object, isStdLibObject := stdlibObject[basicType]; isStdLibObject { 
basicType = object } if k, ok := basicTypes[basicType]; ok { return false, basicType, k } return false, basicType, "object" } func isBasicType(Type string) bool { if _, ok := basicTypes[Type]; ok { return true } return false } // append models func appendModels(pkgpath, controllerName string, realTypes []string) { for _, realType := range realTypes { if realType != "" && !isBasicType(strings.TrimLeft(realType, "[]")) && !strings.HasPrefix(realType, "map") && !strings.HasPrefix(realType, "&") { if _, ok := modelsList[pkgpath+controllerName][realType]; ok { continue } _, mod, newRealTypes := getModel(realType) modelsList[pkgpath+controllerName][realType] = mod appendModels(pkgpath, controllerName, newRealTypes) } } } func urlReplace(src string) string { pt := strings.Split(src, "/") for i, p := range pt { if len(p) > 0 { if p[0] == ':' { pt[i] = "{" + p[1:] + "}" } else if p[0] == '?' && p[1] == ':' { pt[i] = "{" + p[2:] + "}" } } } return strings.Join(pt, "/") } func str2RealType(s string, typ string) interface{} { var err error var ret interface{} switch typ { case "int", "int64", "int32", "int16", "int8": ret, err = strconv.Atoi(s) case "bool": ret, err = strconv.ParseBool(s) case "float64": ret, err = strconv.ParseFloat(s, 64) case "float32": ret, err = strconv.ParseFloat(s, 32) default: return s } if err != nil { beeLogger.Log.Warnf("Invalid default value type '%s': %s", typ, s) return s } return ret } go fmt // Copyright 2013 bee authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain // a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the // License for the specific language governing permissions and limitations // under the License. package swaggergen import ( "encoding/json" "errors" "fmt" "go/ast" "go/parser" "go/token" "os" "path" "path/filepath" "reflect" "regexp" "runtime" "strconv" "strings" "unicode" "gopkg.in/yaml.v2" "github.com/astaxie/beego/swagger" "github.com/astaxie/beego/utils" beeLogger "github.com/beego/bee/logger" bu "github.com/beego/bee/utils" ) const ( ajson = "application/json" axml = "application/xml" aplain = "text/plain" ahtml = "text/html" ) var pkgCache map[string]struct{} //pkg:controller:function:comments comments: key:value var controllerComments map[string]string var importlist map[string]string var controllerList map[string]map[string]*swagger.Item //controllername Paths items var modelsList map[string]map[string]swagger.Schema var rootapi swagger.Swagger var astPkgs map[string]*ast.Package // refer to builtin.go var basicTypes = map[string]string{ "bool": "boolean:", "uint": "integer:int32", "uint8": "integer:int32", "uint16": "integer:int32", "uint32": "integer:int32", "uint64": "integer:int64", "int": "integer:int64", "int8": "integer:int32", "int16": "integer:int32", "int32": "integer:int32", "int64": "integer:int64", "uintptr": "integer:int64", "float32": "number:float", "float64": "number:double", "string": "string:", "complex64": "number:float", "complex128": "number:double", "byte": "string:byte", "rune": "string:byte", // builtin golang objects "time.Time": "string:string", } var stdlibObject = map[string]string{ "&{time Time}": "time.Time", } func init() { pkgCache = make(map[string]struct{}) controllerComments = make(map[string]string) importlist = make(map[string]string) controllerList = make(map[string]map[string]*swagger.Item) modelsList = make(map[string]map[string]swagger.Schema) astPkgs = map[string]*ast.Package{} } func ParsePackagesFromDir(dirpath string) { c := make(chan error) go func() { filepath.Walk(dirpath, func(fpath string, fileInfo 
os.FileInfo, err error) error {
			if err != nil {
				return nil
			}
			if !fileInfo.IsDir() {
				return nil
			}
			if !strings.Contains(fpath, "vendor") && !strings.Contains(fpath, "tests") {
				err = parsePackageFromDir(fpath)
				if err != nil {
					// Send the error through the channel and continue walking
					c <- fmt.Errorf("Error while parsing directory: %s", err.Error())
					return nil
				}
			}
			return nil
		})
		close(c)
	}()
	for err := range c {
		beeLogger.Log.Warnf("%s", err)
	}
}

// parsePackageFromDir parses every Go file (comments included) directly under
// path and merges the resulting packages into the package-level astPkgs cache.
func parsePackageFromDir(path string) error {
	fileSet := token.NewFileSet()
	folderPkgs, err := parser.ParseDir(fileSet, path, func(info os.FileInfo) bool {
		name := info.Name()
		return !info.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go")
	}, parser.ParseComments)
	if err != nil {
		return err
	}
	for k, v := range folderPkgs {
		astPkgs[k] = v
	}
	return nil
}

// GenerateDocs parses routers/router.go under curpath, collects the @-prefixed
// API annotations and namespace registrations, and writes the resulting
// swagger spec to swagger/swagger.json and swagger/swagger.yml.
func GenerateDocs(curpath string) {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, filepath.Join(curpath, "routers", "router.go"), nil, parser.ParseComments)
	if err != nil {
		beeLogger.Log.Fatalf("Error while parsing router.go: %s", err)
	}
	rootapi.Infos = swagger.Information{}
	rootapi.SwaggerVersion = "2.0"
	// Analyse API comments
	if f.Comments != nil {
		for _, c := range f.Comments {
			for _, s := range strings.Split(c.Text(), "\n") {
				if strings.HasPrefix(s, "@APIVersion") {
					rootapi.Infos.Version = strings.TrimSpace(s[len("@APIVersion"):])
				} else if strings.HasPrefix(s, "@Title") {
					rootapi.Infos.Title = strings.TrimSpace(s[len("@Title"):])
				} else if strings.HasPrefix(s, "@Description") {
					rootapi.Infos.Description = strings.TrimSpace(s[len("@Description"):])
				} else if strings.HasPrefix(s, "@TermsOfServiceUrl") {
					rootapi.Infos.TermsOfService = strings.TrimSpace(s[len("@TermsOfServiceUrl"):])
				} else if strings.HasPrefix(s, "@Contact") {
					rootapi.Infos.Contact.EMail = strings.TrimSpace(s[len("@Contact"):])
				} else if strings.HasPrefix(s, "@Name") {
					rootapi.Infos.Contact.Name = strings.TrimSpace(s[len("@Name"):])
				} else if strings.HasPrefix(s, "@URL") {
					rootapi.Infos.Contact.URL = strings.TrimSpace(s[len("@URL"):])
				} else if strings.HasPrefix(s, "@LicenseUrl") {
					// NOTE: @LicenseUrl must be matched before @License (prefix overlap).
					if rootapi.Infos.License == nil {
						rootapi.Infos.License = &swagger.License{URL: strings.TrimSpace(s[len("@LicenseUrl"):])}
					} else {
						rootapi.Infos.License.URL = strings.TrimSpace(s[len("@LicenseUrl"):])
					}
				} else if strings.HasPrefix(s, "@License") {
					if rootapi.Infos.License == nil {
						rootapi.Infos.License = &swagger.License{Name: strings.TrimSpace(s[len("@License"):])}
					} else {
						rootapi.Infos.License.Name = strings.TrimSpace(s[len("@License"):])
					}
				} else if strings.HasPrefix(s, "@Schemes") {
					rootapi.Schemes = strings.Split(strings.TrimSpace(s[len("@Schemes"):]), ",")
				} else if strings.HasPrefix(s, "@Host") {
					rootapi.Host = strings.TrimSpace(s[len("@Host"):])
				}
			}
		}
	}
	// Analyse controller package
	for _, im := range f.Imports {
		localName := ""
		if im.Name != nil {
			localName = im.Name.Name
		}
		analyseControllerPkg(path.Join(curpath, "vendor"), localName, im.Path.Value)
	}
	for _, d := range f.Decls {
		switch specDecl := d.(type) {
		case *ast.FuncDecl:
			for _, l := range specDecl.Body.List {
				switch stmt := l.(type) {
				case *ast.AssignStmt:
					for _, l := range stmt.Rhs {
						if v, ok := l.(*ast.CallExpr); ok {
							// Analyse NewNamespace, it will return version and the subfunction
							if selName := v.Fun.(*ast.SelectorExpr).Sel.String(); selName != "NewNamespace" {
								continue
							}
							version, params := analyseNewNamespace(v)
							if rootapi.BasePath == "" && version != "" {
								rootapi.BasePath = version
							}
							for _, p := range params {
								switch pp := p.(type) {
								case *ast.CallExpr:
									controllerName := ""
									if selname := pp.Fun.(*ast.SelectorExpr).Sel.String(); selname == "NSNamespace" {
										s, params := analyseNewNamespace(pp)
										for _, sp := range params {
											switch pp := sp.(type) {
											case *ast.CallExpr:
												if pp.Fun.(*ast.SelectorExpr).Sel.String() == "NSInclude" {
													controllerName = analyseNSInclude(s, pp)
													if v, ok := controllerComments[controllerName]; ok {
														rootapi.Tags = append(rootapi.Tags, swagger.Tag{
															Name:        strings.Trim(s, "/"),
															Description: v,
														})
													}
												}
											}
										}
									} else if selname == "NSInclude" {
										controllerName = analyseNSInclude("", pp)
										if v, ok := controllerComments[controllerName]; ok {
											rootapi.Tags = append(rootapi.Tags, swagger.Tag{
												Name:        controllerName, // if the NSInclude has no prefix, we use the controllername as the tag
												Description: v,
											})
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}
	// Error deliberately ignored: the directory may already exist.
	os.Mkdir(path.Join(curpath, "swagger"), 0755)
	fd, err := os.Create(path.Join(curpath, "swagger", "swagger.json"))
	if err != nil {
		panic(err)
	}
	fdyml, err := os.Create(path.Join(curpath, "swagger", "swagger.yml"))
	if err != nil {
		panic(err)
	}
	defer fdyml.Close()
	defer fd.Close()
	// Check each marshal/write error with its own value: the original code
	// panicked with `err` even when only `erryml` was non-nil (nil panic value).
	dt, err := json.MarshalIndent(rootapi, "", " ")
	if err != nil {
		panic(err)
	}
	dtyml, erryml := yaml.Marshal(rootapi)
	if erryml != nil {
		panic(erryml)
	}
	if _, err = fd.Write(dt); err != nil {
		panic(err)
	}
	if _, erryml = fdyml.Write(dtyml); erryml != nil {
		panic(erryml)
	}
}

// analyseNewNamespace returns version and the others params
func analyseNewNamespace(ce *ast.CallExpr) (first string, others []ast.Expr) {
	for i, p := range ce.Args {
		if i == 0 {
			switch pp := p.(type) {
			case *ast.BasicLit:
				first = strings.Trim(pp.Value, `"`)
			}
			continue
		}
		others = append(others, p)
	}
	return
}

// analyseNSInclude resolves an NSInclude(...) call: it tags every collected
// operation of the referenced controller and registers its routes (prefixed
// with baseurl) into rootapi.Paths. It returns the controller name.
func analyseNSInclude(baseurl string, ce *ast.CallExpr) string {
	cname := ""
	for _, p := range ce.Args {
		x := p.(*ast.UnaryExpr).X.(*ast.CompositeLit).Type.(*ast.SelectorExpr)
		if v, ok := importlist[fmt.Sprint(x.X)]; ok {
			cname = v + x.Sel.Name
		}
		if apis, ok := controllerList[cname]; ok {
			for rt, item := range apis {
				tag := ""
				if baseurl != "" {
					rt = baseurl + rt
					tag = strings.Trim(baseurl, "/")
				} else {
					tag = cname
				}
				if item.Get != nil {
					item.Get.Tags = []string{tag}
				}
				if item.Post != nil {
					item.Post.Tags = []string{tag}
				}
				if item.Put != nil {
					item.Put.Tags = []string{tag}
				}
				if item.Patch != nil {
					item.Patch.Tags = []string{tag}
				}
				if item.Head != nil {
					item.Head.Tags = []string{tag}
				}
				if item.Delete != nil {
					item.Delete.Tags = []string{tag}
				}
				if item.Options != nil {
					item.Options.Tags = []string{tag}
				}
				if len(rootapi.Paths) == 0 {
					rootapi.Paths = make(map[string]*swagger.Item)
				}
				rt = urlReplace(rt)
				rootapi.Paths[rt] = item
			}
		}
	}
	return cname
}

// analyseControllerPkg locates pkgpath (vendor dir first, then every GOPATH),
// parses it once (guarded by pkgCache) and feeds every method with a pointer
// receiver to parserComments and every struct doc comment to controllerComments.
func analyseControllerPkg(vendorPath, localName, pkgpath string) {
	pkgpath = strings.Trim(pkgpath, "\"")
	if isSystemPackage(pkgpath) {
		return
	}
	if pkgpath == "github.com/astaxie/beego" {
		return
	}
	if localName != "" {
		importlist[localName] = pkgpath
	} else {
		pps := strings.Split(pkgpath, "/")
		importlist[pps[len(pps)-1]] = pkgpath
	}
	gopaths := bu.GetGOPATHs()
	if len(gopaths) == 0 {
		beeLogger.Log.Fatal("GOPATH environment variable is not set or empty")
	}
	pkgRealpath := ""
	wg, _ := filepath.EvalSymlinks(filepath.Join(vendorPath, pkgpath))
	if utils.FileExists(wg) {
		pkgRealpath = wg
	} else {
		wgopath := gopaths
		for _, wg := range wgopath {
			wg, _ = filepath.EvalSymlinks(filepath.Join(wg, "src", pkgpath))
			if utils.FileExists(wg) {
				pkgRealpath = wg
				break
			}
		}
	}
	if pkgRealpath != "" {
		if _, ok := pkgCache[pkgpath]; ok {
			return
		}
		pkgCache[pkgpath] = struct{}{}
	} else {
		beeLogger.Log.Fatalf("Package '%s' does not exist in the GOPATH or vendor path", pkgpath)
	}
	fileSet := token.NewFileSet()
	// Named pkgs (not astPkgs) so the package-level astPkgs map is not shadowed.
	pkgs, err := parser.ParseDir(fileSet, pkgRealpath, func(info os.FileInfo) bool {
		name := info.Name()
		return !info.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go")
	}, parser.ParseComments)
	if err != nil {
		beeLogger.Log.Fatalf("Error while parsing dir at '%s': %s", pkgpath, err)
	}
	for _, pkg := range pkgs {
		for _, fl := range pkg.Files {
			for _, d := range fl.Decls {
				switch specDecl := d.(type) {
				case *ast.FuncDecl:
					if specDecl.Recv != nil && len(specDecl.Recv.List) > 0 {
						if t, ok := specDecl.Recv.List[0].Type.(*ast.StarExpr); ok {
							// Parse controller method
							parserComments(specDecl, fmt.Sprint(t.X), pkgpath)
						}
					}
				case *ast.GenDecl:
					if specDecl.Tok == token.TYPE {
						for _, s := range specDecl.Specs {
							switch tp := s.(*ast.TypeSpec).Type.(type) {
							case *ast.StructType:
								_ = tp.Struct
								// Parse controller definition comments
								if strings.TrimSpace(specDecl.Doc.Text()) != "" {
									controllerComments[pkgpath+s.(*ast.TypeSpec).Name.String()] = specDecl.Doc.Text()
								}
							}
						}
					}
				}
			}
		}
	}
}

// isSystemPackage reports whether pkgpath resolves under GOROOT/src (or the
// legacy GOROOT/src/pkg layout), i.e. is part of the standard library.
func isSystemPackage(pkgpath string) bool {
	goroot := os.Getenv("GOROOT")
	if goroot == "" {
		goroot = runtime.GOROOT()
	}
	if goroot == "" {
		beeLogger.Log.Fatalf("GOROOT environment variable is not set or empty")
	}
	wg, _ := filepath.EvalSymlinks(filepath.Join(goroot, "src", "pkg", pkgpath))
	if utils.FileExists(wg) {
		return true
	}
	//TODO(zh):support go1.4
	wg, _ = filepath.EvalSymlinks(filepath.Join(goroot, "src", pkgpath))
	return utils.FileExists(wg)
}

// peekNextSplitString returns the first whitespace-delimited token of ss and
// the index just past it (len(ss) when no whitespace is found).
func peekNextSplitString(ss string) (s string, spacePos int) {
	spacePos = strings.IndexFunc(ss, unicode.IsSpace)
	if spacePos < 0 {
		s = ss
		spacePos = len(ss)
	} else {
		s = strings.TrimSpace(ss[:spacePos])
	}
	return
}

// parserComments parses the doc comments of a controller method and, when a
// @router annotation is present, registers the resulting swagger operation
// in controllerList under pkgpath+controllerName.
func parserComments(f *ast.FuncDecl, controllerName, pkgpath string) error {
	var routerPath string
	var HTTPMethod string
	opts := swagger.Operation{
		Responses: make(map[string]swagger.Response),
	}
	funcName := f.Name.String()
	comments := f.Doc
	funcParamMap := buildParamMap(f.Type.Params)
	//TODO: resultMap := buildParamMap(f.Type.Results)
	if comments != nil && comments.List != nil {
		for _, c := range comments.List {
			t := strings.TrimSpace(strings.TrimLeft(c.Text, "//"))
			if strings.HasPrefix(t, "@router") {
				elements := strings.TrimSpace(t[len("@router"):])
				e1 := strings.SplitN(elements, " ", 2)
				if len(e1) < 1 {
					return errors.New("you should have router information")
				}
				routerPath = e1[0]
				if len(e1) == 2 && e1[1] != "" {
					e1 = strings.SplitN(e1[1], " ", 2)
					HTTPMethod = strings.ToUpper(strings.Trim(e1[0], "[]"))
				} else {
					HTTPMethod = "GET"
				}
			} else if strings.HasPrefix(t, "@Title") {
				opts.OperationID = controllerName + "." + strings.TrimSpace(t[len("@Title"):])
			} else if strings.HasPrefix(t, "@Description") {
				opts.Description = strings.TrimSpace(t[len("@Description"):])
			} else if strings.HasPrefix(t, "@Summary") {
				opts.Summary = strings.TrimSpace(t[len("@Summary"):])
			} else if strings.HasPrefix(t, "@Success") {
				ss := strings.TrimSpace(t[len("@Success"):])
				rs := swagger.Response{}
				respCode, pos := peekNextSplitString(ss)
				ss = strings.TrimSpace(ss[pos:])
				respType, pos := peekNextSplitString(ss)
				if respType == "{object}" || respType == "{array}" {
					isArray := respType == "{array}"
					ss = strings.TrimSpace(ss[pos:])
					schemaName, pos := peekNextSplitString(ss)
					if schemaName == "" {
						beeLogger.Log.Fatalf("[%s.%s] Schema must follow {object} or {array}", controllerName, funcName)
					}
					if strings.HasPrefix(schemaName, "[]") {
						schemaName = schemaName[2:]
						isArray = true
					}
					schema := swagger.Schema{}
					if sType, ok := basicTypes[schemaName]; ok {
						typeFormat := strings.Split(sType, ":")
						schema.Type = typeFormat[0]
						schema.Format = typeFormat[1]
					} else {
						m, mod, realTypes := getModel(schemaName)
						schema.Ref = "#/definitions/" + m
						if _, ok := modelsList[pkgpath+controllerName]; !ok {
							modelsList[pkgpath+controllerName] = make(map[string]swagger.Schema)
						}
						modelsList[pkgpath+controllerName][schemaName] = mod
						appendModels(pkgpath, controllerName, realTypes)
					}
					if isArray {
						rs.Schema = &swagger.Schema{
							Type:  "array",
							Items: &schema,
						}
					} else {
						rs.Schema = &schema
					}
					rs.Description = strings.TrimSpace(ss[pos:])
				} else {
					rs.Description = strings.TrimSpace(ss)
				}
				opts.Responses[respCode] = rs
			} else if strings.HasPrefix(t, "@Param") {
				para := swagger.Parameter{}
				p := getparams(strings.TrimSpace(t[len("@Param "):]))
				if len(p) < 4 {
					beeLogger.Log.Fatal(controllerName + "_" + funcName + "'s comments @Param should have at least 4 params")
				}
				paramNames := strings.SplitN(p[0], "=>", 2)
				para.Name = paramNames[0]
				funcParamName := para.Name
				if len(paramNames) > 1 {
					funcParamName = paramNames[1]
				}
				// paramType is "" when the comment name has no matching func param;
				// pre-existing behavior for "auto" in that case is kept.
				paramType, ok := funcParamMap[funcParamName]
				if ok {
					delete(funcParamMap, funcParamName)
				}
				switch p[1] {
				case "query", "header", "path", "formData", "body":
					// valid swagger locations
				default:
					beeLogger.Log.Warnf("[%s.%s] Unknown param location: %s. Possible values are `query`, `header`, `path`, `formData` or `body`.\n", controllerName, funcName, p[1])
				}
				para.In = p[1]
				pp := strings.Split(p[2], ".")
				typ := pp[len(pp)-1]
				if len(pp) >= 2 {
					m, mod, realTypes := getModel(p[2])
					para.Schema = &swagger.Schema{
						Ref: "#/definitions/" + m,
					}
					if _, ok := modelsList[pkgpath+controllerName]; !ok {
						modelsList[pkgpath+controllerName] = make(map[string]swagger.Schema)
					}
					modelsList[pkgpath+controllerName][typ] = mod
					appendModels(pkgpath, controllerName, realTypes)
				} else {
					if typ == "auto" {
						typ = paramType
					}
					setParamType(&para, typ, pkgpath, controllerName)
				}
				switch len(p) {
				case 5:
					para.Required, _ = strconv.ParseBool(p[3])
					para.Description = strings.Trim(p[4], `" `)
				case 6:
					para.Default = str2RealType(p[3], para.Type)
					para.Required, _ = strconv.ParseBool(p[4])
					para.Description = strings.Trim(p[5], `" `)
				default:
					para.Description = strings.Trim(p[3], `" `)
				}
				opts.Parameters = append(opts.Parameters, para)
			} else if strings.HasPrefix(t, "@Failure") {
				rs := swagger.Response{}
				st := strings.TrimSpace(t[len("@Failure"):])
				var cd []rune
				var start bool
				for i, s := range st {
					if unicode.IsSpace(s) {
						if start {
							rs.Description = strings.TrimSpace(st[i+1:])
							break
						}
						continue
					}
					start = true
					cd = append(cd, s)
				}
				opts.Responses[string(cd)] = rs
			} else if strings.HasPrefix(t, "@Deprecated") {
				opts.Deprecated, _ = strconv.ParseBool(strings.TrimSpace(t[len("@Deprecated"):]))
			} else if strings.HasPrefix(t, "@Accept") {
				accepts := strings.Split(strings.TrimSpace(strings.TrimSpace(t[len("@Accept"):])), ",")
				for _, a := range accepts {
					switch a {
					case "json":
						opts.Consumes = append(opts.Consumes, ajson)
						opts.Produces = append(opts.Produces, ajson)
					case "xml":
						opts.Consumes = append(opts.Consumes, axml)
						opts.Produces = append(opts.Produces, axml)
					case "plain":
						opts.Consumes = append(opts.Consumes, aplain)
						opts.Produces = append(opts.Produces, aplain)
					case "html":
						opts.Consumes = append(opts.Consumes, ahtml)
						opts.Produces = append(opts.Produces, ahtml)
					}
				}
			}
		}
	}
	if routerPath != "" {
		//Go over function parameters which were not mapped and create swagger params for them
		for name, typ := range funcParamMap {
			para := swagger.Parameter{}
			para.Name = name
			setParamType(&para, typ, pkgpath, controllerName)
			if paramInPath(name, routerPath) {
				para.In = "path"
			} else {
				para.In = "query"
			}
			opts.Parameters = append(opts.Parameters, para)
		}
		var item *swagger.Item
		if itemList, ok := controllerList[pkgpath+controllerName]; ok {
			if it, ok := itemList[routerPath]; !ok {
				item = &swagger.Item{}
			} else {
				item = it
			}
		} else {
			controllerList[pkgpath+controllerName] = make(map[string]*swagger.Item)
			item = &swagger.Item{}
		}
		switch HTTPMethod {
		case "GET":
			item.Get = &opts
		case "POST":
			item.Post = &opts
		case "PUT":
			item.Put = &opts
		case "PATCH":
			item.Patch = &opts
		case "DELETE":
			item.Delete = &opts
		case "HEAD":
			item.Head = &opts
		case "OPTIONS":
			item.Options = &opts
		}
		controllerList[pkgpath+controllerName][routerPath] = item
	}
	return nil
}

// setParamType fills para's Type/Format (or Schema for model types) from typ,
// handling the "[]" slice prefix and registering referenced models.
func setParamType(para *swagger.Parameter, typ string, pkgpath, controllerName string) {
	isArray := false
	paraType := ""
	paraFormat := ""
	if strings.HasPrefix(typ, "[]") {
		typ = typ[2:]
		isArray = true
	}
	if typ == "string" || typ == "number" || typ == "integer" || typ == "boolean" ||
		typ == "array" || typ == "file" {
		paraType = typ
	} else if sType, ok := basicTypes[typ]; ok {
		typeFormat := strings.Split(sType, ":")
		paraType = typeFormat[0]
		paraFormat = typeFormat[1]
	} else {
		m, mod, realTypes := getModel(typ)
		para.Schema = &swagger.Schema{
			Ref: "#/definitions/" + m,
		}
		if _, ok := modelsList[pkgpath+controllerName]; !ok {
			modelsList[pkgpath+controllerName] = make(map[string]swagger.Schema)
		}
		modelsList[pkgpath+controllerName][typ] = mod
		appendModels(pkgpath, controllerName, realTypes)
	}
	if isArray {
		para.Type = "array"
		para.Items = &swagger.ParameterItems{
			Type:   paraType,
			Format: paraFormat,
		}
	} else {
		para.Type = paraType
		para.Format = paraFormat
	}
}

// paramInPath reports whether the beego route contains a ":name" placeholder.
func paramInPath(name, route string) bool {
	return strings.HasSuffix(route, ":"+name) ||
		strings.Contains(route, ":"+name+"/")
}

// getFunctionParamType renders an AST parameter type as a string
// ("[]T", "pkg.T", "T"); unknown node kinds yield "".
func getFunctionParamType(t ast.Expr) string {
	switch paramType := t.(type) {
	case *ast.Ident:
		return paramType.Name
	// case *ast.Ellipsis:
	// 	result := getFunctionParamType(paramType.Elt)
	// 	result.array = true
	// 	return result
	case *ast.ArrayType:
		return "[]" + getFunctionParamType(paramType.Elt)
	case *ast.StarExpr:
		return getFunctionParamType(paramType.X)
	case *ast.SelectorExpr:
		return getFunctionParamType(paramType.X) + "." + paramType.Sel.Name
	default:
		return ""
	}
}

// buildParamMap maps each parameter name of list to its rendered type string;
// unnamed parameters get a positional numeric key.
func buildParamMap(list *ast.FieldList) map[string]string {
	i := 0
	result := map[string]string{}
	if list != nil {
		funcParams := list.List
		for _, fparam := range funcParams {
			param := getFunctionParamType(fparam.Type)
			var paramName string
			if len(fparam.Names) > 0 {
				paramName = fparam.Names[0].Name
			} else {
				paramName = fmt.Sprint(i)
				i++
			}
			result[paramName] = param
		}
	}
	return result
}

// analisys params return []string
// @Param query form string true "The email for login"
// [query form string true "The email for login"]
func getparams(str string) []string {
	var s []rune
	var start bool
	var r []string
	var quoted int8
	for _, c := range []rune(str) {
		if unicode.IsSpace(c) && quoted == 0 {
			if !start {
				continue
			}
			start = false
			r = append(r, string(s))
			s = make([]rune, 0)
			continue
		}
		start = true
		if c == '"' {
			quoted ^= 1
			continue
		}
		s = append(s, c)
	}
	if len(s) > 0 {
		r = append(r, string(s))
	}
	return r
}

// getModel looks str's last dotted segment up in the cached astPkgs, builds
// its swagger schema via parseObject and registers it in rootapi.Definitions.
func getModel(str string) (objectname string, m swagger.Schema, realTypes []string) {
	strs := strings.Split(str, ".")
	objectname = strs[len(strs)-1]
	packageName := ""
	m.Type = "object"
	for _, pkg := range astPkgs {
		for _, fl := range pkg.Files {
			for k, d := range fl.Scope.Objects {
				if d.Kind == ast.Typ {
					if k != objectname {
						continue
					}
					packageName = pkg.Name
					parseObject(d, k, &m, &realTypes, astPkgs, pkg.Name)
				}
			}
		}
	}
	if m.Title == "" {
		beeLogger.Log.Warnf("Cannot find the object: %s", str)
		// TODO remove when all type have been supported
		//os.Exit(1)
	}
	if len(rootapi.Definitions) == 0 {
		rootapi.Definitions = make(map[string]swagger.Schema)
	}
	objectname = packageName + "." + objectname
	rootapi.Definitions[objectname] = m
	return
}

// defaultValueRegexp extracts the value of a `doc:"default(...)"` struct tag.
// Compiled once instead of per struct field inside parseObject's loop.
var defaultValueRegexp = regexp.MustCompile(`default\((.*)\)`)

// parseObject fills m with the swagger schema of the struct type behind d,
// honoring json/thrift/required/description/ignore struct tags, recursing
// into embedded fields and appending referenced type names to realTypes.
func parseObject(d *ast.Object, k string, m *swagger.Schema, realTypes *[]string, astPkgs map[string]*ast.Package, packageName string) {
	ts, ok := d.Decl.(*ast.TypeSpec)
	if !ok {
		beeLogger.Log.Fatalf("Unknown type without TypeSec: %v\n", d)
	}
	// TODO support other types, such as `ArrayType`, `MapType`, `InterfaceType` etc...
	st, ok := ts.Type.(*ast.StructType)
	if !ok {
		return
	}
	m.Title = k
	if st.Fields.List != nil {
		m.Properties = make(map[string]swagger.Propertie)
		for _, field := range st.Fields.List {
			isSlice, realType, sType := typeAnalyser(field)
			if (isSlice && isBasicType(realType)) || sType == "object" {
				if len(strings.Split(realType, " ")) > 1 {
					realType = strings.Replace(realType, " ", ".", -1)
					realType = strings.Replace(realType, "&", "", -1)
					realType = strings.Replace(realType, "{", "", -1)
					realType = strings.Replace(realType, "}", "", -1)
				} else {
					realType = packageName + "." + realType
				}
			}
			*realTypes = append(*realTypes, realType)
			mp := swagger.Propertie{}
			if isSlice {
				mp.Type = "array"
				if isBasicType(realType) {
					typeFormat := strings.Split(sType, ":")
					mp.Items = &swagger.Propertie{
						Type:   typeFormat[0],
						Format: typeFormat[1],
					}
				} else {
					mp.Items = &swagger.Propertie{
						Ref: "#/definitions/" + realType,
					}
				}
			} else {
				if sType == "object" {
					mp.Ref = "#/definitions/" + realType
				} else if isBasicType(realType) {
					typeFormat := strings.Split(sType, ":")
					mp.Type = typeFormat[0]
					mp.Format = typeFormat[1]
				} else if realType == "map" {
					typeFormat := strings.Split(sType, ":")
					mp.AdditionalProperties = &swagger.Propertie{
						Type:   typeFormat[0],
						Format: typeFormat[1],
					}
				}
			}
			if field.Names != nil {
				// set property name as field name
				var name = field.Names[0].Name
				// if no tag skip tag processing
				if field.Tag == nil {
					m.Properties[name] = mp
					continue
				}
				var tagValues []string
				stag := reflect.StructTag(strings.Trim(field.Tag.Value, "`"))
				defaultValue := stag.Get("doc")
				if defaultValue != "" {
					if defaultValueRegexp.MatchString(defaultValue) {
						res := defaultValueRegexp.FindStringSubmatch(defaultValue)
						mp.Default = str2RealType(res[1], realType)
					} else {
						beeLogger.Log.Warnf("Invalid default value: %s", defaultValue)
					}
				}
				tag := stag.Get("json")
				if tag != "" {
					tagValues = strings.Split(tag, ",")
				}
				// dont add property if json tag first value is "-"
				if len(tagValues) == 0 || tagValues[0] != "-" {
					// set property name to the left most json tag value only if is not omitempty
					if len(tagValues) > 0 && tagValues[0] != "omitempty" {
						name = tagValues[0]
					}
					if thrifttag := stag.Get("thrift"); thrifttag != "" {
						ts := strings.Split(thrifttag, ",")
						if ts[0] != "" {
							name = ts[0]
						}
					}
					if required := stag.Get("required"); required != "" {
						m.Required = append(m.Required, name)
					}
					if desc := stag.Get("description"); desc != "" {
						mp.Description = desc
					}
					m.Properties[name] = mp
				}
				if ignore := stag.Get("ignore"); ignore != "" {
					continue
				}
			} else {
				// Anonymous (embedded) field: merge the embedded type's schema.
				for _, pkg := range astPkgs {
					for _, fl := range pkg.Files {
						for nameOfObj, obj := range fl.Scope.Objects {
							if obj.Name == fmt.Sprint(field.Type) {
								parseObject(obj, nameOfObj, m, realTypes, astPkgs, pkg.Name)
							}
						}
					}
				}
			}
		}
	}
}

// typeAnalyser classifies a struct field's type into (isSlice, rendered type
// name, swagger "type:format" or "object").
func typeAnalyser(f *ast.Field) (isSlice bool, realType, swaggerType string) {
	if arr, ok := f.Type.(*ast.ArrayType); ok {
		if isBasicType(fmt.Sprint(arr.Elt)) {
			return false, fmt.Sprintf("[]%v", arr.Elt), basicTypes[fmt.Sprint(arr.Elt)]
		}
		if mp, ok := arr.Elt.(*ast.MapType); ok {
			return false, fmt.Sprintf("map[%v][%v]", mp.Key, mp.Value), "object"
		}
		if star, ok := arr.Elt.(*ast.StarExpr); ok {
			return true, fmt.Sprint(star.X), "object"
		}
		return true, fmt.Sprint(arr.Elt), "object"
	}
	switch t := f.Type.(type) {
	case *ast.StarExpr:
		return false, fmt.Sprint(t.X), "object"
	case *ast.MapType:
		val := fmt.Sprintf("%v", t.Value)
		if isBasicType(val) {
			return false, "map", basicTypes[val]
		}
		return false, val, "object"
	}
	basicType := fmt.Sprint(f.Type)
	if object, isStdLibObject := stdlibObject[basicType]; isStdLibObject {
		basicType = object
	}
	if k, ok := basicTypes[basicType]; ok {
		return false, basicType, k
	}
	return false, basicType, "object"
}

// isBasicType reports whether Type has a swagger basic-type mapping.
func isBasicType(Type string) bool {
	_, ok := basicTypes[Type]
	return ok
}

// appendModels recursively registers every referenced non-basic, non-map
// model type in modelsList for the given controller.
func appendModels(pkgpath, controllerName string, realTypes []string) {
	for _, realType := range realTypes {
		if realType != "" && !isBasicType(strings.TrimLeft(realType, "[]")) &&
			!strings.HasPrefix(realType, "map") && !strings.HasPrefix(realType, "&") {
			if _, ok := modelsList[pkgpath+controllerName][realType]; ok {
				continue
			}
			_, mod, newRealTypes := getModel(realType)
			modelsList[pkgpath+controllerName][realType] = mod
			appendModels(pkgpath, controllerName, newRealTypes)
		}
	}
}

// urlReplace converts beego-style ":param" and "?:param" path segments into
// swagger-style "{param}" segments.
func urlReplace(src string) string {
	pt := strings.Split(src, "/")
	for i, p := range pt {
		if len(p) > 0 {
			if p[0] == ':' {
				pt[i] = "{" + p[1:] + "}"
			} else if len(p) > 1 && p[0] == '?' && p[1] == ':' {
				// len check prevents an index-out-of-range panic on a bare "?" segment
				pt[i] = "{" + p[2:] + "}"
			}
		}
	}
	return strings.Join(pt, "/")
}

// str2RealType converts the string default value s to the Go value matching
// the swagger type typ; on parse failure (or unknown typ) s is returned as-is.
func str2RealType(s string, typ string) interface{} {
	var err error
	var ret interface{}
	switch typ {
	case "int", "int64", "int32", "int16", "int8":
		ret, err = strconv.Atoi(s)
	case "bool":
		ret, err = strconv.ParseBool(s)
	case "float64":
		ret, err = strconv.ParseFloat(s, 64)
	case "float32":
		ret, err = strconv.ParseFloat(s, 32)
	default:
		return s
	}
	if err != nil {
		beeLogger.Log.Warnf("Invalid default value type '%s': %s", typ, s)
		return s
	}
	return ret
}
package irc

import (
	"bytes"
	"strings"
	"testing"
)

// s2b converts a string literal to the []byte form the parser works with.
func s2b(s string) []byte { return []byte(s) }

// messageTests pairs raw IRC lines with their fully-parsed representation
// plus the expected IsHostMask / IsServer classification.
var messageTests = [...]*struct {
	rawMsg   string
	parsed   *Msg
	hostmask bool
	server   bool
}{
	{
		":syrk!kalt@millennium.stealth.net QUIT :Gone to have lunch",
		&Msg{
			cmd:      s2b("QUIT"),
			trailing: s2b("Gone to have lunch"),
			name:     s2b("syrk"),
			user:     s2b("kalt"),
			host:     s2b("millennium.stealth.net"),
		},
		true, false,
	},
	{
		":Trillian SQUIT cm22.eng.umd.edu :Server out of control",
		&Msg{
			cmd:      s2b("SQUIT"),
			trailing: s2b("Server out of control"),
			name:     s2b("Trillian"),
			user:     nil,
			host:     nil,
			params:   [16][]byte{s2b("cm22.eng.umd.edu")},
		},
		false, true,
	},
	{
		":WiZ!jto@tolsun.oulu.fi PART #playzone :I lost",
		&Msg{
			cmd:      s2b("PART"),
			trailing: s2b("I lost"),
			name:     s2b("WiZ"),
			user:     s2b("jto"),
			host:     s2b("tolsun.oulu.fi"),
			params:   [16][]byte{s2b("#playzone")},
		},
		true, false,
	},
	{
		":WiZ!jto@tolsun.oulu.fi MODE #eu-opers -l",
		&Msg{
			cmd:      s2b("MODE"),
			trailing: nil,
			name:     s2b("WiZ"),
			user:     s2b("jto"),
			host:     s2b("tolsun.oulu.fi"),
			params:   [16][]byte{s2b("#eu-opers"), s2b("-l")},
		},
		true, false,
	},
	{
		"MODE &oulu +b *!*@*.edu +e *!*@*.bu.edu",
		&Msg{
			cmd:      s2b("MODE"),
			trailing: nil,
			name:     nil,
			user:     nil,
			host:     nil,
			params: [16][]byte{
				s2b("&oulu"),
				s2b("+b"),
				s2b("*!*@*.edu"),
				s2b("+e"),
				s2b("*!*@*.bu.edu"),
			},
		},
		false, true,
	},
	{
		"PRIVMSG #channel :Message with :colons!",
		&Msg{
			cmd:      s2b("PRIVMSG"),
			trailing: s2b("Message with :colons!"),
			name:     nil,
			user:     nil,
			host:     nil,
			params:   [16][]byte{s2b("#channel")},
		},
		false, true, // TODO PRIVMSG should not be server
	},
	{
		":irc.vives.lan 251 test :There are 2 users and 0 services on 1 servers",
		&Msg{
			cmd:      s2b("251"),
			trailing: s2b("There are 2 users and 0 services on 1 servers"),
			name:     s2b("irc.vives.lan"),
			user:     nil,
			host:     nil,
			params:   [16][]byte{s2b("test")},
		},
		false, true,
	},
	{
		":irc.vives.lan 376 test :End of MOTD command",
		&Msg{
			cmd:      s2b("376"),
			trailing: s2b("End of MOTD command"),
			name:     s2b("irc.vives.lan"),
			user:     nil,
			host:     nil,
			params:   [16][]byte{s2b("test")},
		},
		false, true,
	},
	{
		":irc.vives.lan 250 test :Highest connection count: 1 (1 connections received)",
		&Msg{
			cmd:      s2b("250"),
			trailing: s2b("Highest connection count: 1 (1 connections received)"),
			name:     s2b("irc.vives.lan"),
			user:     nil,
			host:     nil,
			params:   [16][]byte{s2b("test")},
		},
		false, true,
	},
	{
		":sorcix!~sorcix@sorcix.users.quakenet.org PRIVMSG #viveslan :\001ACTION is testing CTCP messages!\001",
		&Msg{
			cmd:      s2b("PRIVMSG"),
			trailing: s2b("\001ACTION is testing CTCP messages!\001"),
			name:     s2b("sorcix"),
			user:     s2b("~sorcix"),
			host:     s2b("sorcix.users.quakenet.org"),
			params:   [16][]byte{s2b("#viveslan")},
		},
		true, false,
	},
	{
		":sorcix!~sorcix@sorcix.users.quakenet.org NOTICE midnightfox :\001PONG 1234567890\001",
		&Msg{
			cmd:      s2b("NOTICE"),
			trailing: s2b("\001PONG 1234567890\001"),
			name:     s2b("sorcix"),
			user:     s2b("~sorcix"),
			host:     s2b("sorcix.users.quakenet.org"),
			params:   [16][]byte{s2b("midnightfox")},
		},
		true, false,
	},
	{
		":a!b@c QUIT",
		&Msg{
			cmd:      s2b("QUIT"),
			trailing: nil,
			name:     s2b("a"),
			user:     s2b("b"),
			host:     s2b("c"),
			params:   [16][]byte{},
		},
		true, false,
	},
	{
		":a!b PRIVMSG :message",
		&Msg{
			cmd:      s2b("PRIVMSG"),
			trailing: s2b("message"),
			name:     s2b("a"),
			user:     s2b("b"),
			host:     nil,
			params:   [16][]byte{},
		},
		false, false,
	},
	{
		":a@c NOTICE ::::Hey!",
		&Msg{
			cmd:      s2b("NOTICE"),
			trailing: s2b(":::Hey!"),
			name:     s2b("a"),
			user:     nil,
			host:     s2b("c"),
			params:   [16][]byte{},
		},
		false, false,
	},
	{
		":nick PRIVMSG $@ :This message contains a\ttab!",
		&Msg{
			cmd:      s2b("PRIVMSG"),
			trailing: s2b("This message contains a\ttab!"),
			name:     s2b("nick"),
			user:     nil,
			host:     nil,
			params:   [16][]byte{s2b("$@")},
		},
		false, true, // TODO
	},
	{
		"TEST $@ param :Trailing",
		&Msg{
			cmd:      s2b("TEST"),
			trailing: s2b("Trailing"),
			name:     nil,
			user:     nil,
			host:     nil,
			params:   [16][]byte{s2b("$@"), s2b("param")},
		},
		false, true,
	},
	{
		"TOPIC #foo",
		&Msg{
			cmd:      s2b("TOPIC"),
			trailing: nil,
			name:     nil,
			user:     nil,
			host:     nil,
			params:   [16][]byte{s2b("#foo")},
		},
		false, true,
	},
	{
		":name!user@example.org PRIVMSG #test :Message with spaces at the end!  ",
		&Msg{
			cmd:      s2b("PRIVMSG"),
			trailing: s2b("Message with spaces at the end!  "),
			name:     s2b("name"),
			user:     s2b("user"),
			host:     s2b("example.org"),
			params:   [16][]byte{s2b("#test")},
		},
		true, false,
	},
}

// TestInvalidMsg verifies that messages with an empty or blank prefix are rejected.
func TestInvalidMsg(t *testing.T) {
	invalid := []string{
		": PRIVMSG test :Invalid message with empty prefix.",
		":  PRIVMSG test :Invalid message with space prefix",
	}
	for _, s := range invalid {
		m, err := NewMsg(s2b(s))
		if err == nil || m.Cmd() != nil {
			t.Error(s, "is valid")
		}
	}
}

func TestMsgCmd(t *testing.T) {
	for _, z := range messageTests {
		m, err := NewMsg(s2b(z.rawMsg))
		m.ParseAll()
		p := z.parsed
		if err != nil || !bytes.Equal(m.Cmd(), p.cmd) {
			t.Errorf("failed:%s\nparsed:%s", z.rawMsg, m.String())
		}
	}
}

func TestMsgPrefixName(t *testing.T) {
	for _, z := range messageTests {
		m, err := NewMsg(s2b(z.rawMsg))
		m.ParseAll()
		p := z.parsed
		if err != nil || !bytes.Equal(m.Name(), p.name) {
			t.Errorf("failed:%s\nparsed:%s\nm=%s p=%s", z.rawMsg, m.String(), m.name, p.name)
		}
	}
}

func TestMsgUser(t *testing.T) {
	for _, z := range messageTests {
		m, err := NewMsg(s2b(z.rawMsg))
		m.ParseAll()
		p := z.parsed
		if err != nil || !bytes.Equal(m.User(), p.user) {
			t.Errorf("failed:%s\nparsed:%s", z.rawMsg, m.String())
		}
	}
}

func TestMsgHost(t *testing.T) {
	for _, z := range messageTests {
		m, err := NewMsg(s2b(z.rawMsg))
		m.ParseAll()
		p := z.parsed
		if err != nil || !bytes.Equal(m.Host(), p.host) {
			t.Errorf("failed:%s\nparsed:%s", z.rawMsg, m.String())
		}
	}
}

func TestMsgTrailing(t *testing.T) {
	for _, z := range messageTests {
		m, err := NewMsg(s2b(z.rawMsg))
		m.ParseAll()
		p := z.parsed
		if err != nil || !bytes.Equal(m.Trailing(), p.trailing) {
			t.Errorf("failed:%s\nparsed:%s", z.rawMsg, m.String())
		}
	}
}

func TestMsgParams(t *testing.T) {
	for _, z := range messageTests {
		m, err := NewMsg(s2b(z.rawMsg))
		m.ParseAll()
		p := z.parsed
		if err != nil {
			t.Errorf("failed:%s\nparsed:%s", z.rawMsg, m.String())
		}
		// j is the count of expected params (first nil entry terminates the list).
		var j int
		var zz []byte
		for j, zz = range z.parsed.params {
			if zz == nil {
				break
			}
		}
		for i := 0; i < j; i++ {
			if !bytes.Equal(m.Params()[i], p.params[i]) {
				t.Log(z, "i=", i, "j=", j, m.Params()[i], p.params[i])
				t.Errorf("failed:%s\nparsed:%s", z.rawMsg, m.String())
			}
		}
	}
}

func TestMsgServer(t *testing.T) {
	for _, z := range messageTests {
		m, err := NewMsg(s2b(z.rawMsg))
		m.ParseAll()
		if err != nil || m.IsServer() != z.server {
			t.Errorf("failed:%s\nparsed:%s", z.rawMsg, m.String())
		}
	}
}

func TestMsgHostMask(t *testing.T) {
	for _, z := range messageTests {
		m, err := NewMsg(s2b(z.rawMsg))
		m.ParseAll()
		if err != nil || m.IsHostMask() != z.hostmask {
			t.Errorf("failed:%s\nparsed:%s", z.rawMsg, m.String())
		}
	}
}

func TestMsgString(t *testing.T) {
	for _, z := range messageTests {
		m, err := NewMsg(s2b(z.rawMsg))
		m.ParseAll()
		if err != nil || strings.HasPrefix(m.String(), "CMD: ") {
			t.Errorf("failed:%s\nparsed:%s", z.rawMsg, m.String())
		}
	}
}

func TestSetCmd(t *testing.T) {
	m := new(Msg)
	m.SetCmd(s2b("XXX"))
	if string(m.Cmd()) != "XXX" {
		t.Error(m.Cmd(), "!=", "XXX")
	}
}

func TestSetHost(t *testing.T) {
	m := new(Msg)
	m.SetHost(s2b("XXX"))
	if string(m.host) != "XXX" {
		t.Error(m.host, "!=", "XXX")
	}
}

func TestSetUser(t *testing.T) {
	m := new(Msg)
	m.SetUser(s2b("XXX"))
	if string(m.user) != "XXX" {
		t.Error(m.user, "!=", "XXX")
	}
}

func TestSetName(t *testing.T) {
	m := new(Msg)
	m.SetName(s2b("XXX"))
	if string(m.name) != "XXX" {
		t.Error(m.name, "!=", "XXX")
	}
}

func TestSetTrailing(t *testing.T) {
	m := new(Msg)
	m.SetTrailing(s2b("XXX"))
	if string(m.Trailing()) != "XXX" {
		t.Error(m.trailing, "!=", "XXX")
	}
}

func BenchmarkParseMessage_short(b *testing.B) {
	src := s2b("COMMAND arg1 :Message\r\n")
	m := new(Msg)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.Data = src
		m.ParseAll()
		m.Reset()
	}
}

func BenchmarkParseMessage_medium(b *testing.B) {
	src := s2b(":Namename COMMAND arg6 arg7 :Message message message\r\n")
	m := new(Msg)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.Data = src
		m.ParseAll()
		m.Reset()
	}
}

func BenchmarkParseMessage_long(b *testing.B) {
	src := s2b(":Namename!username@hostname COMMAND arg1 arg2 arg3 arg4 arg5 arg6 arg7 :Message message message message message\r\n")
	m := new(Msg)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.Data = src
		m.ParseAll()
		m.Reset()
	}
}

// params is a package-level sink so the compiler cannot elide the benchmarked call.
var params [][]byte

func BenchmarkParamsAlloc(b *testing.B) {
	src := s2b(":Namename!username@hostname COMMAND arg1 arg2 arg3 arg4 arg5 arg6 arg7 :Message message message message message\r\n")
	m := new(Msg)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.Data = src
		m.PeekCmd()
		params = m.Params()
		m.Reset()
	}
}

func TestAppendParams(t *testing.T) {
	m := new(Msg)
	m.AppendParams(s2b("#ERR"))
	if m.paramsCount != 1 || string(m.params[0]) != "#ERR" {
		t.Error(m)
	}
}

func TestAppendParamsOverflow(t *testing.T) {
	n := bytes.Split(bytes.Repeat([]byte(" param"), 16)[1:], []byte{space})
	m := new(Msg)
	m.prefixParsed = true
	m.SetCmd([]byte("hehe"))
	if err := m.SetParams(n...); err != nil || m.paramsCount != 16 {
		t.Error(err, n, m)
	}
	if err := m.AppendParams([]byte("$")); err == nil {
		t.Error(err, m, n)
	}
}

func TestSetParams(t *testing.T) {
	m := new(Msg)
	m.SetParams(s2b("1"), s2b("2"))
	if m.paramsCount != 2 || string(m.params[0]) != "1" || string(m.params[1]) != "2" {
		t.Error(m)
	}
}

func TestSetParamsOver(t *testing.T) {
	n := bytes.Split(bytes.Repeat([]byte("param "), 17), []byte{space})
	m := new(Msg)
	m.prefixParsed = true
	m.SetCmd([]byte("hehe"))
	err := m.SetParams(n...)
	if err == nil || m.params[0] != nil {
		t.Error(m, n)
	}
}
j int var zz []byte for j, zz = range z.parsed.params { if zz == nil { break } } for i := 0; i < j; i++ { if !bytes.Equal(m.Params()[i], p.params[i]) { t.Log(z, "i=", i, "j=", j, m.Params()[i], p.params[i]) t.Errorf("failed:%s\nparsed:%s", z.rawMsg, m.String()) } } } } func TestMsgServer(t *testing.T) { for _, z := range messageTests { m, err := NewMsg(s2b(z.rawMsg)) m.ParseAll() if err != nil || m.IsServer() != z.server { t.Errorf("failed:%s\nparsed:%s", z.rawMsg, m.String()) } } } func TestMsgReset(t *testing.T) { for _, z := range messageTests { m, err := NewMsg(s2b(z.rawMsg)) m.ParseAll() m.Reset() if err != nil || m.params[0] != nil { t.Errorf("failed:%s\nparsed:%s", z.rawMsg, m.String()) } } } func TestMsgHostMask(t *testing.T) { for _, z := range messageTests { m, err := NewMsg(s2b(z.rawMsg)) m.ParseAll() if err != nil || m.IsHostMask() != z.hostmask { t.Errorf("failed:%s\nparsed:%s", z.rawMsg, m.String()) } } } func TestMsgString(t *testing.T) { for _, z := range messageTests { m, err := NewMsg(s2b(z.rawMsg)) m.ParseAll() if err != nil || strings.HasPrefix(m.String(), "CMD: ") { t.Errorf("failed:%s\nparsed:%s", z.rawMsg, m.String()) } } } func TestSetCmd(t *testing.T) { m := new(Msg) m.SetCmd(s2b("XXX")) if string(m.Cmd()) != "XXX" { t.Error(m.Cmd(), "!=", "XXX") } } func TestSetHost(t *testing.T) { m := new(Msg) m.SetHost(s2b("XXX")) if string(m.host) != "XXX" { t.Error(m.host, "!=", "XXX") } } func TestSetUser(t *testing.T) { m := new(Msg) m.SetUser(s2b("XXX")) if string(m.user) != "XXX" { t.Error(m.user, "!=", "XXX") } } func TestSetName(t *testing.T) { m := new(Msg) m.SetName(s2b("XXX")) if string(m.name) != "XXX" { t.Error(m.name, "!=", "XXX") } } func TestSetTrailing(t *testing.T) { m := new(Msg) m.SetTrailing(s2b("XXX")) if string(m.Trailing()) != "XXX" { t.Error(m.trailing, "!=", "XXX") } } func BenchmarkParseMessage_short(b *testing.B) { src := s2b("COMMAND arg1 :Message\r\n") m := new(Msg) b.ResetTimer() for i := 0; i < b.N; i++ { m.Data = src 
m.ParseAll() m.Reset() } } func BenchmarkParseMessage_medium(b *testing.B) { src := s2b(":Namename COMMAND arg6 arg7 :Message message message\r\n") m := new(Msg) b.ResetTimer() for i := 0; i < b.N; i++ { m.Data = src m.ParseAll() m.Reset() } } func BenchmarkParseMessage_long(b *testing.B) { src := s2b(":Namename!username@hostname COMMAND arg1 arg2 arg3 arg4 arg5 arg6 arg7 :Message message message message message\r\n") m := new(Msg) b.ResetTimer() for i := 0; i < b.N; i++ { m.Data = src m.ParseAll() m.Reset() } } var params [][]byte func BenchmarkParamsAlloc(b *testing.B) { src := s2b(":Namename!username@hostname COMMAND arg1 arg2 arg3 arg4 arg5 arg6 arg7 :Message message message message message\r\n") m := new(Msg) b.ResetTimer() for i := 0; i < b.N; i++ { m.Data = src m.PeekCmd() params = m.Params() m.Reset() } } func TestAppendParams(t *testing.T) { m := new(Msg) m.AppendParams(s2b("#ERR")) if m.paramsCount != 1 || string(m.params[0]) != "#ERR" { t.Error(m) } } func TestAppendParamsOverflow(t *testing.T) { n := bytes.Split(bytes.Repeat([]byte(" param"), 16)[1:], []byte{space}) m := new(Msg) m.prefixParsed = true m.SetCmd([]byte("hehe")) if err := m.SetParams(n...); err != nil || m.paramsCount != 16 { t.Error(err, n, m) } if err := m.AppendParams([]byte("$")); err == nil { t.Error(err, m, n) } } func TestSetParams(t *testing.T) { m := new(Msg) m.SetParams(s2b("1"), s2b("2")) if m.paramsCount != 2 || string(m.params[0]) != "1" || string(m.params[1]) != "2" { t.Error(m) } } func TestSetParamsOver(t *testing.T) { n := bytes.Split(bytes.Repeat([]byte("param "), 17), []byte{space}) m := new(Msg) m.prefixParsed = true m.SetCmd([]byte("hehe")) err := m.SetParams(n...) if err == nil || m.params[0] != nil { t.Error(m, n) } }
// Command contrail-introspect-cli queries a Contrail vrouter's introspect
// HTTP API (port 8085) and prints interfaces, VRFs and unicast routes.
//
// NOTE(review): this region previously contained two concatenated copies of
// the same program separated by a stray commit-message line ("Error if route
// not found"); the duplicate was removed and the newer routeGet — which
// reports a missing route instead of panicking on route[0] — was kept.
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"os"

	"github.com/codegangsta/cli"
	"github.com/moovweb/gokogiri"
	"github.com/moovweb/gokogiri/xml"
	"github.com/moovweb/gokogiri/xpath"
)

// load reads an XML document from a local file (fromFile=true, url is then a
// path) or over HTTP, and parses it. All errors are fatal: the CLI cannot do
// anything useful without the document, and the previous code silently
// ignored errors and crashed later with a nil dereference.
func load(url string, fromFile bool) *xml.XmlDocument {
	var data []byte
	var err error
	if fromFile {
		data, err = ioutil.ReadFile(url)
	} else {
		var resp *http.Response
		resp, err = http.Get(url)
		if err == nil {
			defer resp.Body.Close()
			data, err = ioutil.ReadAll(resp.Body)
		}
	}
	if err != nil {
		log.Fatal(err)
	}
	doc, err := gokogiri.ParseXml(data)
	if err != nil {
		log.Fatal(err)
	}
	return doc
}

// itf prints the name of every interface known to the vrouter agent.
func itf(vrouter string) {
	url := "http://" + vrouter + ":8085/Snh_ItfReq"
	var doc = load(url, false)
	// Fix: the document was previously never freed (gokogiri wraps libxml2,
	// so Go's GC does not reclaim it).
	defer doc.Free()
	xps := xpath.Compile("//name/text()")
	ss, _ := doc.Root().Search(xps)
	for _, s := range ss {
		fmt.Printf("%s\n", s)
	}
}

// multiple prints the source IP of unicast routes that have a multicast
// next-hop list, for the given VRF.
func multiple(vrouter string, vrf_name string) {
	url := "http://" + vrouter + ":8085" + "/Snh_PageReq?x=begin:-1,end:-1,table:" + vrf_name + ".uc.route.0,"
	var doc = load(url, false)
	defer doc.Free()
	xps := xpath.Compile("//route_list/list/RouteUcSandeshData/path_list/list/PathSandeshData/nh/NhSandeshData/mc_list/../../../../../../src_ip/text()")
	ss, _ := doc.Root().Search(xps)
	for _, s := range ss {
		fmt.Printf("%s\n", s)
	}
}

// vrf prints the name of every VRF configured on the vrouter.
func vrf(vrouter string) {
	var url = "http://" + vrouter + ":8085" + "/Snh_VrfListReq"
	var doc = load(url, false)
	defer doc.Free()
	xps := xpath.Compile("//vrf_list/list//name/text()")
	ss, _ := doc.Root().Search(xps)
	for _, s := range ss {
		fmt.Printf("%s\n", s)
	}
}

// routeFromFile builds a route Collection from a saved introspect XML dump.
func routeFromFile(filePath string) Collection {
	var doc = load(filePath, true)
	return route(doc)
}

// routeFromUrl builds a route Collection by querying the vrouter's unicast
// route table for the given VRF.
func routeFromUrl(vrouter string, vrfName string) Collection {
	var url = "http://" + vrouter + ":8085" + "/Snh_PageReq?x=begin:-1,end:-1,table:" + vrfName + ".uc.route.0,"
	var doc = load(url, false)
	return route(doc)
}

// route wraps the route list node of an Inet4UcRouteResp document.
// The document is intentionally NOT freed here: the returned Collection's
// node still points into it.
func route(doc *xml.XmlDocument) Collection {
	ss, err := doc.Root().Search("/__Inet4UcRouteResp_list/Inet4UcRouteResp/route_list/list")
	if err != nil {
		log.Fatal(err)
	}
	col := Collection{node: ss[0]}
	return col
}

// Collection holds a handle on the route-list node of an introspect
// document (and optionally the owning document).
type Collection struct {
	doc  *xml.XmlDocument
	node xml.Node
}

// routeList prints the source IP of every route in the collection.
func routeList(col Collection) {
	ss, _ := col.node.Search("RouteUcSandeshData/src_ip/text()")
	for _, s := range ss {
		fmt.Printf("%s\n", s)
	}
}

// routeGet returns the route node whose src_ip matches srcIp exactly.
// Exits with an error if the route does not exist (instead of panicking
// on an empty result, as the first revision of this code did).
func routeGet(c Collection, srcIp string) xml.Node {
	route, _ := c.node.Search("RouteUcSandeshData/src_ip[text()='" + srcIp + "']/..")
	if len(route) == 0 {
		log.Fatal("Route to " + srcIp + " was not found")
	}
	return route[0]
}

// routeDetail prints one route's source IP followed by one line per path:
// next-hop IPs, peer, label and interface.
func routeDetail(n xml.Node) {
	srcIp, _ := n.Search("src_ip/text()")
	fmt.Printf("%s\n", srcIp[0])
	paths, _ := n.Search("path_list/list/PathSandeshData")
	for _, path := range paths {
		nhs, _ := path.Search("nh/NhSandeshData//dip/text()")
		peers, _ := path.Search("peer/text()")
		label, _ := path.Search("label/text()")
		itf, _ := path.Search("nh/NhSandeshData/itf/text()")
		fmt.Printf("  %s %s %s %s\n", nhs, peers, label, itf)
	}
}

func main() {
	var vrouter string
	var showAsXml bool
	app := cli.NewApp()
	app.Name = "contrail-introspect-cli"
	app.Usage = "CLI on contrail introspects"
	app.Flags = []cli.Flag{
		cli.BoolFlag{
			Name:        "format-xml",
			Destination: &showAsXml,
		},
	}
	app.Commands = []cli.Command{
		{
			Name:    "itf",
			Aliases: []string{"a"},
			Usage:   "list interfaces",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:        "vrouter",
					Destination: &vrouter,
				},
			},
			Action: func(c *cli.Context) {
				if c.NArg() != 1 {
					log.Fatal("Wrong argument number!")
				}
				vrouter := c.Args()[0]
				itf(vrouter)
			},
		},
		{
			Name:  "multiple",
			Usage: "vrouter vrf_name",
			Action: func(c *cli.Context) {
				if c.NArg() != 2 {
					log.Fatal("Wrong argument number!")
				}
				vrouter := c.Args()[0]
				vrf_name := c.Args()[1]
				multiple(vrouter, vrf_name)
			},
		},
		{
			Name:  "vrf",
			Usage: "vrf <vrouterUrl>",
			Action: func(c *cli.Context) {
				if c.NArg() != 1 {
					log.Fatal("Wrong argument number!")
				}
				vrouter := c.Args()[0]
				vrf(vrouter)
			},
		},
		{
			Name:  "route",
			Usage: "route <vrouterUrl> <vrfName> [<srcIp>]",
			Action: func(c *cli.Context) {
				col := routeFromUrl(c.Args()[0], c.Args()[1])
				switch c.NArg() {
				case 2:
					if showAsXml {
						fmt.Printf("%s\n", col.node)
						return
					}
					routeList(col)
				case 3:
					route := routeGet(col, c.Args()[2])
					if showAsXml {
						fmt.Printf("%s\n", route)
						return
					}
					routeDetail(route)
				}
			},
		},
		{
			Name: "route-from-file",
			Action: func(c *cli.Context) {
				col := routeFromFile(c.Args()[0])
				switch c.NArg() {
				case 1:
					if showAsXml {
						fmt.Printf("%s\n", col.node)
						return
					}
					routeList(col)
				case 2:
					route := routeGet(col, c.Args()[1])
					if showAsXml {
						fmt.Printf("%s\n", route)
						return
					}
					routeDetail(route)
				}
			},
		},
	}
	app.Run(os.Args)
}
// Package clusterprovisioner drives the kontainer-engine lifecycle
// (create/update/remove) for Rancher-managed clusters.
package clusterprovisioner

import (
	"context"
	"fmt"
	"net/url"
	"path"
	"reflect"
	"sort"
	"strings"
	"time"

	"github.com/mitchellh/mapstructure"
	"github.com/rancher/kontainer-engine/drivers/rke"
	"github.com/rancher/kontainer-engine/service"
	"github.com/rancher/norman/controller"
	"github.com/rancher/norman/types/convert"
	"github.com/rancher/norman/types/slice"
	"github.com/rancher/norman/types/values"
	util "github.com/rancher/rancher/pkg/cluster"
	kd "github.com/rancher/rancher/pkg/controllers/management/kontainerdrivermetadata"
	"github.com/rancher/rancher/pkg/ref"
	"github.com/rancher/rancher/pkg/rkedialerfactory"
	"github.com/rancher/rancher/pkg/settings"
	"github.com/rancher/rke/services"
	v3 "github.com/rancher/types/apis/management.cattle.io/v3"
	"github.com/rancher/types/config"
	"github.com/sirupsen/logrus"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/util/flowcontrol"
)

const (
	// RKEDriverKey is the cluster-spec field name holding the RKE config.
	RKEDriverKey = "rancherKubernetesEngineConfig"
	// KontainerEngineUpdate annotation coordinates driver-update handoff
	// ("updating/<timestamp>" while in flight, "updated" when done).
	KontainerEngineUpdate = "provisioner.cattle.io/ke-driver-update"
)

// Provisioner reconciles v3.Cluster objects against kontainer-engine.
type Provisioner struct {
	ClusterController     v3.ClusterController
	Clusters              v3.ClusterInterface
	NodeLister            v3.NodeLister
	engineService         *service.EngineService
	backoff               *flowcontrol.Backoff
	KontainerDriverLister v3.KontainerDriverLister
	DynamicSchemasLister  v3.DynamicSchemaLister
	Backups               v3.EtcdBackupLister
	RKESystemImages       v3.RKEK8sSystemImageInterface
	RKESystemImagesLister v3.RKEK8sSystemImageLister
}

// Register wires the provisioner into the management context: cluster
// lifecycle hooks, node-change handler, and the RKE driver's dialers and
// metadata store.
func Register(ctx context.Context, management *config.ManagementContext) {
	p := &Provisioner{
		engineService:         service.NewEngineService(NewPersistentStore(management.Core.Namespaces(""), management.Core)),
		Clusters:              management.Management.Clusters(""),
		ClusterController:     management.Management.Clusters("").Controller(),
		NodeLister:            management.Management.Nodes("").Controller().Lister(),
		backoff:               flowcontrol.NewBackOff(30*time.Second, 10*time.Minute),
		KontainerDriverLister: management.Management.KontainerDrivers("").Controller().Lister(),
		DynamicSchemasLister:  management.Management.DynamicSchemas("").Controller().Lister(),
		Backups:               management.Management.EtcdBackups("").Controller().Lister(),
		RKESystemImagesLister: management.Management.RKEK8sSystemImages("").Controller().Lister(),
		RKESystemImages:       management.Management.RKEK8sSystemImages(""),
	}
	// Add handlers
	p.Clusters.AddLifecycle(ctx, "cluster-provisioner-controller", p)
	management.Management.Nodes("").AddHandler(ctx, "cluster-provisioner-controller", p.machineChanged)

	local := &rkedialerfactory.RKEDialerFactory{
		Factory: management.Dialer,
	}
	docker := &rkedialerfactory.RKEDialerFactory{
		Factory: management.Dialer,
		Docker:  true,
	}
	driver := service.Drivers[service.RancherKubernetesEngineDriverName]
	rkeDriver := driver.(*rke.Driver)
	rkeDriver.DockerDialer = docker.Build
	rkeDriver.LocalDialer = local.Build
	rkeDriver.WrapTransportFactory = docker.WrapTransport
	mgmt := management.Management
	rkeDriver.DataStore = NewDataStore(mgmt.RKEAddons("").Controller().Lister(),
		mgmt.RKEAddons(""),
		mgmt.RKEK8sServiceOptions("").Controller().Lister(),
		mgmt.RKEK8sServiceOptions(""),
		mgmt.RKEK8sSystemImages("").Controller().Lister(),
		mgmt.RKEK8sSystemImages(""))
}

// Remove tears the cluster down via the driver, retrying up to 4 times
// (forcing removal on the last attempt), then reloads the cluster object.
func (p *Provisioner) Remove(cluster *v3.Cluster) (runtime.Object, error) {
	logrus.Infof("Deleting cluster [%s]", cluster.Name)
	if skipLocalAndImported(cluster) || cluster.Status.Driver == "" {
		return nil, nil
	}

	for i := 0; i < 4; i++ {
		// cluster will be forcefully removed on last attempt
		err := p.driverRemove(cluster, i == 3)
		if err == nil {
			break
		}
		if i == 3 {
			return cluster, fmt.Errorf("failed to remove the cluster [%s]: %v", cluster.Name, err)
		}
		time.Sleep(1 * time.Second)
	}
	logrus.Infof("Deleted cluster [%s]", cluster.Name)

	// cluster object will definitely have changed, reload
	return p.Clusters.Get(cluster.Name, metav1.GetOptions{})
}

// Updated handles cluster updates, coordinating driver/schema refresh via
// the KontainerEngineUpdate annotation before running the real update.
func (p *Provisioner) Updated(cluster *v3.Cluster) (runtime.Object, error) {
	obj, err := v3.ClusterConditionUpdated.Do(cluster, func() (runtime.Object, error) {
		anno := cluster.Annotations[KontainerEngineUpdate]
		if anno == "updated" {
			// Cluster has already been updated proceed as usual
			setVersion(cluster)
			return p.update(cluster, false)
		} else if strings.HasPrefix(anno, "updating/") {
			// Check if it's been updating for more than 20 seconds, this lets
			// the controller take over attempting to update the cluster
			pieces := strings.Split(anno, "/")
			t, err := time.Parse(time.RFC3339, pieces[1])
			if err != nil || int(time.Since(t)/time.Second) > 20 {
				cluster.Annotations[KontainerEngineUpdate] = "updated"
				return p.Clusters.Update(cluster)
			}
			// Go routine is already running to update the cluster so wait
			return nil, nil
		}
		// Set the annotation and kickoff the update
		c, err := p.setKontainerEngineUpdate(cluster, "updating")
		if err != nil {
			return cluster, err
		}
		go p.waitForSchema(c)
		return nil, nil
	})
	return obj.(*v3.Cluster), err
}

// waitForSchema waits for the driver and schema to be populated for the cluster
func (p *Provisioner) waitForSchema(cluster *v3.Cluster) {
	var driver string
	if cluster.Spec.GenericEngineConfig == nil {
		if cluster.Spec.AmazonElasticContainerServiceConfig != nil {
			driver = "amazonelasticcontainerservice"
		}
		if cluster.Spec.AzureKubernetesServiceConfig != nil {
			driver = "azurekubernetesservice"
		}
		if cluster.Spec.GoogleKubernetesEngineConfig != nil {
			driver = "googlekubernetesengine"
		}
	} else {
		if d, ok := (*cluster.Spec.GenericEngineConfig)["driverName"]; ok {
			driver = d.(string)
		}
	}

	if driver != "" {
		var schemaName string
		backoff := wait.Backoff{
			Duration: 2 * time.Second,
			Factor:   1,
			Jitter:   0,
			Steps:    7,
		}
		err := wait.ExponentialBackoff(backoff, func() (bool, error) {
			// Renamed from "driver" to avoid shadowing the outer string.
			kontainerDriver, err := p.KontainerDriverLister.Get("", driver)
			if err != nil {
				if !apierrors.IsNotFound(err) {
					return false, err
				}
				return false, nil
			}
			if kontainerDriver.Spec.BuiltIn {
				schemaName = kontainerDriver.Status.DisplayName + "Config"
			} else {
				schemaName = kontainerDriver.Status.DisplayName + "EngineConfig"
			}
			_, err = p.DynamicSchemasLister.Get("", strings.ToLower(schemaName))
			if err != nil {
				if !apierrors.IsNotFound(err) {
					return false, err
				}
				return false, nil
			}
			return true, nil
		})
		if err != nil {
			logrus.Warnf("[cluster-provisioner-controller] Failed to find driver %v and schema %v for cluster %v on upgrade: %v",
				driver, schemaName, cluster.Name, err)
		}
	}

	_, err := p.setKontainerEngineUpdate(cluster, "updated")
	if err != nil {
		logrus.Warnf("[cluster-provisioner-controller] Failed to set annotation on cluster %v on upgrade: %v", cluster.Name, err)
	}
	p.ClusterController.Enqueue(cluster.Namespace, cluster.Name)
}

// setKontainerEngineUpdate writes the update-coordination annotation with
// conflict retries; "updating" gets a timestamp suffix for timeout checks.
func (p *Provisioner) setKontainerEngineUpdate(cluster *v3.Cluster, anno string) (*v3.Cluster, error) {
	backoff := wait.Backoff{
		Duration: 500 * time.Millisecond,
		Factor:   1,
		Jitter:   0,
		Steps:    6,
	}

	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		newCluster, err := p.Clusters.Get(cluster.Name, metav1.GetOptions{})
		if err != nil {
			if !apierrors.IsNotFound(err) {
				return false, err
			}
			return false, nil
		}

		if anno == "updating" {
			// Add a timestamp for comparison since this anno was added
			anno = anno + "/" + time.Now().Format(time.RFC3339)
		}

		newCluster.Annotations[KontainerEngineUpdate] = anno
		newCluster, err = p.Clusters.Update(newCluster)
		if err != nil {
			if apierrors.IsConflict(err) {
				return false, nil
			}
			return false, err
		}
		cluster = newCluster
		return true, nil
	})
	if err != nil {
		return cluster, fmt.Errorf("[setKontainerEngineUpdate] Failed to update cluster [%s]: %v", cluster.Name, err)
	}
	return cluster, nil
}

// setVersion back-fills a missing Kubernetes version on the spec from the
// applied spec (RKE) or from the reported cluster version (EKS).
func setVersion(cluster *v3.Cluster) {
	if cluster.Spec.RancherKubernetesEngineConfig != nil {
		if cluster.Spec.RancherKubernetesEngineConfig.Version == "" {
			// set version from the applied spec
			// NOTE(review): when the applied spec has no RKE config, no
			// default is applied either — presumably intentional; confirm.
			if cluster.Status.AppliedSpec.RancherKubernetesEngineConfig != nil {
				if cluster.Status.AppliedSpec.RancherKubernetesEngineConfig.Version != "" {
					cluster.Spec.RancherKubernetesEngineConfig.Version = cluster.Status.AppliedSpec.RancherKubernetesEngineConfig.Version
				} else {
					cluster.Spec.RancherKubernetesEngineConfig.Version = settings.KubernetesVersion.Get()
				}
			}
		}
	} else if cluster.Spec.AmazonElasticContainerServiceConfig != nil {
		if cluster.Status.Version != nil {
			setConfigVersion := func(config *v3.MapStringInterface) {
				v, found := values.GetValue(*config, "kubernetesVersion")
				if !found || convert.ToString(v) == "" && cluster.Status.Version != nil && cluster.Status.Version.Major != "" && len(cluster.Status.Version.Minor) > 1 {
					values.PutValue(*config, fmt.Sprintf("%s.%s", cluster.Status.Version.Major, cluster.Status.Version.Minor[:2]), "kubernetesVersion")
				}
			}

			// during upgrade it is possible genericEngineConfig has not been set
			if newConfig := cluster.Spec.AmazonElasticContainerServiceConfig; newConfig != nil {
				setConfigVersion(newConfig)
			}
			if oldConfig := cluster.Status.AppliedSpec.AmazonElasticContainerServiceConfig; oldConfig != nil {
				setConfigVersion(oldConfig)
			}
		}
	}
}

// update reconciles the cluster, marks it provisioned, and applies the k3s
// config for clusters that are neither RKE nor generic-engine driven.
func (p *Provisioner) update(cluster *v3.Cluster, create bool) (*v3.Cluster, error) {
	cluster, err := p.reconcileCluster(cluster, create)
	if err != nil {
		return cluster, err
	}

	v3.ClusterConditionProvisioned.True(cluster)
	v3.ClusterConditionProvisioned.Message(cluster, "")
	v3.ClusterConditionProvisioned.Reason(cluster, "")
	v3.ClusterConditionPending.True(cluster)
	if cluster.Spec.RancherKubernetesEngineConfig != nil || cluster.Spec.GenericEngineConfig != nil {
		return cluster, nil
	}
	err = k3sClusterConfig(cluster)
	if err != nil {
		return cluster, err
	}
	return cluster, nil
}

// machineChanged re-enqueues the owning cluster whenever a node changes.
func (p *Provisioner) machineChanged(key string, machine *v3.Node) (runtime.Object, error) {
	parts := strings.SplitN(key, "/", 2)

	p.ClusterController.Enqueue("", parts[0])

	return machine, nil
}

// Create initializes cluster conditions, waits for a complete config, then
// provisions the cluster through the driver.
func (p *Provisioner) Create(cluster *v3.Cluster) (runtime.Object, error) {
	var err error

	// Initialize conditions, be careful to not continually update them
	v3.ClusterConditionPending.CreateUnknownIfNotExists(cluster)
	v3.ClusterConditionProvisioned.CreateUnknownIfNotExists(cluster)

	if v3.ClusterConditionWaiting.GetStatus(cluster) == "" {
		v3.ClusterConditionWaiting.Unknown(cluster)
	}
	if v3.ClusterConditionWaiting.GetMessage(cluster) == "" {
		v3.ClusterConditionWaiting.Message(cluster, "Waiting for API to be available")
	}

	cluster, err = p.pending(cluster)
	if err != nil {
		return cluster, err
	}

	return p.provision(cluster)
}

// provision runs update(create=true) under the Provisioned condition.
func (p *Provisioner) provision(cluster *v3.Cluster) (*v3.Cluster, error) {
	obj, err := v3.ClusterConditionProvisioned.Do(cluster, func() (runtime.Object, error) {
		return p.update(cluster, true)
	})
	return obj.(*v3.Cluster), err
}

// pending resolves and records the cluster's driver; returns a ForgetError
// while the configuration is still incomplete.
func (p *Provisioner) pending(cluster *v3.Cluster) (*v3.Cluster, error) {
	if skipLocalAndImported(cluster) {
		return cluster, nil
	}

	driver, err := p.validateDriver(cluster)
	if err != nil {
		return cluster, err
	}

	if driver == "" {
		return cluster, &controller.ForgetError{
			Err:    fmt.Errorf("waiting for full cluster configuration"),
			Reason: "Pending"}
	}

	if driver != cluster.Status.Driver {
		cluster.Status.Driver = driver
		if driver == v3.ClusterDriverRKE && cluster.Spec.RancherKubernetesEngineConfig == nil {
			cluster.Spec.RancherKubernetesEngineConfig = &v3.RancherKubernetesEngineConfig{}
		}
		return p.Clusters.Update(cluster)
	}

	return cluster, nil
}

// backoffFailure reports whether the identical failed spec is still inside
// its backoff window; if so it schedules a re-enqueue after the delay.
func (p *Provisioner) backoffFailure(cluster *v3.Cluster, spec *v3.ClusterSpec) (bool, time.Duration) {
	if cluster.Status.FailedSpec == nil {
		return false, 0
	}

	if !reflect.DeepEqual(cluster.Status.FailedSpec, spec) {
		return false, 0
	}

	if p.backoff.IsInBackOffSinceUpdate(cluster.Name, time.Now()) {
		go func() {
			time.Sleep(p.backoff.Get(cluster.Name))
			p.ClusterController.Enqueue("", cluster.Name)
		}()
		return true, p.backoff.Get(cluster.Name)
	}

	return false, 0
}

// reconcileCluster performs the actual driver create/update/restore/rotate
// call and persists the resulting endpoint, token and CA into the status.
func (p *Provisioner) reconcileCluster(cluster *v3.Cluster, create bool) (*v3.Cluster, error) {
	if skipLocalAndImported(cluster) {
		return cluster, nil
	}

	var (
		apiEndpoint, serviceAccountToken, caCert string
		err                                      error
	)

	if cluster.Name != "local" && !v3.ClusterConditionServiceAccountMigrated.IsTrue(cluster) &&
		v3.ClusterConditionProvisioned.IsTrue(cluster) {
		driverName, err := p.validateDriver(cluster)
		if err != nil {
			return nil, err
		}

		spec, _, err := p.getConfig(true, cluster.Spec, driverName, cluster.Name)
		if err != nil {
			return nil, err
		}

		serviceAccountToken, err = p.generateServiceAccount(cluster, *spec)
		if err != nil {
			return nil, err
		}

		cluster.Status.ServiceAccountToken = serviceAccountToken
		v3.ClusterConditionServiceAccountMigrated.True(cluster)

		// Update the cluster in k8s
		cluster, err = p.Clusters.Update(cluster)
		if err != nil {
			return nil, err
		}

		err = p.removeLegacyServiceAccount(cluster, *spec)
		if err != nil {
			return nil, err
		}
	}

	p.setGenericConfigs(cluster)

	spec, err := p.getSpec(cluster)
	if err != nil || spec == nil {
		return cluster, err
	}

	if ok, delay := p.backoffFailure(cluster, spec); ok {
		return cluster, &controller.ForgetError{Err: fmt.Errorf("backing off failure, delay: %v", delay)}
	}

	logrus.Infof("Provisioning cluster [%s]", cluster.Name)
	var updateTriggered bool
	if create {
		logrus.Infof("Creating cluster [%s]", cluster.Name)
		// setting updateTriggered to true since rke up will be called on cluster create
		updateTriggered = true
		apiEndpoint, serviceAccountToken, caCert, err = p.driverCreate(cluster, *spec)
		if err != nil && err.Error() == "cluster already exists" {
			logrus.Infof("Create done, Updating cluster [%s]", cluster.Name)
			apiEndpoint, serviceAccountToken, caCert, updateTriggered, err = p.driverUpdate(cluster, *spec)
		}
	} else if spec.RancherKubernetesEngineConfig != nil && spec.RancherKubernetesEngineConfig.Restore.Restore {
		logrus.Infof("Restoring cluster [%s] from backup", cluster.Name)
		apiEndpoint, serviceAccountToken, caCert, err = p.restoreClusterBackup(cluster, *spec)
	} else if spec.RancherKubernetesEngineConfig != nil && spec.RancherKubernetesEngineConfig.RotateCertificates != nil {
		logrus.Infof("Rotating certificates for cluster [%s]", cluster.Name)
		apiEndpoint, serviceAccountToken, caCert, updateTriggered, err = p.driverUpdate(cluster, *spec)
	} else {
		logrus.Infof("Updating cluster [%s]", cluster.Name)

		// Attempt to manually trigger updating, otherwise it will not be triggered until after exiting reconcile
		v3.ClusterConditionUpdated.Unknown(cluster)
		cluster, err = p.Clusters.Update(cluster)
		if err != nil {
			return cluster, fmt.Errorf("[reconcileCluster] Failed to update cluster [%s]: %v", cluster.Name, err)
		}

		apiEndpoint, serviceAccountToken, caCert, updateTriggered, err = p.driverUpdate(cluster, *spec)
	}
	// at this point we know the cluster has been modified in driverCreate/Update so reload
	if newCluster, reloadErr := p.Clusters.Get(cluster.Name, metav1.GetOptions{}); reloadErr == nil {
		cluster = newCluster
	}

	cluster, recordErr := p.recordFailure(cluster, *spec, err)
	if recordErr != nil {
		return cluster, recordErr
	}

	// for here out we want to always return the cluster, not just nil, so that the error can be properly
	// recorded if needs be
	if err != nil {
		return cluster, err
	}

	err = p.removeLegacyServiceAccount(cluster, *spec)
	if err != nil {
		return nil, err
	}

	v3.ClusterConditionServiceAccountMigrated.True(cluster)

	saved := false
	for i := 0; i < 20; i++ {
		cluster, err = p.Clusters.Get(cluster.Name, metav1.GetOptions{})
		if err != nil {
			return cluster, err
		}

		censoredSpec, err := p.censorGenericEngineConfig(*spec)
		if err != nil {
			return cluster, err
		}

		cluster.Status.AppliedSpec = censoredSpec
		cluster.Status.APIEndpoint = apiEndpoint
		cluster.Status.ServiceAccountToken = serviceAccountToken
		cluster.Status.CACert = caCert
		resetRkeConfigFlags(cluster, updateTriggered)

		if cluster, err = p.Clusters.Update(cluster); err == nil {
			saved = true
			break
		} else {
			logrus.Errorf("failed to update cluster [%s]: %v", cluster.Name, err)
			// Fix: previously time.Sleep(2) — a bare integer is nanoseconds,
			// so the retry loop effectively did not back off at all.
			time.Sleep(2 * time.Second)
		}
	}

	if !saved {
		return cluster, fmt.Errorf("failed to update cluster")
	}

	logrus.Infof("Provisioned cluster [%s]", cluster.Name)
	return cluster, nil
}

// setGenericConfigs migrates the provider-specific EKS/AKS/GKE config
// fields into GenericEngineConfig (tagged with driverName) on both the
// spec and the applied spec.
func (p *Provisioner) setGenericConfigs(cluster *v3.Cluster) {
	if cluster.Spec.GenericEngineConfig == nil || cluster.Status.AppliedSpec.GenericEngineConfig == nil {
		setGenericConfig := func(spec *v3.ClusterSpec) {
			if spec.GenericEngineConfig == nil {
				if spec.AmazonElasticContainerServiceConfig != nil {
					spec.GenericEngineConfig = spec.AmazonElasticContainerServiceConfig
					(*spec.GenericEngineConfig)["driverName"] = "amazonelasticcontainerservice"
					spec.AmazonElasticContainerServiceConfig = nil
				}

				if spec.AzureKubernetesServiceConfig != nil {
					spec.GenericEngineConfig = spec.AzureKubernetesServiceConfig
					(*spec.GenericEngineConfig)["driverName"] = "azurekubernetesservice"
					spec.AzureKubernetesServiceConfig = nil
				}

				if spec.GoogleKubernetesEngineConfig != nil {
					spec.GenericEngineConfig = spec.GoogleKubernetesEngineConfig
					(*spec.GenericEngineConfig)["driverName"] = "googlekubernetesengine"
					spec.GoogleKubernetesEngineConfig = nil
				}
			}
		}

		setGenericConfig(&cluster.Spec)
		setGenericConfig(&cluster.Status.AppliedSpec)
	}
}

// resetRkeConfigFlags clears one-shot RKE flags (certificate rotation,
// restore) after a run, and records taint support once an update ran.
func resetRkeConfigFlags(cluster *v3.Cluster, updateTriggered bool) {
	if cluster.Spec.RancherKubernetesEngineConfig != nil {
		cluster.Spec.RancherKubernetesEngineConfig.RotateCertificates = nil
		cluster.Spec.RancherKubernetesEngineConfig.Restore = v3.RestoreConfig{}
		if cluster.Status.AppliedSpec.RancherKubernetesEngineConfig != nil {
			cluster.Status.AppliedSpec.RancherKubernetesEngineConfig.RotateCertificates = nil
			cluster.Status.AppliedSpec.RancherKubernetesEngineConfig.Restore = v3.RestoreConfig{}
		}
		if !updateTriggered {
			return
		}
		if cluster.Status.Capabilities.TaintSupport == nil || !*cluster.Status.Capabilities.TaintSupport {
			supportsTaints := true
			cluster.Status.Capabilities.TaintSupport = &supportsTaints
		}
	}
}

// copyMap returns a shallow copy of the given map.
func copyMap(toCopy v3.MapStringInterface) v3.MapStringInterface {
	newMap := v3.MapStringInterface{}

	for k, v := range toCopy {
		newMap[k] = v
	}

	return newMap
}

// censorGenericEngineConfig strips password-typed fields (per the driver's
// dynamic schema) from a copy of the generic engine config before it is
// persisted into the applied spec.
func (p *Provisioner) censorGenericEngineConfig(input v3.ClusterSpec) (v3.ClusterSpec, error) {
	if input.GenericEngineConfig == nil {
		// nothing to do
		return input, nil
	}

	config := copyMap(*input.GenericEngineConfig)
	driverName, ok := config[DriverNameField].(string)
	if !ok {
		// can't figure out driver type so blank out the whole thing
		logrus.Warnf("cluster %v has a generic engine config but no driver type field; can't hide password "+
			"fields so removing the entire config", input.DisplayName)
		input.GenericEngineConfig = nil
		return input, nil
	}

	driver, err := p.KontainerDriverLister.Get("", driverName)
	if err != nil {
		return v3.ClusterSpec{}, err
	}

	var schemaName string
	if driver.Spec.BuiltIn {
		schemaName = driver.Status.DisplayName + "Config"
	} else {
		schemaName = driver.Status.DisplayName + "EngineConfig"
	}

	kontainerDriverSchema, err := p.DynamicSchemasLister.Get("", strings.ToLower(schemaName))
	if err != nil {
		return v3.ClusterSpec{}, fmt.Errorf("error getting dynamic schema %v", err)
	}

	for key := range config {
		field := kontainerDriverSchema.Spec.ResourceFields[key]
		if field.Type == "password" {
			delete(config, key)
		}
	}

	input.GenericEngineConfig = &config
	return input, nil
}

// skipLocalAndImported reports whether the cluster is driven outside this
// provisioner (local, imported, or k3s).
func skipLocalAndImported(cluster *v3.Cluster) bool {
	return cluster.Status.Driver == v3.ClusterDriverLocal ||
		cluster.Status.Driver == v3.ClusterDriverImported ||
		cluster.Status.Driver == v3.ClusterDriverK3s
}

// getConfig builds the driver config map for the cluster, reconciling RKE
// nodes and system images when requested.
// NOTE: continues past the end of this chunk.
func (p *Provisioner) getConfig(reconcileRKE bool, spec v3.ClusterSpec, driverName, clusterName string) (*v3.ClusterSpec, interface{}, error) {
	var v interface{}
	if spec.GenericEngineConfig == nil {
		if spec.RancherKubernetesEngineConfig != nil {
			var err error
			v, err = convert.EncodeToMap(spec.RancherKubernetesEngineConfig)
			if err != nil {
				return nil, nil, err
			}
		} else {
			v = map[string]interface{}{}
		}
	} else {
		v = *spec.GenericEngineConfig
	}

	if driverName == v3.ClusterDriverRKE && reconcileRKE {
		nodes, err := p.reconcileRKENodes(clusterName)
		if err != nil {
			return nil, nil, err
		}

		systemImages, err := p.getSystemImages(spec)
		if err != nil
{ return nil, nil, err } rkeCopy := *spec.RancherKubernetesEngineConfig spec.RancherKubernetesEngineConfig = &rkeCopy spec.RancherKubernetesEngineConfig.Nodes = nodes spec.RancherKubernetesEngineConfig.SystemImages = *systemImages data, _ := convert.EncodeToMap(spec) v, _ = data[RKEDriverKey] } return &spec, v, nil } func GetDriver(cluster *v3.Cluster, driverLister v3.KontainerDriverLister) (string, error) { var driver *v3.KontainerDriver var err error if cluster.Spec.GenericEngineConfig != nil { kontainerDriverName := (*cluster.Spec.GenericEngineConfig)["driverName"].(string) driver, err = driverLister.Get("", kontainerDriverName) if err != nil { return "", err } } if cluster.Spec.RancherKubernetesEngineConfig != nil { return v3.ClusterDriverRKE, nil } if driver == nil { return "", nil } return driver.Status.DisplayName, nil } func (p *Provisioner) validateDriver(cluster *v3.Cluster) (string, error) { oldDriver := cluster.Status.Driver if oldDriver == v3.ClusterDriverImported { return v3.ClusterDriverImported, nil } newDriver, err := GetDriver(cluster, p.KontainerDriverLister) if err != nil { return "", err } if oldDriver == "" && newDriver == "" { return newDriver, nil } if oldDriver == "" { return newDriver, nil } if newDriver == "" { return "", &controller.ForgetError{ Err: fmt.Errorf("waiting for nodes"), Reason: "Pending", } } if oldDriver != newDriver { return newDriver, fmt.Errorf("driver change from %s to %s not allowed", oldDriver, newDriver) } return newDriver, nil } func (p *Provisioner) getSystemImages(spec v3.ClusterSpec) (*v3.RKESystemImages, error) { // fetch system images from settings version := spec.RancherKubernetesEngineConfig.Version systemImages, err := kd.GetRKESystemImages(version, p.RKESystemImagesLister, p.RKESystemImages) if err != nil { return nil, fmt.Errorf("failed to find system images for version %s: %v", version, err) } privateRegistry := util.GetPrivateRepoURL(&v3.Cluster{Spec: spec}) if privateRegistry == "" { return 
// getSpec decides whether the cluster needs to be (re)provisioned.
// It compares the censored (passwords stripped) applied config against the
// censored desired config; when they are equal it returns (nil, nil),
// signalling "no update required". Otherwise it returns a spec recomputed
// from the UNCENSORED cluster.Spec so the driver receives real credentials.
func (p *Provisioner) getSpec(cluster *v3.Cluster) (*v3.ClusterSpec, error) {
	driverName, err := p.validateDriver(cluster)
	if err != nil {
		return nil, err
	}

	// Old (applied) config, censored so secrets don't affect the comparison.
	censoredOldSpec, err := p.censorGenericEngineConfig(cluster.Status.AppliedSpec)
	if err != nil {
		return nil, err
	}

	// reconcileRKE=false: the applied config is compared as-is, without
	// re-reconciling nodes/system images.
	_, oldConfig, err := p.getConfig(false, censoredOldSpec, driverName, cluster.Name)
	if err != nil {
		return nil, err
	}

	// New (desired) config, also censored for the comparison.
	censoredSpec, err := p.censorGenericEngineConfig(cluster.Spec)
	if err != nil {
		return nil, err
	}

	newSpec, newConfig, err := p.getConfig(true, censoredSpec, driverName, cluster.Name)
	if err != nil {
		return nil, err
	}

	// Version is the only parameter that can be updated for EKS; if the
	// versions are equal we do not need to update.
	// TODO: Replace with logic that is more adaptable
	if cluster.Spec.GenericEngineConfig != nil && (*cluster.Spec.GenericEngineConfig)["driverName"] == "amazonelasticcontainerservice" &&
		cluster.Status.AppliedSpec.GenericEngineConfig != nil &&
		(*cluster.Spec.GenericEngineConfig)["kubernetesVersion"] == (*cluster.Status.AppliedSpec.GenericEngineConfig)["kubernetesVersion"] {
		return nil, nil
	}

	if reflect.DeepEqual(oldConfig, newConfig) {
		// Nothing changed; caller treats a nil spec as "skip provisioning".
		return nil, nil
	}

	// Recompute from the uncensored spec: the censored newSpec above had
	// password fields removed and must not be handed to the driver.
	newSpec, _, err = p.getConfig(true, cluster.Spec, driverName, cluster.Name)

	return newSpec, err
}
nil { continue } if v3.NodeConditionProvisioned.IsUnknown(machine) && (machine.Spec.Etcd || machine.Spec.ControlPlane) { return nil, &controller.ForgetError{ Err: fmt.Errorf("waiting for %s to finish provisioning", machine.Spec.RequestedHostname), Reason: "Provisioning", } } if machine.Status.NodeConfig == nil { continue } if len(machine.Status.NodeConfig.Role) == 0 { continue } if !v3.NodeConditionProvisioned.IsTrue(machine) { continue } if slice.ContainsString(machine.Status.NodeConfig.Role, services.ETCDRole) { etcd = true } if slice.ContainsString(machine.Status.NodeConfig.Role, services.ControlRole) { controlplane = true } node := *machine.Status.NodeConfig if node.User == "" { node.User = "root" } if node.Port == "" { node.Port = "22" } if node.NodeName == "" { node.NodeName = ref.FromStrings(machine.Namespace, machine.Name) } nodes = append(nodes, node) } if !etcd || !controlplane { return nil, &controller.ForgetError{ Err: fmt.Errorf("waiting for etcd and controlplane nodes to be registered"), Reason: "Provisioning", } } sort.Slice(nodes, func(i, j int) bool { return nodes[i].NodeName < nodes[j].NodeName }) return nodes, nil } func (p *Provisioner) recordFailure(cluster *v3.Cluster, spec v3.ClusterSpec, err error) (*v3.Cluster, error) { if err == nil { p.backoff.DeleteEntry(cluster.Name) if cluster.Status.FailedSpec == nil { return cluster, nil } cluster.Status.FailedSpec = nil return p.Clusters.Update(cluster) } p.backoff.Next(cluster.Name, time.Now()) cluster.Status.FailedSpec = &spec newCluster, _ := p.Clusters.Update(cluster) // mask the error return newCluster, nil } func (p *Provisioner) restoreClusterBackup(cluster *v3.Cluster, spec v3.ClusterSpec) (api string, token string, cert string, err error) { snapshot := strings.Split(spec.RancherKubernetesEngineConfig.Restore.SnapshotName, ":")[1] backup, err := p.Backups.Get(cluster.Name, snapshot) if err != nil { return "", "", "", err } if backup.Spec.ClusterID != cluster.Name { return "", "", "", 
fmt.Errorf("snapshot [%s] is not a backup of cluster [%s]", backup.Name, cluster.Name) } api, token, cert, err = p.driverRestore(cluster, spec, GetBackupFilename(backup)) if err != nil { return "", "", "", err } // checking if we have s3 config and that it's not inconsistent. This happens // when restore is performed with invalid credentials and then the cluster is updated to fix it. if spec.RancherKubernetesEngineConfig.Services.Etcd.BackupConfig.S3BackupConfig != nil { s3Config := cluster.Spec.RancherKubernetesEngineConfig.Services.Etcd.BackupConfig.S3BackupConfig appliedS3Conf := cluster.Status.AppliedSpec.RancherKubernetesEngineConfig.Services.Etcd.BackupConfig.S3BackupConfig if !reflect.DeepEqual(s3Config, appliedS3Conf) { logrus.Infof("updated spec during restore detected for cluster [%s], update is required", cluster.Name) api, token, cert, _, err = p.driverUpdate(cluster, spec) } } return api, token, cert, err } func GetBackupFilenameFromURL(URL string) (string, error) { if !isValidURL(URL) { return "", fmt.Errorf("URL is not valid: [%s]", URL) } parsedURL, err := url.Parse(URL) if err != nil { return "", err } if parsedURL.Path == "" { return "", fmt.Errorf("No path found in URL: [%s]", URL) } extractedPath := path.Base(parsedURL.Path) return extractedPath, nil } // isValidURL tests a string to determine if it is a url or not. 
// https://golangcode.com/how-to-check-if-a-string-is-a-url/ func isValidURL(URL string) bool { _, err := url.ParseRequestURI(URL) if err != nil { return false } return true } func GetBackupFilename(backup *v3.EtcdBackup) string { snapshot := backup.Name if filename, err := GetBackupFilenameFromURL(backup.Spec.Filename); err == nil { // s3 file // need to remove extension snapshot = strings.TrimSuffix(filename, path.Ext(filename)) } else if len(backup.Spec.Filename) != 0 { // not s3 url snapshot = strings.TrimSuffix(backup.Spec.Filename, path.Ext(backup.Spec.Filename)) } return snapshot } // transform an imported cluster into a k3s cluster using its discovered version func k3sClusterConfig(cluster *v3.Cluster) error { // version is not found until cluster is provisioned if cluster.Status.Driver == "" || cluster.Status.Version == nil { return &controller.ForgetError{ Err: fmt.Errorf("waiting for full cluster configuration"), Reason: "Pending"} } // TODO Rancher may support upgrading the local cluster if its k3s in the future if cluster.Name == v3.ClusterDriverLocal || cluster.Status.Driver == v3.ClusterDriverK3s { return nil } if strings.Contains(cluster.Status.Version.String(), "k3s") { cluster.Status.Driver = v3.ClusterDriverK3s // only set these values on init if cluster.Spec.K3sConfig == nil { cluster.Spec.K3sConfig = &v3.K3sConfig{ Version: cluster.Status.Version.String(), K3sUpgradeStrategy: v3.K3sUpgradeStrategy{ ServerConcurrency: 1, WorkerConcurrency: 1, }, } } } return nil } Able to recongize local cluster as k3s This allows Rancher to recongize local clusters as k3s if they contain "k3s" in the gitVersion response of "kubectl get version" package clusterprovisioner import ( "context" "fmt" "net/url" "path" "reflect" "sort" "strings" "time" "github.com/mitchellh/mapstructure" "github.com/rancher/kontainer-engine/drivers/rke" "github.com/rancher/kontainer-engine/service" "github.com/rancher/norman/controller" "github.com/rancher/norman/types/convert" 
const (
	// RKEDriverKey is the map key under which the encoded RKE config lives
	// when a ClusterSpec is flattened to a map (see getConfig).
	RKEDriverKey = "rancherKubernetesEngineConfig"
	// KontainerEngineUpdate is the cluster annotation used to coordinate
	// driver/schema updates ("updating/<timestamp>" then "updated").
	KontainerEngineUpdate = "provisioner.cattle.io/ke-driver-update"
)

// Provisioner drives cluster create/update/remove through kontainer-engine
// drivers and records the outcome on the cluster status.
type Provisioner struct {
	ClusterController v3.ClusterController // used to requeue clusters (e.g. from machineChanged)
	Clusters          v3.ClusterInterface  // read/write access to cluster objects
	NodeLister        v3.NodeLister        // node cache, used by reconcileRKENodes
	engineService     *service.EngineService
	backoff           *flowcontrol.Backoff // per-cluster failure backoff (see recordFailure/backoffFailure)
	KontainerDriverLister v3.KontainerDriverLister
	DynamicSchemasLister  v3.DynamicSchemaLister // used to censor password fields via driver schemas
	Backups               v3.EtcdBackupLister    // used by restoreClusterBackup to look up snapshots
	RKESystemImages       v3.RKEK8sSystemImageInterface
	RKESystemImagesLister v3.RKEK8sSystemImageLister
}
management.Management.RKEK8sSystemImages("").Controller().Lister(), RKESystemImages: management.Management.RKEK8sSystemImages(""), } // Add handlers p.Clusters.AddLifecycle(ctx, "cluster-provisioner-controller", p) management.Management.Nodes("").AddHandler(ctx, "cluster-provisioner-controller", p.machineChanged) local := &rkedialerfactory.RKEDialerFactory{ Factory: management.Dialer, } docker := &rkedialerfactory.RKEDialerFactory{ Factory: management.Dialer, Docker: true, } driver := service.Drivers[service.RancherKubernetesEngineDriverName] rkeDriver := driver.(*rke.Driver) rkeDriver.DockerDialer = docker.Build rkeDriver.LocalDialer = local.Build rkeDriver.WrapTransportFactory = docker.WrapTransport mgmt := management.Management rkeDriver.DataStore = NewDataStore(mgmt.RKEAddons("").Controller().Lister(), mgmt.RKEAddons(""), mgmt.RKEK8sServiceOptions("").Controller().Lister(), mgmt.RKEK8sServiceOptions(""), mgmt.RKEK8sSystemImages("").Controller().Lister(), mgmt.RKEK8sSystemImages("")) } func (p *Provisioner) Remove(cluster *v3.Cluster) (runtime.Object, error) { logrus.Infof("Deleting cluster [%s]", cluster.Name) if skipLocalAndImported(cluster) || cluster.Status.Driver == "" { return nil, nil } for i := 0; i < 4; i++ { // cluster will be forcefully removed on last attempt err := p.driverRemove(cluster, i == 3) if err == nil { break } if i == 3 { return cluster, fmt.Errorf("failed to remove the cluster [%s]: %v", cluster.Name, err) } time.Sleep(1 * time.Second) } logrus.Infof("Deleted cluster [%s]", cluster.Name) // cluster object will definitely have changed, reload return p.Clusters.Get(cluster.Name, metav1.GetOptions{}) } func (p *Provisioner) Updated(cluster *v3.Cluster) (runtime.Object, error) { obj, err := v3.ClusterConditionUpdated.Do(cluster, func() (runtime.Object, error) { anno, _ := cluster.Annotations[KontainerEngineUpdate] if anno == "updated" { // Cluster has already been updated proceed as usual setVersion(cluster) return p.update(cluster, false) } 
else if strings.HasPrefix(anno, "updating/") { // Check if it's been updating for more than 20 seconds, this lets // the controller take over attempting to update the cluster pieces := strings.Split(anno, "/") t, err := time.Parse(time.RFC3339, pieces[1]) if err != nil || int(time.Since(t)/time.Second) > 20 { cluster.Annotations[KontainerEngineUpdate] = "updated" return p.Clusters.Update(cluster) } // Go routine is already running to update the cluster so wait return nil, nil } // Set the annotation and kickoff the update c, err := p.setKontainerEngineUpdate(cluster, "updating") if err != nil { return cluster, err } go p.waitForSchema(c) return nil, nil }) return obj.(*v3.Cluster), err } // waitForSchema waits for the driver and schema to be populated for the cluster func (p *Provisioner) waitForSchema(cluster *v3.Cluster) { var driver string if cluster.Spec.GenericEngineConfig == nil { if cluster.Spec.AmazonElasticContainerServiceConfig != nil { driver = "amazonelasticcontainerservice" } if cluster.Spec.AzureKubernetesServiceConfig != nil { driver = "azurekubernetesservice" } if cluster.Spec.GoogleKubernetesEngineConfig != nil { driver = "googlekubernetesengine" } } else { if d, ok := (*cluster.Spec.GenericEngineConfig)["driverName"]; ok { driver = d.(string) } } if driver != "" { var schemaName string backoff := wait.Backoff{ Duration: 2 * time.Second, Factor: 1, Jitter: 0, Steps: 7, } err := wait.ExponentialBackoff(backoff, func() (bool, error) { driver, err := p.KontainerDriverLister.Get("", driver) if err != nil { if !apierrors.IsNotFound(err) { return false, err } return false, nil } if driver.Spec.BuiltIn { schemaName = driver.Status.DisplayName + "Config" } else { schemaName = driver.Status.DisplayName + "EngineConfig" } _, err = p.DynamicSchemasLister.Get("", strings.ToLower(schemaName)) if err != nil { if !apierrors.IsNotFound(err) { return false, err } return false, nil } return true, nil }) if err != nil { logrus.Warnf("[cluster-provisioner-controller] 
// setKontainerEngineUpdate stamps the KontainerEngineUpdate annotation on the
// cluster, retrying with exponential backoff (6 steps, 500ms base) on update
// conflicts. For "updating" a timestamp is appended so Updated() can detect a
// stalled update. Returns the last successfully fetched/updated cluster.
func (p *Provisioner) setKontainerEngineUpdate(cluster *v3.Cluster, anno string) (*v3.Cluster, error) {
	backoff := wait.Backoff{
		Duration: 500 * time.Millisecond,
		Factor:   1,
		Jitter:   0,
		Steps:    6,
	}

	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		newCluster, err := p.Clusters.Get(cluster.Name, metav1.GetOptions{})
		if err != nil {
			if !apierrors.IsNotFound(err) {
				return false, err
			}
			// NOTE(review): NotFound is retried rather than aborted —
			// presumably to ride out cache lag; confirm this is intended.
			return false, nil
		}

		if anno == "updating" {
			// Add a timestamp for comparison since this anno was added
			anno = anno + "/" + time.Now().Format(time.RFC3339)
		}

		newCluster.Annotations[KontainerEngineUpdate] = anno
		newCluster, err = p.Clusters.Update(newCluster)
		if err != nil {
			if apierrors.IsConflict(err) {
				// Stale object; refetch and retry.
				return false, nil
			}
			return false, err
		}

		cluster = newCluster
		return true, nil
	})
	if err != nil {
		return cluster, fmt.Errorf("[setKontainerEngineUpdate] Failed to update cluster [%s]: %v", cluster.Name, err)
	}
	return cluster, nil
}
*v3.MapStringInterface) { v, found := values.GetValue(*config, "kubernetesVersion") if !found || convert.ToString(v) == "" && cluster.Status.Version != nil && cluster.Status.Version.Major != "" && len(cluster.Status.Version.Minor) > 1 { values.PutValue(*config, fmt.Sprintf("%s.%s", cluster.Status.Version.Major, cluster.Status.Version.Minor[:2]), "kubernetesVersion") } } // during upgrade it is possible genericEngineConfig has not been set if newConfig := cluster.Spec.AmazonElasticContainerServiceConfig; newConfig != nil { setConfigVersion(newConfig) } if oldConfig := cluster.Status.AppliedSpec.AmazonElasticContainerServiceConfig; oldConfig != nil { setConfigVersion(oldConfig) } } } } func (p *Provisioner) update(cluster *v3.Cluster, create bool) (*v3.Cluster, error) { cluster, err := p.reconcileCluster(cluster, create) if err != nil { return cluster, err } v3.ClusterConditionProvisioned.True(cluster) v3.ClusterConditionProvisioned.Message(cluster, "") v3.ClusterConditionProvisioned.Reason(cluster, "") v3.ClusterConditionPending.True(cluster) if cluster.Spec.RancherKubernetesEngineConfig != nil || cluster.Spec.GenericEngineConfig != nil { return cluster, nil } err = k3sClusterConfig(cluster) if err != nil { return cluster, err } return cluster, nil } func (p *Provisioner) machineChanged(key string, machine *v3.Node) (runtime.Object, error) { parts := strings.SplitN(key, "/", 2) p.ClusterController.Enqueue("", parts[0]) return machine, nil } func (p *Provisioner) Create(cluster *v3.Cluster) (runtime.Object, error) { var err error // Initialize conditions, be careful to not continually update them v3.ClusterConditionPending.CreateUnknownIfNotExists(cluster) v3.ClusterConditionProvisioned.CreateUnknownIfNotExists(cluster) if v3.ClusterConditionWaiting.GetStatus(cluster) == "" { v3.ClusterConditionWaiting.Unknown(cluster) } if v3.ClusterConditionWaiting.GetMessage(cluster) == "" { v3.ClusterConditionWaiting.Message(cluster, "Waiting for API to be available") } cluster, 
err = p.pending(cluster) if err != nil { return cluster, err } return p.provision(cluster) } func (p *Provisioner) provision(cluster *v3.Cluster) (*v3.Cluster, error) { obj, err := v3.ClusterConditionProvisioned.Do(cluster, func() (runtime.Object, error) { return p.update(cluster, true) }) return obj.(*v3.Cluster), err } func (p *Provisioner) pending(cluster *v3.Cluster) (*v3.Cluster, error) { if skipLocalAndImported(cluster) { return cluster, nil } driver, err := p.validateDriver(cluster) if err != nil { return cluster, err } if driver == "" { return cluster, &controller.ForgetError{ Err: fmt.Errorf("waiting for full cluster configuration"), Reason: "Pending"} } if driver != cluster.Status.Driver { cluster.Status.Driver = driver if driver == v3.ClusterDriverRKE && cluster.Spec.RancherKubernetesEngineConfig == nil { cluster.Spec.RancherKubernetesEngineConfig = &v3.RancherKubernetesEngineConfig{} } return p.Clusters.Update(cluster) } return cluster, nil } func (p *Provisioner) backoffFailure(cluster *v3.Cluster, spec *v3.ClusterSpec) (bool, time.Duration) { if cluster.Status.FailedSpec == nil { return false, 0 } if !reflect.DeepEqual(cluster.Status.FailedSpec, spec) { return false, 0 } if p.backoff.IsInBackOffSinceUpdate(cluster.Name, time.Now()) { go func() { time.Sleep(p.backoff.Get(cluster.Name)) p.ClusterController.Enqueue("", cluster.Name) }() return true, p.backoff.Get(cluster.Name) } return false, 0 } func (p *Provisioner) reconcileCluster(cluster *v3.Cluster, create bool) (*v3.Cluster, error) { if skipLocalAndImported(cluster) { return cluster, nil } var ( apiEndpoint, serviceAccountToken, caCert string err error ) if cluster.Name != "local" && !v3.ClusterConditionServiceAccountMigrated.IsTrue(cluster) && v3.ClusterConditionProvisioned.IsTrue(cluster) { driverName, err := p.validateDriver(cluster) if err != nil { return nil, err } spec, _, err := p.getConfig(true, cluster.Spec, driverName, cluster.Name) if err != nil { return nil, err } serviceAccountToken, 
err = p.generateServiceAccount(cluster, *spec) if err != nil { return nil, err } cluster.Status.ServiceAccountToken = serviceAccountToken v3.ClusterConditionServiceAccountMigrated.True(cluster) // Update the cluster in k8s cluster, err = p.Clusters.Update(cluster) if err != nil { return nil, err } err = p.removeLegacyServiceAccount(cluster, *spec) if err != nil { return nil, err } } p.setGenericConfigs(cluster) spec, err := p.getSpec(cluster) if err != nil || spec == nil { return cluster, err } if ok, delay := p.backoffFailure(cluster, spec); ok { return cluster, &controller.ForgetError{Err: fmt.Errorf("backing off failure, delay: %v", delay)} } logrus.Infof("Provisioning cluster [%s]", cluster.Name) var updateTriggered bool if create { logrus.Infof("Creating cluster [%s]", cluster.Name) // setting updateTriggered to true since rke up will be called on cluster create updateTriggered = true apiEndpoint, serviceAccountToken, caCert, err = p.driverCreate(cluster, *spec) if err != nil && err.Error() == "cluster already exists" { logrus.Infof("Create done, Updating cluster [%s]", cluster.Name) apiEndpoint, serviceAccountToken, caCert, updateTriggered, err = p.driverUpdate(cluster, *spec) } } else if spec.RancherKubernetesEngineConfig != nil && spec.RancherKubernetesEngineConfig.Restore.Restore { logrus.Infof("Restoring cluster [%s] from backup", cluster.Name) apiEndpoint, serviceAccountToken, caCert, err = p.restoreClusterBackup(cluster, *spec) } else if spec.RancherKubernetesEngineConfig != nil && spec.RancherKubernetesEngineConfig.RotateCertificates != nil { logrus.Infof("Rotating certificates for cluster [%s]", cluster.Name) apiEndpoint, serviceAccountToken, caCert, updateTriggered, err = p.driverUpdate(cluster, *spec) } else { logrus.Infof("Updating cluster [%s]", cluster.Name) // Attempt to manually trigger updating, otherwise it will not be triggered until after exiting reconcile v3.ClusterConditionUpdated.Unknown(cluster) cluster, err = p.Clusters.Update(cluster) 
if err != nil { return cluster, fmt.Errorf("[reconcileCluster] Failed to update cluster [%s]: %v", cluster.Name, err) } apiEndpoint, serviceAccountToken, caCert, updateTriggered, err = p.driverUpdate(cluster, *spec) } // at this point we know the cluster has been modified in driverCreate/Update so reload if newCluster, reloadErr := p.Clusters.Get(cluster.Name, metav1.GetOptions{}); reloadErr == nil { cluster = newCluster } cluster, recordErr := p.recordFailure(cluster, *spec, err) if recordErr != nil { return cluster, recordErr } // for here out we want to always return the cluster, not just nil, so that the error can be properly // recorded if needs be if err != nil { return cluster, err } err = p.removeLegacyServiceAccount(cluster, *spec) if err != nil { return nil, err } v3.ClusterConditionServiceAccountMigrated.True(cluster) saved := false for i := 0; i < 20; i++ { cluster, err = p.Clusters.Get(cluster.Name, metav1.GetOptions{}) if err != nil { return cluster, err } censoredSpec, err := p.censorGenericEngineConfig(*spec) if err != nil { return cluster, err } cluster.Status.AppliedSpec = censoredSpec cluster.Status.APIEndpoint = apiEndpoint cluster.Status.ServiceAccountToken = serviceAccountToken cluster.Status.CACert = caCert resetRkeConfigFlags(cluster, updateTriggered) if cluster, err = p.Clusters.Update(cluster); err == nil { saved = true break } else { logrus.Errorf("failed to update cluster [%s]: %v", cluster.Name, err) time.Sleep(2) } } if !saved { return cluster, fmt.Errorf("failed to update cluster") } logrus.Infof("Provisioned cluster [%s]", cluster.Name) return cluster, nil } func (p *Provisioner) setGenericConfigs(cluster *v3.Cluster) { if cluster.Spec.GenericEngineConfig == nil || cluster.Status.AppliedSpec.GenericEngineConfig == nil { setGenericConfig := func(spec *v3.ClusterSpec) { if spec.GenericEngineConfig == nil { if spec.AmazonElasticContainerServiceConfig != nil { spec.GenericEngineConfig = spec.AmazonElasticContainerServiceConfig 
(*spec.GenericEngineConfig)["driverName"] = "amazonelasticcontainerservice" spec.AmazonElasticContainerServiceConfig = nil } if spec.AzureKubernetesServiceConfig != nil { spec.GenericEngineConfig = spec.AzureKubernetesServiceConfig (*spec.GenericEngineConfig)["driverName"] = "azurekubernetesservice" spec.AzureKubernetesServiceConfig = nil } if spec.GoogleKubernetesEngineConfig != nil { spec.GenericEngineConfig = spec.GoogleKubernetesEngineConfig (*spec.GenericEngineConfig)["driverName"] = "googlekubernetesengine" spec.GoogleKubernetesEngineConfig = nil } } } setGenericConfig(&cluster.Spec) setGenericConfig(&cluster.Status.AppliedSpec) } } func resetRkeConfigFlags(cluster *v3.Cluster, updateTriggered bool) { if cluster.Spec.RancherKubernetesEngineConfig != nil { cluster.Spec.RancherKubernetesEngineConfig.RotateCertificates = nil cluster.Spec.RancherKubernetesEngineConfig.Restore = v3.RestoreConfig{} if cluster.Status.AppliedSpec.RancherKubernetesEngineConfig != nil { cluster.Status.AppliedSpec.RancherKubernetesEngineConfig.RotateCertificates = nil cluster.Status.AppliedSpec.RancherKubernetesEngineConfig.Restore = v3.RestoreConfig{} } if !updateTriggered { return } if cluster.Status.Capabilities.TaintSupport == nil || !*cluster.Status.Capabilities.TaintSupport { supportsTaints := true cluster.Status.Capabilities.TaintSupport = &supportsTaints } } } func copyMap(toCopy v3.MapStringInterface) v3.MapStringInterface { newMap := v3.MapStringInterface{} for k, v := range toCopy { newMap[k] = v } return newMap } func (p *Provisioner) censorGenericEngineConfig(input v3.ClusterSpec) (v3.ClusterSpec, error) { if input.GenericEngineConfig == nil { // nothing to do return input, nil } config := copyMap(*input.GenericEngineConfig) driverName, ok := config[DriverNameField].(string) if !ok { // can't figure out driver type so blank out the whole thing logrus.Warnf("cluster %v has a generic engine config but no driver type field; can't hide password "+ "fields so removing the entire 
// skipLocalAndImported reports whether the cluster is local, imported, or
// k3s — clusters this provisioner must not drive.
func skipLocalAndImported(cluster *v3.Cluster) bool {
	return cluster.Status.Driver == v3.ClusterDriverLocal ||
		cluster.Status.Driver == v3.ClusterDriverImported ||
		cluster.Status.Driver == v3.ClusterDriverK3s
}

// getConfig returns the (possibly RKE-reconciled) spec together with the
// driver config flattened to a comparable value: the encoded RKE config, the
// generic engine config map, or an empty map when neither is set.
// Note: spec is a value copy, so mutations here do not touch the caller's spec.
func (p *Provisioner) getConfig(reconcileRKE bool, spec v3.ClusterSpec, driverName, clusterName string) (*v3.ClusterSpec, interface{}, error) {
	var v interface{}
	if spec.GenericEngineConfig == nil {
		if spec.RancherKubernetesEngineConfig != nil {
			var err error
			v, err = convert.EncodeToMap(spec.RancherKubernetesEngineConfig)
			if err != nil {
				return nil, nil, err
			}
		} else {
			v = map[string]interface{}{}
		}
	} else {
		v = *spec.GenericEngineConfig
	}

	if driverName == v3.ClusterDriverRKE && reconcileRKE {
		// Fold the registered node inventory and resolved system images
		// into a copy of the RKE config before encoding it.
		nodes, err := p.reconcileRKENodes(clusterName)
		if err != nil {
			return nil, nil, err
		}

		systemImages, err := p.getSystemImages(spec)
		if err != nil {
			return nil, nil, err
		}

		// Copy before mutating so the original config object is untouched.
		rkeCopy := *spec.RancherKubernetesEngineConfig
		spec.RancherKubernetesEngineConfig = &rkeCopy
		spec.RancherKubernetesEngineConfig.Nodes = nodes
		spec.RancherKubernetesEngineConfig.SystemImages = *systemImages

		data, _ := convert.EncodeToMap(spec)
		v, _ = data[RKEDriverKey]
	}

	return &spec, v, nil
}
// GetDriver resolves the driver name for the cluster: the kontainer driver
// named in GenericEngineConfig, the built-in RKE driver when an RKE config is
// present, or "" when neither is configured.
// NOTE(review): the .(string) assertion on "driverName" is unchecked and
// panics if the key is missing or not a string — confirm upstream guarantees.
func GetDriver(cluster *v3.Cluster, driverLister v3.KontainerDriverLister) (string, error) {
	var driver *v3.KontainerDriver
	var err error

	if cluster.Spec.GenericEngineConfig != nil {
		kontainerDriverName := (*cluster.Spec.GenericEngineConfig)["driverName"].(string)
		driver, err = driverLister.Get("", kontainerDriverName)
		if err != nil {
			return "", err
		}
	}

	if cluster.Spec.RancherKubernetesEngineConfig != nil {
		return v3.ClusterDriverRKE, nil
	}

	if driver == nil {
		return "", nil
	}

	return driver.Status.DisplayName, nil
}

// validateDriver checks the driver derived from the spec against the driver
// already recorded on the status, rejecting driver changes and requeueing
// while a previously driven cluster temporarily resolves no driver.
func (p *Provisioner) validateDriver(cluster *v3.Cluster) (string, error) {
	oldDriver := cluster.Status.Driver

	if oldDriver == v3.ClusterDriverImported {
		return v3.ClusterDriverImported, nil
	}

	newDriver, err := GetDriver(cluster, p.KontainerDriverLister)
	if err != nil {
		return "", err
	}

	// (This branch is subsumed by the next one; kept as-is.)
	if oldDriver == "" && newDriver == "" {
		return newDriver, nil
	}

	if oldDriver == "" {
		return newDriver, nil
	}

	if newDriver == "" {
		return "", &controller.ForgetError{
			Err:    fmt.Errorf("waiting for nodes"),
			Reason: "Pending",
		}
	}

	if oldDriver != newDriver {
		return newDriver, fmt.Errorf("driver change from %s to %s not allowed", oldDriver, newDriver)
	}

	return newDriver, nil
}

// getSystemImages resolves the RKE system images for the spec's Kubernetes
// version and, when a private registry is configured, prefixes every image
// with the registry URL.
func (p *Provisioner) getSystemImages(spec v3.ClusterSpec) (*v3.RKESystemImages, error) {
	// fetch system images from settings
	version := spec.RancherKubernetesEngineConfig.Version
	systemImages, err := kd.GetRKESystemImages(version, p.RKESystemImagesLister, p.RKESystemImages)
	if err != nil {
		return nil, fmt.Errorf("failed to find system images for version %s: %v", version, err)
	}

	privateRegistry := util.GetPrivateRepoURL(&v3.Cluster{Spec: spec})
	if privateRegistry == "" {
		return &systemImages, nil
	}

	// prepend private repo to every image reference
	imagesMap, err := convert.EncodeToMap(systemImages)
	if err != nil {
		return nil, err
	}
	updatedMap := make(map[string]interface{})
	for key, value := range imagesMap {
		newValue := fmt.Sprintf("%s/%s", privateRegistry, value)
		updatedMap[key] = newValue
	}
	// decode the prefixed map back into the typed struct
	if err := mapstructure.Decode(updatedMap, &systemImages); err != nil {
		return nil, err
	}
	return &systemImages, nil
}
*Provisioner) getSpec(cluster *v3.Cluster) (*v3.ClusterSpec, error) { driverName, err := p.validateDriver(cluster) if err != nil { return nil, err } censoredOldSpec, err := p.censorGenericEngineConfig(cluster.Status.AppliedSpec) if err != nil { return nil, err } _, oldConfig, err := p.getConfig(false, censoredOldSpec, driverName, cluster.Name) if err != nil { return nil, err } censoredSpec, err := p.censorGenericEngineConfig(cluster.Spec) if err != nil { return nil, err } newSpec, newConfig, err := p.getConfig(true, censoredSpec, driverName, cluster.Name) if err != nil { return nil, err } // Version is the only parameter that can be updated for EKS, if they is equal we do not need to update // TODO: Replace with logic that is more adaptable if cluster.Spec.GenericEngineConfig != nil && (*cluster.Spec.GenericEngineConfig)["driverName"] == "amazonelasticcontainerservice" && cluster.Status.AppliedSpec.GenericEngineConfig != nil && (*cluster.Spec.GenericEngineConfig)["kubernetesVersion"] == (*cluster.Status.AppliedSpec.GenericEngineConfig)["kubernetesVersion"] { return nil, nil } if reflect.DeepEqual(oldConfig, newConfig) { return nil, nil } newSpec, _, err = p.getConfig(true, cluster.Spec, driverName, cluster.Name) return newSpec, err } func (p *Provisioner) reconcileRKENodes(clusterName string) ([]v3.RKEConfigNode, error) { machines, err := p.NodeLister.List(clusterName, labels.Everything()) if err != nil { return nil, err } etcd := false controlplane := false var nodes []v3.RKEConfigNode for _, machine := range machines { if machine.DeletionTimestamp != nil { continue } if v3.NodeConditionProvisioned.IsUnknown(machine) && (machine.Spec.Etcd || machine.Spec.ControlPlane) { return nil, &controller.ForgetError{ Err: fmt.Errorf("waiting for %s to finish provisioning", machine.Spec.RequestedHostname), Reason: "Provisioning", } } if machine.Status.NodeConfig == nil { continue } if len(machine.Status.NodeConfig.Role) == 0 { continue } if 
!v3.NodeConditionProvisioned.IsTrue(machine) { continue } if slice.ContainsString(machine.Status.NodeConfig.Role, services.ETCDRole) { etcd = true } if slice.ContainsString(machine.Status.NodeConfig.Role, services.ControlRole) { controlplane = true } node := *machine.Status.NodeConfig if node.User == "" { node.User = "root" } if node.Port == "" { node.Port = "22" } if node.NodeName == "" { node.NodeName = ref.FromStrings(machine.Namespace, machine.Name) } nodes = append(nodes, node) } if !etcd || !controlplane { return nil, &controller.ForgetError{ Err: fmt.Errorf("waiting for etcd and controlplane nodes to be registered"), Reason: "Provisioning", } } sort.Slice(nodes, func(i, j int) bool { return nodes[i].NodeName < nodes[j].NodeName }) return nodes, nil } func (p *Provisioner) recordFailure(cluster *v3.Cluster, spec v3.ClusterSpec, err error) (*v3.Cluster, error) { if err == nil { p.backoff.DeleteEntry(cluster.Name) if cluster.Status.FailedSpec == nil { return cluster, nil } cluster.Status.FailedSpec = nil return p.Clusters.Update(cluster) } p.backoff.Next(cluster.Name, time.Now()) cluster.Status.FailedSpec = &spec newCluster, _ := p.Clusters.Update(cluster) // mask the error return newCluster, nil } func (p *Provisioner) restoreClusterBackup(cluster *v3.Cluster, spec v3.ClusterSpec) (api string, token string, cert string, err error) { snapshot := strings.Split(spec.RancherKubernetesEngineConfig.Restore.SnapshotName, ":")[1] backup, err := p.Backups.Get(cluster.Name, snapshot) if err != nil { return "", "", "", err } if backup.Spec.ClusterID != cluster.Name { return "", "", "", fmt.Errorf("snapshot [%s] is not a backup of cluster [%s]", backup.Name, cluster.Name) } api, token, cert, err = p.driverRestore(cluster, spec, GetBackupFilename(backup)) if err != nil { return "", "", "", err } // checking if we have s3 config and that it's not inconsistent. This happens // when restore is performed with invalid credentials and then the cluster is updated to fix it. 
if spec.RancherKubernetesEngineConfig.Services.Etcd.BackupConfig.S3BackupConfig != nil { s3Config := cluster.Spec.RancherKubernetesEngineConfig.Services.Etcd.BackupConfig.S3BackupConfig appliedS3Conf := cluster.Status.AppliedSpec.RancherKubernetesEngineConfig.Services.Etcd.BackupConfig.S3BackupConfig if !reflect.DeepEqual(s3Config, appliedS3Conf) { logrus.Infof("updated spec during restore detected for cluster [%s], update is required", cluster.Name) api, token, cert, _, err = p.driverUpdate(cluster, spec) } } return api, token, cert, err } func GetBackupFilenameFromURL(URL string) (string, error) { if !isValidURL(URL) { return "", fmt.Errorf("URL is not valid: [%s]", URL) } parsedURL, err := url.Parse(URL) if err != nil { return "", err } if parsedURL.Path == "" { return "", fmt.Errorf("No path found in URL: [%s]", URL) } extractedPath := path.Base(parsedURL.Path) return extractedPath, nil } // isValidURL tests a string to determine if it is a url or not. // https://golangcode.com/how-to-check-if-a-string-is-a-url/ func isValidURL(URL string) bool { _, err := url.ParseRequestURI(URL) if err != nil { return false } return true } func GetBackupFilename(backup *v3.EtcdBackup) string { snapshot := backup.Name if filename, err := GetBackupFilenameFromURL(backup.Spec.Filename); err == nil { // s3 file // need to remove extension snapshot = strings.TrimSuffix(filename, path.Ext(filename)) } else if len(backup.Spec.Filename) != 0 { // not s3 url snapshot = strings.TrimSuffix(backup.Spec.Filename, path.Ext(backup.Spec.Filename)) } return snapshot } // transform an imported cluster into a k3s cluster using its discovered version func k3sClusterConfig(cluster *v3.Cluster) error { // version is not found until cluster is provisioned if cluster.Status.Driver == "" || cluster.Status.Version == nil { return &controller.ForgetError{ Err: fmt.Errorf("waiting for full cluster configuration"), Reason: "Pending"} } if cluster.Status.Driver == v3.ClusterDriverK3s { return nil //no-op 
} if strings.Contains(cluster.Status.Version.String(), "k3s") { cluster.Status.Driver = v3.ClusterDriverK3s // only set these values on init if cluster.Spec.K3sConfig == nil { cluster.Spec.K3sConfig = &v3.K3sConfig{ Version: cluster.Status.Version.String(), K3sUpgradeStrategy: v3.K3sUpgradeStrategy{ ServerConcurrency: 1, WorkerConcurrency: 1, }, } } } return nil }
package machineprovision import ( "fmt" "regexp" "sort" "strings" rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" "github.com/rancher/rancher/pkg/settings" "github.com/rancher/wrangler/pkg/data" "github.com/rancher/wrangler/pkg/generic" name2 "github.com/rancher/wrangler/pkg/name" corev1 "k8s.io/api/core/v1" apierror "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" capi "sigs.k8s.io/cluster-api/api/v1alpha4" ) var ( regExHyphen = regexp.MustCompile("([a-z])([A-Z])") envNameOverride = map[string]string{ "amazonec2": "AWS", "rackspace": "OS", "openstack": "OS", } ) type driverArgs struct { rkev1.RKEMachineStatus DriverName string ImageName string ImagePullPolicy corev1.PullPolicy EnvSecret *corev1.Secret StateSecretName string BootstrapSecretName string BootstrapOptional bool Args []string } func MachineStateSecretName(machineName string) string { return name2.SafeConcatName(machineName, "machine", "state") } func (h *handler) getArgsEnvAndStatus(typeMeta meta.Type, meta metav1.Object, data data.Object, create bool) (driverArgs, error) { var ( url, hash, cloudCredentialSecretName string ) args := data.Map("spec") driver := getNodeDriverName(typeMeta) nd, err := h.nodeDriverCache.Get(driver) if !create && apierror.IsNotFound(err) { url = data.String("status", "driverURL") hash = data.String("status", "driverHash") } else if err != nil { return driverArgs{}, err } else { url = nd.Spec.URL hash = nd.Spec.Checksum } if strings.HasPrefix(url, "local://") { url = "" hash = "" } secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: name2.SafeConcatName(meta.GetName(), "machine", "driver", "secret"), Namespace: meta.GetNamespace(), }, Data: map[string][]byte{}, } bootstrapName, cloudCredentialSecretName, secrets, err := h.getSecretData(meta, data, create) if err != nil { return driverArgs{}, err } for k, v := range secrets { envName := 
envNameOverride[driver] if envName == "" { envName = driver } k := strings.ToUpper(envName + "_" + regExHyphen.ReplaceAllString(k, "${1}_${2}")) secret.Data[k] = []byte(v) } secretName := MachineStateSecretName(meta.GetName()) cmd := []string{ fmt.Sprintf("--driver-download-url=%s", url), fmt.Sprintf("--driver-hash=%s", hash), fmt.Sprintf("--secret-namespace=%s", meta.GetNamespace()), fmt.Sprintf("--secret-name=%s", secretName), } if create { cmd = append(cmd, "create", fmt.Sprintf("--driver=%s", driver), fmt.Sprintf("--custom-install-script=/run/secrets/machine/value")) cmd = append(cmd, toArgs(driver, args)...) } else { cmd = append(cmd, "rm", "-y") } cmd = append(cmd, meta.GetName()) return driverArgs{ DriverName: driver, ImageName: settings.PrefixPrivateRegistry(settings.MachineProvisionImage.Get()), ImagePullPolicy: corev1.PullAlways, EnvSecret: secret, StateSecretName: secretName, BootstrapSecretName: bootstrapName, BootstrapOptional: !create, Args: cmd, RKEMachineStatus: rkev1.RKEMachineStatus{ Ready: data.String("spec", "providerID") != "" && data.Bool("status", "jobComplete"), DriverHash: hash, DriverURL: url, CloudCredentialSecretName: cloudCredentialSecretName, }, }, nil } func (h *handler) getBootstrapSecret(machine *capi.Machine) (string, error) { if machine == nil || machine.Spec.Bootstrap.ConfigRef == nil { return "", nil } gvk := schema.FromAPIVersionAndKind(machine.Spec.Bootstrap.ConfigRef.APIVersion, machine.Spec.Bootstrap.ConfigRef.Kind) bootstrap, err := h.dynamic.Get(gvk, machine.Namespace, machine.Spec.Bootstrap.ConfigRef.Name) if apierror.IsNotFound(err) { return "", nil } else if err != nil { return "", err } d, err := data.Convert(bootstrap) if err != nil { return "", err } return d.String("status", "dataSecretName"), nil } func (h *handler) getSecretData(meta metav1.Object, obj data.Object, create bool) (string, string, map[string]string, error) { var ( err error machine *capi.Machine result = map[string]string{} ) oldCredential := 
obj.String("status", "cloudCredentialSecretName") cloudCredentialSecretName := obj.String("spec", "common", "cloudCredentialSecretName") for _, ref := range meta.GetOwnerReferences() { if ref.Kind != "Machine" { continue } machine, err = h.machines.Get(meta.GetNamespace(), ref.Name) if err != nil { return "", "", nil, err } } if machine == nil && create { return "", "", nil, generic.ErrSkip } if cloudCredentialSecretName == "" { cloudCredentialSecretName = oldCredential } if cloudCredentialSecretName != "" { secret, err := h.secrets.Get(meta.GetNamespace(), cloudCredentialSecretName) if err != nil { return "", "", nil, err } for k, v := range secret.Data { result[k] = string(v) } } bootstrapName, err := h.getBootstrapSecret(machine) if err != nil { return "", "", nil, err } return bootstrapName, cloudCredentialSecretName, result, nil } func toArgs(driverName string, args map[string]interface{}) (cmd []string) { for k, v := range args { dmField := "--" + driverName + "-" + strings.ToLower(regExHyphen.ReplaceAllString(k, "${1}-${2}")) if v == nil { continue } switch v.(type) { case float64: cmd = append(cmd, fmt.Sprintf("%s=%v", dmField, v)) case string: if v.(string) != "" { cmd = append(cmd, fmt.Sprintf("%s=%s", dmField, v.(string))) } case bool: if v.(bool) { cmd = append(cmd, dmField) } case []interface{}: for _, s := range v.([]interface{}) { if _, ok := s.(string); ok { cmd = append(cmd, fmt.Sprintf("%s=%s", dmField, s.(string))) } } } } sort.Strings(cmd) return } func getNodeDriverName(typeMeta meta.Type) string { return strings.ToLower(strings.TrimSuffix(typeMeta.GetKind(), "Machine")) } Don't modify EC2 security groups unless the group is "rancher-nodes" package machineprovision import ( "fmt" "regexp" "sort" "strings" rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" "github.com/rancher/rancher/pkg/settings" "github.com/rancher/wrangler/pkg/data" "github.com/rancher/wrangler/pkg/data/convert" "github.com/rancher/wrangler/pkg/generic" name2 
"github.com/rancher/wrangler/pkg/name" corev1 "k8s.io/api/core/v1" apierror "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" capi "sigs.k8s.io/cluster-api/api/v1alpha4" ) var ( regExHyphen = regexp.MustCompile("([a-z])([A-Z])") envNameOverride = map[string]string{ "amazonec2": "AWS", "rackspace": "OS", "openstack": "OS", } ) type driverArgs struct { rkev1.RKEMachineStatus DriverName string ImageName string ImagePullPolicy corev1.PullPolicy EnvSecret *corev1.Secret StateSecretName string BootstrapSecretName string BootstrapOptional bool Args []string } func MachineStateSecretName(machineName string) string { return name2.SafeConcatName(machineName, "machine", "state") } func (h *handler) getArgsEnvAndStatus(typeMeta meta.Type, meta metav1.Object, data data.Object, create bool) (driverArgs, error) { var ( url, hash, cloudCredentialSecretName string ) args := data.Map("spec") driver := getNodeDriverName(typeMeta) nd, err := h.nodeDriverCache.Get(driver) if !create && apierror.IsNotFound(err) { url = data.String("status", "driverURL") hash = data.String("status", "driverHash") } else if err != nil { return driverArgs{}, err } else { url = nd.Spec.URL hash = nd.Spec.Checksum } if strings.HasPrefix(url, "local://") { url = "" hash = "" } secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: name2.SafeConcatName(meta.GetName(), "machine", "driver", "secret"), Namespace: meta.GetNamespace(), }, Data: map[string][]byte{}, } bootstrapName, cloudCredentialSecretName, secrets, err := h.getSecretData(meta, data, create) if err != nil { return driverArgs{}, err } for k, v := range secrets { envName := envNameOverride[driver] if envName == "" { envName = driver } k := strings.ToUpper(envName + "_" + regExHyphen.ReplaceAllString(k, "${1}_${2}")) secret.Data[k] = []byte(v) } secretName := MachineStateSecretName(meta.GetName()) cmd := []string{ 
fmt.Sprintf("--driver-download-url=%s", url), fmt.Sprintf("--driver-hash=%s", hash), fmt.Sprintf("--secret-namespace=%s", meta.GetNamespace()), fmt.Sprintf("--secret-name=%s", secretName), } if create { cmd = append(cmd, "create", fmt.Sprintf("--driver=%s", driver), fmt.Sprintf("--custom-install-script=/run/secrets/machine/value")) cmd = append(cmd, toArgs(driver, args)...) } else { cmd = append(cmd, "rm", "-y") } cmd = append(cmd, meta.GetName()) return driverArgs{ DriverName: driver, ImageName: settings.PrefixPrivateRegistry(settings.MachineProvisionImage.Get()), ImagePullPolicy: corev1.PullAlways, EnvSecret: secret, StateSecretName: secretName, BootstrapSecretName: bootstrapName, BootstrapOptional: !create, Args: cmd, RKEMachineStatus: rkev1.RKEMachineStatus{ Ready: data.String("spec", "providerID") != "" && data.Bool("status", "jobComplete"), DriverHash: hash, DriverURL: url, CloudCredentialSecretName: cloudCredentialSecretName, }, }, nil } func (h *handler) getBootstrapSecret(machine *capi.Machine) (string, error) { if machine == nil || machine.Spec.Bootstrap.ConfigRef == nil { return "", nil } gvk := schema.FromAPIVersionAndKind(machine.Spec.Bootstrap.ConfigRef.APIVersion, machine.Spec.Bootstrap.ConfigRef.Kind) bootstrap, err := h.dynamic.Get(gvk, machine.Namespace, machine.Spec.Bootstrap.ConfigRef.Name) if apierror.IsNotFound(err) { return "", nil } else if err != nil { return "", err } d, err := data.Convert(bootstrap) if err != nil { return "", err } return d.String("status", "dataSecretName"), nil } func (h *handler) getSecretData(meta metav1.Object, obj data.Object, create bool) (string, string, map[string]string, error) { var ( err error machine *capi.Machine result = map[string]string{} ) oldCredential := obj.String("status", "cloudCredentialSecretName") cloudCredentialSecretName := obj.String("spec", "common", "cloudCredentialSecretName") for _, ref := range meta.GetOwnerReferences() { if ref.Kind != "Machine" { continue } machine, err = 
h.machines.Get(meta.GetNamespace(), ref.Name) if err != nil { return "", "", nil, err } } if machine == nil && create { return "", "", nil, generic.ErrSkip } if cloudCredentialSecretName == "" { cloudCredentialSecretName = oldCredential } if cloudCredentialSecretName != "" { secret, err := h.secrets.Get(meta.GetNamespace(), cloudCredentialSecretName) if err != nil { return "", "", nil, err } for k, v := range secret.Data { result[k] = string(v) } } bootstrapName, err := h.getBootstrapSecret(machine) if err != nil { return "", "", nil, err } return bootstrapName, cloudCredentialSecretName, result, nil } func toArgs(driverName string, args map[string]interface{}) (cmd []string) { for k, v := range args { dmField := "--" + driverName + "-" + strings.ToLower(regExHyphen.ReplaceAllString(k, "${1}-${2}")) if v == nil { continue } switch v.(type) { case float64: cmd = append(cmd, fmt.Sprintf("%s=%v", dmField, v)) case string: if v.(string) != "" { cmd = append(cmd, fmt.Sprintf("%s=%s", dmField, v.(string))) } case bool: if v.(bool) { cmd = append(cmd, dmField) } case []interface{}: for _, s := range v.([]interface{}) { if _, ok := s.(string); ok { cmd = append(cmd, fmt.Sprintf("%s=%s", dmField, s.(string))) } } } } if driverName == "amazonec2" && convert.ToString(args["securityGroup"]) != "rancher-nodes" && args["securityGroupReadonly"] == nil { cmd = append(cmd, "--amazonec2-security-group-readonly") } sort.Strings(cmd) return } func getNodeDriverName(typeMeta meta.Type) string { return strings.ToLower(strings.TrimSuffix(typeMeta.GetKind(), "Machine")) }
/* Copyright 2019 The Tekton Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package resources import ( "fmt" "path/filepath" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/artifacts" "go.uber.org/zap" "golang.org/x/xerrors" corev1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes" ) var ( outputDir = "/workspace/output/" // allowedOutputResource checks if an output resource type produces // an output that should be copied to the PVC allowedOutputResources = map[v1alpha1.PipelineResourceType]bool{ v1alpha1.PipelineResourceTypeStorage: true, v1alpha1.PipelineResourceTypeGit: true, } ) // AddOutputResources reads the output resources and adds the corresponding container steps // This function also reads the inputs to check if resources are redeclared in inputs and has any custom // target directory. // Steps executed: // 1. If taskrun has owner reference as pipelinerun then all outputs are copied to parents PVC // and also runs any custom upload steps (upload to blob store) // 2. If taskrun does not have pipelinerun as owner reference then all outputs resources execute their custom // upload steps (like upload to blob store ) // // Resource source path determined // 1. If resource is declared in inputs then target path from input resource is used to identify source path // 2. 
If resource is declared in outputs only then the default is /output/resource_name func AddOutputResources( kubeclient kubernetes.Interface, taskName string, taskSpec *v1alpha1.TaskSpec, taskRun *v1alpha1.TaskRun, outputResources map[string]v1alpha1.PipelineResourceInterface, logger *zap.SugaredLogger, ) (*v1alpha1.TaskSpec, error) { if taskSpec == nil || taskSpec.Outputs == nil { return taskSpec, nil } taskSpec = taskSpec.DeepCopy() pvcName := taskRun.GetPipelineRunPVCName() as, err := artifacts.GetArtifactStorage(pvcName, kubeclient, logger) if err != nil { return nil, err } // track resources that are present in input of task cuz these resources will be copied onto PVC inputResourceMap := map[string]string{} if taskSpec.Inputs != nil { for _, input := range taskSpec.Inputs.Resources { inputResourceMap[input.Name] = destinationPath(input.Name, input.TargetPath) } } for _, output := range taskSpec.Outputs.Resources { boundResource, err := getBoundResource(output.Name, taskRun.Spec.Outputs.Resources) if err != nil { return nil, xerrors.Errorf("failed to get bound resource: %w", err) } resource, ok := outputResources[boundResource.Name] if !ok || resource == nil { return nil, xerrors.Errorf("failed to get output pipeline Resource for task %q resource %v", taskName, boundResource) } var ( resourceContainers []corev1.Container resourceVolumes []corev1.Volume ) // if resource is declared in input then copy outputs to pvc // To build copy step it needs source path(which is targetpath of input resourcemap) from task input source sourcePath := inputResourceMap[boundResource.Name] if sourcePath == "" { if output.TargetPath == "" { sourcePath = filepath.Join(outputDir, boundResource.Name) } else { sourcePath = output.TargetPath } } resource.SetDestinationDirectory(sourcePath) switch resource.GetType() { case v1alpha1.PipelineResourceTypeStorage: { storageResource, ok := resource.(v1alpha1.PipelineStorageResourceInterface) if !ok { return nil, xerrors.Errorf("task %q invalid 
storage Pipeline Resource: %q", taskName, boundResource.ResourceRef.Name, ) } resourceContainers, resourceVolumes, err = addStoreUploadStep(taskSpec, storageResource) if err != nil { return nil, xerrors.Errorf("task %q invalid Pipeline Resource: %q; invalid upload steps err: %w", taskName, boundResource.ResourceRef.Name, err) } } default: { resourceContainers, err = resource.GetUploadContainerSpec() if err != nil { return nil, xerrors.Errorf("task %q invalid download spec: %q; error %w", taskName, boundResource.ResourceRef.Name, err) } } } if allowedOutputResources[resource.GetType()] && taskRun.HasPipelineRunOwnerReference() { var newSteps []corev1.Container for _, dPath := range boundResource.Paths { containers := as.GetCopyToStorageFromContainerSpec(resource.GetName(), sourcePath, dPath) newSteps = append(newSteps, containers...) } resourceContainers = append(resourceContainers, newSteps...) resourceVolumes = append(resourceVolumes, as.GetSecretsVolumes()...) } taskSpec.Steps = append(taskSpec.Steps, resourceContainers...) taskSpec.Volumes = append(taskSpec.Volumes, resourceVolumes...) 
if as.GetType() == v1alpha1.ArtifactStoragePVCType { if pvcName == "" { return taskSpec, nil } // attach pvc volume only if it is not already attached for _, buildVol := range taskSpec.Volumes { if buildVol.Name == pvcName { return taskSpec, nil } } taskSpec.Volumes = append(taskSpec.Volumes, GetPVCVolume(pvcName)) } } return taskSpec, nil } func addStoreUploadStep(spec *v1alpha1.TaskSpec, storageResource v1alpha1.PipelineStorageResourceInterface, ) ([]corev1.Container, []corev1.Volume, error) { gcsContainers, err := storageResource.GetUploadContainerSpec() if err != nil { return nil, nil, err } var storageVol []corev1.Volume mountedSecrets := map[string]string{} for _, volume := range spec.Volumes { mountedSecrets[volume.Name] = "" } // Map holds list of secrets that are mounted as volumes for _, secretParam := range storageResource.GetSecretParams() { volName := fmt.Sprintf("volume-%s-%s", storageResource.GetName(), secretParam.SecretName) gcsSecretVolume := corev1.Volume{ Name: volName, VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ SecretName: secretParam.SecretName, }, }, } if _, ok := mountedSecrets[volName]; !ok { storageVol = append(storageVol, gcsSecretVolume) mountedSecrets[volName] = "" } } return gcsContainers, storageVol, nil } Add a warning when the same resource is used as an input and an output. In the future, we will be changing the behavior of this. Outputs will always be copied from the /workspace/outputs directory, rather than sometimes copying them from the input. /* Copyright 2019 The Tekton Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ package resources import ( "fmt" "path/filepath" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/artifacts" "go.uber.org/zap" "golang.org/x/xerrors" corev1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes" ) var ( outputDir = "/workspace/output/" // allowedOutputResource checks if an output resource type produces // an output that should be copied to the PVC allowedOutputResources = map[v1alpha1.PipelineResourceType]bool{ v1alpha1.PipelineResourceTypeStorage: true, v1alpha1.PipelineResourceTypeGit: true, } ) // AddOutputResources reads the output resources and adds the corresponding container steps // This function also reads the inputs to check if resources are redeclared in inputs and has any custom // target directory. // Steps executed: // 1. If taskrun has owner reference as pipelinerun then all outputs are copied to parents PVC // and also runs any custom upload steps (upload to blob store) // 2. If taskrun does not have pipelinerun as owner reference then all outputs resources execute their custom // upload steps (like upload to blob store ) // // Resource source path determined // 1. If resource is declared in inputs then target path from input resource is used to identify source path // 2. 
If resource is declared in outputs only then the default is /output/resource_name func AddOutputResources( kubeclient kubernetes.Interface, taskName string, taskSpec *v1alpha1.TaskSpec, taskRun *v1alpha1.TaskRun, outputResources map[string]v1alpha1.PipelineResourceInterface, logger *zap.SugaredLogger, ) (*v1alpha1.TaskSpec, error) { if taskSpec == nil || taskSpec.Outputs == nil { return taskSpec, nil } taskSpec = taskSpec.DeepCopy() pvcName := taskRun.GetPipelineRunPVCName() as, err := artifacts.GetArtifactStorage(pvcName, kubeclient, logger) if err != nil { return nil, err } // track resources that are present in input of task cuz these resources will be copied onto PVC inputResourceMap := map[string]string{} if taskSpec.Inputs != nil { for _, input := range taskSpec.Inputs.Resources { inputResourceMap[input.Name] = destinationPath(input.Name, input.TargetPath) } } for _, output := range taskSpec.Outputs.Resources { boundResource, err := getBoundResource(output.Name, taskRun.Spec.Outputs.Resources) if err != nil { return nil, xerrors.Errorf("failed to get bound resource: %w", err) } resource, ok := outputResources[boundResource.Name] if !ok || resource == nil { return nil, xerrors.Errorf("failed to get output pipeline Resource for task %q resource %v", taskName, boundResource) } var ( resourceContainers []corev1.Container resourceVolumes []corev1.Volume ) // if resource is declared in input then copy outputs to pvc // To build copy step it needs source path(which is targetpath of input resourcemap) from task input source sourcePath := inputResourceMap[boundResource.Name] if sourcePath != "" { logger.Warn(`This task uses the same resource as an input and output. The behavior of this will change in a future release. 
See https://github.com/tektoncd/pipeline/issues/1118 for more information.`) } else { if output.TargetPath == "" { sourcePath = filepath.Join(outputDir, boundResource.Name) } else { sourcePath = output.TargetPath } } resource.SetDestinationDirectory(sourcePath) switch resource.GetType() { case v1alpha1.PipelineResourceTypeStorage: { storageResource, ok := resource.(v1alpha1.PipelineStorageResourceInterface) if !ok { return nil, xerrors.Errorf("task %q invalid storage Pipeline Resource: %q", taskName, boundResource.ResourceRef.Name, ) } resourceContainers, resourceVolumes, err = addStoreUploadStep(taskSpec, storageResource) if err != nil { return nil, xerrors.Errorf("task %q invalid Pipeline Resource: %q; invalid upload steps err: %w", taskName, boundResource.ResourceRef.Name, err) } } default: { resourceContainers, err = resource.GetUploadContainerSpec() if err != nil { return nil, xerrors.Errorf("task %q invalid download spec: %q; error %w", taskName, boundResource.ResourceRef.Name, err) } } } if allowedOutputResources[resource.GetType()] && taskRun.HasPipelineRunOwnerReference() { var newSteps []corev1.Container for _, dPath := range boundResource.Paths { containers := as.GetCopyToStorageFromContainerSpec(resource.GetName(), sourcePath, dPath) newSteps = append(newSteps, containers...) } resourceContainers = append(resourceContainers, newSteps...) resourceVolumes = append(resourceVolumes, as.GetSecretsVolumes()...) } taskSpec.Steps = append(taskSpec.Steps, resourceContainers...) taskSpec.Volumes = append(taskSpec.Volumes, resourceVolumes...) 
if as.GetType() == v1alpha1.ArtifactStoragePVCType { if pvcName == "" { return taskSpec, nil } // attach pvc volume only if it is not already attached for _, buildVol := range taskSpec.Volumes { if buildVol.Name == pvcName { return taskSpec, nil } } taskSpec.Volumes = append(taskSpec.Volumes, GetPVCVolume(pvcName)) } } return taskSpec, nil } func addStoreUploadStep(spec *v1alpha1.TaskSpec, storageResource v1alpha1.PipelineStorageResourceInterface, ) ([]corev1.Container, []corev1.Volume, error) { gcsContainers, err := storageResource.GetUploadContainerSpec() if err != nil { return nil, nil, err } var storageVol []corev1.Volume mountedSecrets := map[string]string{} for _, volume := range spec.Volumes { mountedSecrets[volume.Name] = "" } // Map holds list of secrets that are mounted as volumes for _, secretParam := range storageResource.GetSecretParams() { volName := fmt.Sprintf("volume-%s-%s", storageResource.GetName(), secretParam.SecretName) gcsSecretVolume := corev1.Volume{ Name: volName, VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ SecretName: secretParam.SecretName, }, }, } if _, ok := mountedSecrets[volName]; !ok { storageVol = append(storageVol, gcsSecretVolume) mountedSecrets[volName] = "" } } return gcsContainers, storageVol, nil }
// directory rename: change directory name after renaming
// fix https://github.com/chrislusf/seaweedfs/issues/2068
//
// NOTE(review): this span previously contained two concatenated revisions of
// this file separated by the raw commit-message text above; only the fixed
// revision (which also patches cached *Dir nodes, not just *File nodes) is
// kept here — a duplicate package clause and duplicate func cannot compile.
package filesys

import (
	"context"

	"github.com/seaweedfs/fuse"
	"github.com/seaweedfs/fuse/fs"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

// Rename implements the FUSE rename operation for an entry of this directory.
// It moves req.OldName in dir to req.NewName in newDirectory by:
//  1. locating the entry in the local meta cache (ENOENT if absent),
//  2. asking the remote filer to perform an atomic rename (EXDEV on failure),
//  3. moving the entry inside the local meta cache (delete + insert),
//  4. patching any cached fs node — file OR directory — and any open file
//     handle so they carry the new name and inode id.
func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirectory fs.Node) error {

	newDir := newDirectory.(*Dir)

	newPath := util.NewFullPath(newDir.FullPath(), req.NewName)
	oldPath := util.NewFullPath(dir.FullPath(), req.OldName)

	glog.V(4).Infof("dir Rename %s => %s", oldPath, newPath)

	// find local old entry
	oldEntry, err := dir.wfs.metaCache.FindEntry(context.Background(), oldPath)
	if err != nil {
		glog.Errorf("dir Rename can not find source %s : %v", oldPath, err)
		return fuse.ENOENT
	}

	// update remote filer
	err = dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		request := &filer_pb.AtomicRenameEntryRequest{
			OldDirectory: dir.FullPath(),
			OldName:      req.OldName,
			NewDirectory: newDir.FullPath(),
			NewName:      req.NewName,
		}

		_, err := client.AtomicRenameEntry(ctx, request)
		if err != nil {
			glog.Errorf("dir AtomicRenameEntry %s => %s : %v", oldPath, newPath, err)
			// EXDEV tells the kernel the rename crossed a boundary it
			// cannot handle atomically
			return fuse.EXDEV
		}

		return nil
	})
	if err != nil {
		glog.V(0).Infof("dir Rename %s => %s : %v", oldPath, newPath, err)
		return fuse.EIO
	}

	// TODO: replicate renaming logic on filer
	if err := dir.wfs.metaCache.DeleteEntry(context.Background(), oldPath); err != nil {
		glog.V(0).Infof("dir Rename delete local %s => %s : %v", oldPath, newPath, err)
		return fuse.EIO
	}
	oldEntry.FullPath = newPath
	if err := dir.wfs.metaCache.InsertEntry(context.Background(), oldEntry); err != nil {
		glog.V(0).Infof("dir Rename insert local %s => %s : %v", oldPath, newPath, err)
		return fuse.EIO
	}

	oldFsNode := NodeWithId(oldPath.AsInode())
	newFsNode := NodeWithId(newPath.AsInode())
	// the cached node may be a file or a directory; patch whichever it is
	dir.wfs.Server.InvalidateInternalNode(oldFsNode, newFsNode, func(internalNode fs.Node) {
		if file, ok := internalNode.(*File); ok {
			glog.V(4).Infof("internal file node %s", file.Name)
			file.Name = req.NewName
			file.id = uint64(newFsNode)
		}
		if dir, ok := internalNode.(*Dir); ok {
			glog.V(4).Infof("internal dir node %s", dir.name)
			dir.name = req.NewName
			dir.id = uint64(newFsNode)
		}
	})

	// change file handle: re-key any open handle from the old inode to the new
	dir.wfs.handlesLock.Lock()
	defer dir.wfs.handlesLock.Unlock()
	inodeId := oldPath.AsInode()
	existingHandle, found := dir.wfs.handles[inodeId]
	glog.V(4).Infof("has open filehandle %s: %v", oldPath, found)
	if !found || existingHandle == nil {
		return nil
	}
	glog.V(4).Infof("opened filehandle %s => %s", oldPath, newPath)
	delete(dir.wfs.handles, inodeId)
	dir.wfs.handles[newPath.AsInode()] = existingHandle

	return nil
}
// added signal
//
// NOTE(review): this span previously contained two concatenated revisions of
// this file separated by the raw commit-message text above; only the later
// revision (whose GetProperties exposes the Devices property with change
// signalling) is kept — duplicate package/type declarations cannot compile.
package objects

import (
	"log"

	"github.com/godbus/dbus"
	"github.com/godbus/dbus/prop"

	"github.com/muka/device-manager/api"
)

// NewDeviceManager initialize a new DeviceManager object
func NewDeviceManager() *DeviceManager {
	d := DeviceManager{}
	d.Devices = []dbus.ObjectPath{}
	d.path = DeviceManagerPath
	d.iface = DeviceManagerInterface
	return &d
}

// DeviceManager manages devices in the gateway
type DeviceManager struct {
	api.Proxy
	// Devices is the exported list of registered device object paths
	Devices []dbus.ObjectPath
	// devices maps device id -> definition (unexported cache; unused here —
	// presumably populated elsewhere, verify against other files)
	devices map[string]*DeviceDefinition
	path    string
	iface   string
	logger  *log.Logger
}

// GetPath return object path
func (d *DeviceManager) GetPath() string {
	return d.path
}

// GetInterface return interface
func (d *DeviceManager) GetInterface() string {
	return d.iface
}

//SetLogger set default logger
func (d *DeviceManager) SetLogger(logger *log.Logger) {
	d.logger = logger
}

//GetProperties return properties
// The Devices property is read-only and emits a PropertiesChanged signal
// (prop.EmitTrue); the callback just logs value changes.
func (d *DeviceManager) GetProperties() map[string]map[string]*prop.Prop {
	return map[string]map[string]*prop.Prop{
		d.GetInterface(): {
			"Devices": {
				Value:    d.Devices,
				Writable: false,
				Emit:     prop.EmitTrue,
				Callback: func(c *prop.Change) *dbus.Error {
					d.logger.Printf("Changed value %s=%v on %s", c.Name, c.Value, c.Iface)
					return nil
				},
			},
		},
	}
}

// Find search for devices
func (d *DeviceManager) Find(q *BaseQuery) (devices []dbus.ObjectPath, err *dbus.Error) {
	// BUG FIX: the original tested `&d.Devices == nil` — the address of a
	// struct field is never nil, so the guard was dead code (go vet flags
	// this). Test the slice itself, as intended.
	if d.Devices == nil {
		d.Devices = devices
	}
	return d.Devices, err
}

// Create add a device
func (d *DeviceManager) Create(dev DeviceDefinition) (path dbus.ObjectPath, err *dbus.Error) {
	return path, err
}

// Read a device definition
// NOTE: returns a hard-coded dummy SensorTag definition regardless of id.
func (d *DeviceManager) Read(id string) (dev DeviceDefinition, err *dbus.Error) {

	dev.Id = id
	dev.Description = "My SensorTag device"
	dev.Name = "SensorTag"
	dev.Path = dbus.ObjectPath("/iot/agile/device/Dummy")
	dev.Protocol = dbus.ObjectPath("/iot/agile/protocol/BLE")

	dev.Streams = make([]DeviceComponent, 2)

	dev.Streams[0] = DeviceComponent{}
	dev.Streams[0].Id = "temperature"
	dev.Streams[0].Format = "float"
	dev.Streams[0].Unit = "C"

	dev.Streams[1] = DeviceComponent{}
	dev.Streams[1].Id = "light"
	dev.Streams[1].Format = "float"
	dev.Streams[1].Unit = "lumen"

	log.Printf("Read %s: \n%v\n", id, dev)

	return dev, err
}

// Update a device definition
func (d *DeviceManager) Update(id string, dev DeviceDefinition) (res bool, err *dbus.Error) {
	res = true
	return res, err
}

// Delete a device definition
func (d *DeviceManager) Delete(id string) (res bool, err *dbus.Error) {
	res = true
	return res, err
}

// Batch exec batch ops
func (d *DeviceManager) Batch(operation string, ops map[string]string) (res bool, err *dbus.Error) {
	res = true
	return res, err
}

// BaseQuery base query for devices record
type BaseQuery struct {
	Criteria map[string]string
	OrderBy  map[string]string
	Limit    int32
	Offset   int32
}

// DeviceComponent A device component
type DeviceComponent struct {
	Id         string
	Unit       string
	Format     string
	Properties map[string]string
}

// DeviceDefinition A device details list
type DeviceDefinition struct {
	Id          string
	Name        string
	Description string
	Path        dbus.ObjectPath
	Protocol    dbus.ObjectPath
	Properties  map[string]string
	Streams     []DeviceComponent
}
// fix(kv): Really use inmem store for in memory URM tests
//
// NOTE(review): this span previously contained two concatenated revisions of
// this file separated by the raw commit-message text above; only the fixed
// revision (TestInmemUserResourceMappingService now uses NewTestInmemStore
// instead of NewTestBoltStore) is kept. Two additional defects fixed below:
// both cleanup loops logged with t.Logf("failed to delete org", err) — a
// Printf-style call with no formatting directive (go vet error), and the
// bucket loop used the wrong noun.
package kv_test

import (
	"context"
	"testing"

	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/inmem"
	"github.com/influxdata/influxdb/v2/kv"
	"github.com/influxdata/influxdb/v2/snowflake"
	influxdbtesting "github.com/influxdata/influxdb/v2/testing"
	"go.uber.org/zap/zaptest"
)

// testable is the subset of testing.TB needed by the helpers below, so they
// can be shared between *testing.T and *testing.B callers.
type testable interface {
	Helper()
	Logf(string, ...interface{})
	Error(args ...interface{})
	Errorf(string, ...interface{})
	Fail()
	Failed() bool
	Name() string
	FailNow()
	Fatal(args ...interface{})
	Fatalf(format string, args ...interface{})
}

func TestBoltUserResourceMappingService(t *testing.T) {
	influxdbtesting.UserResourceMappingService(initURMServiceFunc(NewTestBoltStore), t)
}

func TestInmemUserResourceMappingService(t *testing.T) {
	influxdbtesting.UserResourceMappingService(initURMServiceFunc(NewTestInmemStore), t)
}

type userResourceMappingTestFunc func(influxdbtesting.UserResourceFields, *testing.T) (influxdb.UserResourceMappingService, func())

// initURMServiceFunc adapts a store constructor into the test-fixture shape
// expected by influxdbtesting.UserResourceMappingService; the returned
// close function tears down the service, then the store.
func initURMServiceFunc(storeFn func(*testing.T) (kv.SchemaStore, func(), error), confs ...kv.ServiceConfig) userResourceMappingTestFunc {
	return func(f influxdbtesting.UserResourceFields, t *testing.T) (influxdb.UserResourceMappingService, func()) {
		s, closeStore, err := storeFn(t)
		if err != nil {
			t.Fatalf("failed to create new kv store: %v", err)
		}

		svc, closeSvc := initUserResourceMappingService(s, f, t, confs...)
		return svc, func() {
			closeSvc()
			closeStore()
		}
	}
}

// initUserResourceMappingService populates a kv.Service with the orgs, users,
// buckets and mappings from f, returning the service and a cleanup func that
// removes everything it created.
func initUserResourceMappingService(s kv.SchemaStore, f influxdbtesting.UserResourceFields, t testable, configs ...kv.ServiceConfig) (influxdb.UserResourceMappingService, func()) {
	ctx := context.Background()
	svc := kv.NewService(zaptest.NewLogger(t), s, configs...)

	for _, o := range f.Organizations {
		if err := svc.CreateOrganization(ctx, o); err != nil {
			t.Fatalf("failed to create org %q", err)
		}
	}

	for _, u := range f.Users {
		if err := svc.CreateUser(ctx, u); err != nil {
			t.Fatalf("failed to create user %q", err)
		}
	}

	for _, b := range f.Buckets {
		if err := svc.PutBucket(ctx, b); err != nil {
			t.Fatalf("failed to create bucket %q", err)
		}
	}

	for _, m := range f.UserResourceMappings {
		if err := svc.CreateUserResourceMapping(ctx, m); err != nil {
			t.Fatalf("failed to populate mappings %q", err)
		}
	}

	return svc, func() {
		for _, m := range f.UserResourceMappings {
			if err := svc.DeleteUserResourceMapping(ctx, m.ResourceID, m.UserID); err != nil {
				t.Logf("failed to remove user resource mapping: %v", err)
			}
		}

		for _, b := range f.Buckets {
			if err := svc.DeleteBucket(ctx, b.ID); err != nil {
				// was: Logf("failed to delete org", err) — wrong noun and
				// missing %v directive
				t.Logf("failed to delete bucket: %v", err)
			}
		}

		for _, u := range f.Users {
			if err := svc.DeleteUser(ctx, u.ID); err != nil {
				t.Fatalf("failed to delete user %q", err)
			}
		}

		for _, o := range f.Organizations {
			if err := svc.DeleteOrganization(ctx, o.ID); err != nil {
				// was: Logf("failed to delete org", err) — missing %v directive
				t.Logf("failed to delete org: %v", err)
			}
		}
	}
}

// BenchmarkReadURMs measures FindUserResourceMappings over 10k mappings
// spread across 10 users in an in-memory store.
func BenchmarkReadURMs(b *testing.B) {
	urms := influxdbtesting.UserResourceFields{
		UserResourceMappings: make([]*influxdb.UserResourceMapping, 10000),
	}
	idgen := snowflake.NewDefaultIDGenerator()
	users := make([]influxdb.ID, 10)
	for i := 0; i < 10; i++ {
		users[i] = idgen.ID()
	}
	for i := 0; i < 10000; i++ {
		urms.UserResourceMappings[i] = &influxdb.UserResourceMapping{
			ResourceID:   idgen.ID(),
			UserID:       users[i%len(users)],
			UserType:     influxdb.Member,
			ResourceType: influxdb.BucketsResourceType,
		}
	}
	st := inmem.NewKVStore()
	initUserResourceMappingService(st, urms, b)
	svc := kv.NewService(zaptest.NewLogger(b), st)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		svc.FindUserResourceMappings(context.Background(), influxdb.UserResourceMappingFilter{
			UserID: users[0],
		})
	}
}
// organize imports
//
// NOTE(review): this span previously contained two concatenated revisions of
// this file separated by the raw commit-message text above; only the later
// revision is kept — it groups imports (stdlib / third-party / local) and,
// critically, actually imports datastore/authstore, which the earlier
// revision referenced without importing.
package service

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"net/http"
	"time"

	"github.com/google/uuid"
	"github.com/gorilla/mux"
	"github.com/jackc/pgconn"
	"github.com/jackc/pgx/v4"
	"github.com/rs/zerolog"

	"github.com/gilcrest/go-api-basic/datastore/authstore"
	"github.com/gilcrest/go-api-basic/domain/audit"
	"github.com/gilcrest/go-api-basic/domain/auth"
	"github.com/gilcrest/go-api-basic/domain/errs"
	"github.com/gilcrest/go-api-basic/domain/secure"
)

// DBAuthorizer determines authorization for a user
// by running sql against tables in the database
type DBAuthorizer struct {
	Datastorer Datastorer
}

// Authorize ensures that a subject (user.User) can perform a
// particular action on a resource, e.g. subject otto.maddox711@gmail.com
// can read (GET) the resource /api/v1/movies (path).
//
// The http.Request context is used to determine the route/path information
// and must be issued through the gorilla/mux library.
func (a DBAuthorizer) Authorize(lgr zerolog.Logger, r *http.Request, adt audit.Audit) error {
	// current matched route for the request
	route := mux.CurrentRoute(r)

	// CurrentRoute can return a nil if route not setup properly or
	// is being called outside the handler of the matched route
	if route == nil {
		return errs.E(errs.Unauthorized, "nil route returned from mux.CurrentRoute")
	}

	pathTemplate, err := route.GetPathTemplate()
	if err != nil {
		return errs.E(errs.Unauthorized, err)
	}

	arg := authstore.IsAuthorizedParams{
		Resource:  pathTemplate,
		Operation: r.Method,
		UserID:    adt.User.ID,
	}

	// call IsAuthorized method to validate user has access to the resource and operation
	var authorizedID uuid.UUID
	authorizedID, err = authstore.New(a.Datastorer.Pool()).IsAuthorized(r.Context(), arg)
	if err != nil || authorizedID == uuid.Nil {
		lgr.Info().Str("user", adt.User.Username).Str("resource", pathTemplate).Str("operation", r.Method).
			Msgf("Unauthorized (user: %s, resource: %s, operation: %s)", adt.User.Username, pathTemplate, r.Method)

		// "In summary, a 401 Unauthorized response should be used for missing or
		// bad authentication, and a 403 Forbidden response should be used afterwards,
		// when the user is authenticated but isn’t authorized to perform the
		// requested operation on the given resource."
		// If the user has gotten here, they have gotten through authentication
		// but do not have the right access, thus they are Unauthorized
		return errs.E(errs.Unauthorized, fmt.Sprintf("user %s does not have %s permission for %s", adt.User.Username, r.Method, pathTemplate))
	}

	lgr.Debug().Str("user", adt.User.Username).Str("resource", pathTemplate).Str("operation", r.Method).
		Msgf("Authorized (user: %s, resource: %s, operation: %s)", adt.User.Username, pathTemplate, r.Method)

	return nil
}

// PermissionService is a service for creating, reading, updating and deleting a Permission
type PermissionService struct {
	Datastorer Datastorer
}

// Create is used to create a Permission
func (s PermissionService) Create(ctx context.Context, r *auth.Permission, adt audit.Audit) (p auth.Permission, err error) {
	// set Unique ID for Permission
	r.ID = uuid.New()
	r.ExternalID = secure.NewID()

	// start db txn using pgxpool
	var tx pgx.Tx
	tx, err = s.Datastorer.BeginTx(ctx)
	if err != nil {
		return auth.Permission{}, err
	}
	// defer transaction rollback and handle error, if any
	defer func() {
		err = s.Datastorer.RollbackTx(ctx, tx, err)
	}()

	arg := authstore.CreatePermissionParams{
		PermissionID:          r.ID,
		PermissionExtlID:      r.ExternalID.String(),
		Resource:              r.Resource,
		Operation:             r.Operation,
		PermissionDescription: r.Description,
		Active:                sql.NullBool{Bool: r.Active, Valid: true},
		CreateAppID:           adt.App.ID,
		CreateUserID:          adt.User.NullID(),
		CreateTimestamp:       time.Now(),
		UpdateAppID:           adt.App.ID,
		UpdateUserID:          adt.User.NullID(),
		UpdateTimestamp:       time.Now(),
	}

	var rowsAffected int64
	rowsAffected, err = authstore.New(tx).CreatePermission(ctx, arg)
	if err != nil {
		var pgErr *pgconn.PgError
		if errors.As(err, &pgErr) {
			// 23505 is the Postgres unique_violation error code
			if pgErr.Code == "23505" {
				return auth.Permission{}, errs.E(errs.Exist, errs.Exist.String())
			}
			return auth.Permission{}, errs.E(errs.Database, pgErr.Message)
		}
		return auth.Permission{}, errs.E(errs.Database, err)
	}

	// should only impact exactly one record
	if rowsAffected != 1 {
		return auth.Permission{}, errs.E(errs.Database, fmt.Sprintf("Create() should insert 1 row, actual: %d", rowsAffected))
	}

	// commit db txn using pgxpool
	err = s.Datastorer.CommitTx(ctx, tx)
	if err != nil {
		return auth.Permission{}, err
	}

	return *r, nil
}

// FindAll retrieves all permissions
func (s PermissionService) FindAll(ctx context.Context) ([]auth.Permission, error) {
	rows, err := authstore.New(s.Datastorer.Pool()).FindAllPermissions(ctx)
	if err != nil {
		return nil, errs.E(errs.Database, err)
	}

	var sp []auth.Permission
	for _, row := range rows {
		p := auth.Permission{
			ID:          row.PermissionID,
			ExternalID:  secure.MustParseIdentifier(row.PermissionExtlID),
			Resource:    row.Resource,
			Operation:   row.Operation,
			Description: row.PermissionDescription,
			Active:      row.Active.Bool,
		}
		sp = append(sp, p)
	}

	return sp, nil
}

// RoleService is a service for creating, reading, updating and deleting a Role
type RoleService struct {
	Datastorer Datastorer
}

// Create is used to create a Role
func (s RoleService) Create(ctx context.Context, r *auth.Role, adt audit.Audit) (role auth.Role, err error) {
	// set Unique ID for Role
	r.ID = uuid.New()

	// set Unique External ID
	r.ExternalID = secure.NewID()

	// start db txn using pgxpool
	var tx pgx.Tx
	tx, err = s.Datastorer.BeginTx(ctx)
	if err != nil {
		return auth.Role{}, err
	}
	// defer transaction rollback and handle error, if any
	defer func() {
		err = s.Datastorer.RollbackTx(ctx, tx, err)
	}()

	arg := authstore.CreateRoleParams{
		RoleID:          r.ID,
		RoleExtlID:      r.ExternalID.String(),
		RoleCd:          r.Code,
		Active:          sql.NullBool{Bool: r.Active, Valid: true},
		CreateAppID:     adt.App.ID,
		CreateUserID:    adt.User.NullID(),
		CreateTimestamp: time.Now(),
		UpdateAppID:     adt.App.ID,
		UpdateUserID:    adt.User.NullID(),
		UpdateTimestamp: time.Now(),
	}

	var rowsAffected int64
	rowsAffected, err = authstore.New(tx).CreateRole(ctx, arg)
	if err != nil {
		return auth.Role{}, err
	}

	// should only impact exactly one record
	if rowsAffected != 1 {
		return auth.Role{}, errs.E(errs.Database, fmt.Sprintf("Create() should insert 1 row, actual: %d", rowsAffected))
	}

	// commit db txn using pgxpool
	err = s.Datastorer.CommitTx(ctx, tx)
	if err != nil {
		return auth.Role{}, err
	}

	return *r, nil
}
// NOTE(review): this chunk holds two concatenated revisions of the same test
// file, separated by the raw commit-message text "upd test". The only change
// between revisions is the import path of the generated proto package.

package main

import (
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	pb "proto"
	"testing"
)

const (
	address  = "localhost:50003"
	test_key = "test_key"
)

// TestCasDelay exercises the package-level cas_delay helper.
func TestCasDelay(t *testing.T) {
	cas_delay()
}

// TestSnowflake fetches the next sequence value for test_key from a locally
// running server and logs it. Requires the server on localhost:50003.
func TestSnowflake(t *testing.T) {
	// Set up a connection to the server.
	conn, err := grpc.Dial(address, grpc.WithInsecure())
	if err != nil {
		t.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	c := pb.NewSnowflakeServiceClient(conn)

	// Contact the server and print out its response.
	r, err := c.Next(context.Background(), &pb.Snowflake_Key{Name: test_key})
	if err != nil {
		t.Fatalf("could not get next value: %v", err)
	}
	t.Log(r.Value)
}

// BenchmarkSnowflake measures round-trip latency of Next over one connection.
func BenchmarkSnowflake(b *testing.B) {
	// Set up a connection to the server.
	conn, err := grpc.Dial(address, grpc.WithInsecure())
	if err != nil {
		b.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	c := pb.NewSnowflakeServiceClient(conn)
	for i := 0; i < b.N; i++ {
		// Contact the server and print out its response.
		_, err := c.Next(context.Background(), &pb.Snowflake_Key{Name: test_key})
		if err != nil {
			b.Fatalf("could not get next value: %v", err)
		}
	}
}

// TestSnowflakeUUID fetches a single UUID from the server and logs it in binary.
func TestSnowflakeUUID(t *testing.T) {
	// Set up a connection to the server.
	conn, err := grpc.Dial(address, grpc.WithInsecure())
	if err != nil {
		t.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	c := pb.NewSnowflakeServiceClient(conn)

	// Contact the server and print out its response.
	r, err := c.GetUUID(context.Background(), &pb.Snowflake_NullRequest{})
	if err != nil {
		t.Fatalf("could not get next value: %v", err)
	}
	t.Logf("%b", r.Uuid)
}

// BenchmarkSnowflakeUUID measures round-trip latency of GetUUID.
func BenchmarkSnowflakeUUID(b *testing.B) {
	// Set up a connection to the server.
	conn, err := grpc.Dial(address, grpc.WithInsecure())
	if err != nil {
		b.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	c := pb.NewSnowflakeServiceClient(conn)
	for i := 0; i < b.N; i++ {
		// Contact the server and print out its response.
		_, err := c.GetUUID(context.Background(), &pb.Snowflake_NullRequest{})
		if err != nil {
			b.Fatalf("could not get uuid: %v", err)
		}
	}
}

upd test

// Second revision: same tests, proto package now imported from
// "snowflake/proto" instead of "proto".

package main

import (
	pb "snowflake/proto"
	"testing"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

const (
	address  = "localhost:50003"
	test_key = "test_key"
)

// TestCasDelay exercises the package-level cas_delay helper.
func TestCasDelay(t *testing.T) {
	cas_delay()
}

// TestSnowflake fetches the next sequence value for test_key and logs it.
func TestSnowflake(t *testing.T) {
	// Set up a connection to the server.
	conn, err := grpc.Dial(address, grpc.WithInsecure())
	if err != nil {
		t.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	c := pb.NewSnowflakeServiceClient(conn)

	// Contact the server and print out its response.
	r, err := c.Next(context.Background(), &pb.Snowflake_Key{Name: test_key})
	if err != nil {
		t.Fatalf("could not get next value: %v", err)
	}
	t.Log(r.Value)
}

// BenchmarkSnowflake measures round-trip latency of Next over one connection.
func BenchmarkSnowflake(b *testing.B) {
	// Set up a connection to the server.
	conn, err := grpc.Dial(address, grpc.WithInsecure())
	if err != nil {
		b.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	c := pb.NewSnowflakeServiceClient(conn)
	for i := 0; i < b.N; i++ {
		// Contact the server and print out its response.
		_, err := c.Next(context.Background(), &pb.Snowflake_Key{Name: test_key})
		if err != nil {
			b.Fatalf("could not get next value: %v", err)
		}
	}
}

// TestSnowflakeUUID fetches a single UUID from the server and logs it in binary.
func TestSnowflakeUUID(t *testing.T) {
	// Set up a connection to the server.
	conn, err := grpc.Dial(address, grpc.WithInsecure())
	if err != nil {
		t.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	c := pb.NewSnowflakeServiceClient(conn)

	// Contact the server and print out its response.
	r, err := c.GetUUID(context.Background(), &pb.Snowflake_NullRequest{})
	if err != nil {
		t.Fatalf("could not get next value: %v", err)
	}
	t.Logf("%b", r.Uuid)
}

// BenchmarkSnowflakeUUID measures round-trip latency of GetUUID.
func BenchmarkSnowflakeUUID(b *testing.B) {
	// Set up a connection to the server.
	conn, err := grpc.Dial(address, grpc.WithInsecure())
	if err != nil {
		b.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	c := pb.NewSnowflakeServiceClient(conn)
	for i := 0; i < b.N; i++ {
		// Contact the server and print out its response.
		_, err := c.GetUUID(context.Background(), &pb.Snowflake_NullRequest{})
		if err != nil {
			b.Fatalf("could not get uuid: %v", err)
		}
	}
}
// NOTE(review): this chunk holds two concatenated revisions of the addresser
// worker, separated by the raw commit-message text "Kill worker on error
// removing dead addresses". The second revision adds a kill callback so the
// SetUp goroutine can stop the worker on failure.

// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package addresser

import (
	"time"

	"github.com/juju/errors"
	"github.com/juju/loggo"
	"github.com/juju/utils"

	apiWatcher "github.com/juju/juju/api/watcher"
	"github.com/juju/juju/environs"
	"github.com/juju/juju/environs/config"
	"github.com/juju/juju/instance"
	"github.com/juju/juju/network"
	"github.com/juju/juju/state"
	"github.com/juju/juju/worker"
)

var logger = loggo.GetLogger("juju.worker.addresser")

// shortAttempt bounds retries of provider address releases: up to 5s total,
// 300ms apart.
var shortAttempt = utils.AttemptStrategy{
	Total: 5 * time.Second,
	Delay: 300 * time.Millisecond,
}

type releaser interface {
	// ReleaseAddress has the same signature as the same method in the
	// environs.Networking interface.
	ReleaseAddress(instance.Id, network.Id, network.Address) error
}

// stateAddresser defines the State methods used by the addresserHandler
type stateAddresser interface {
	DeadIPAddresses() ([]*state.IPAddress, error)
	EnvironConfig() (*config.Config, error)
	IPAddress(string) (*state.IPAddress, error)
	Machine(string) (*state.Machine, error)
	WatchIPAddresses() state.StringsWatcher
}

// addresserHandler implements the worker.StringsWatchHandler methods
// (SetUp/Handle/TearDown) used by worker.NewStringsWorker below.
type addresserHandler struct {
	dying    chan struct{}
	st       stateAddresser
	releaser releaser
}

// NewWorker returns a worker that keeps track of
// IP address lifecycles, releaseing and removing Dead addresses.
func NewWorker(st stateAddresser) (worker.Worker, error) {
	config, err := st.EnvironConfig()
	if err != nil {
		return nil, errors.Trace(err)
	}
	environ, err := environs.New(config)
	if err != nil {
		return nil, errors.Trace(err)
	}
	netEnviron, ok := environs.SupportsNetworking(environ)
	if !ok {
		return nil, errors.New("environment does not support networking")
	}
	a := newWorkerWithReleaser(st, netEnviron)
	return a, nil
}

func newWorkerWithReleaser(st stateAddresser, releaser releaser) worker.Worker {
	a := &addresserHandler{
		st:       st,
		releaser: releaser,
		dying:    make(chan struct{}),
	}
	w := worker.NewStringsWorker(a)
	return w
}

// Handle is part of the StringsWorker interface.
func (a *addresserHandler) Handle(ids []string) error {
	// For each changed address id: release and remove it if Dead.
	for _, id := range ids {
		addr, err := a.st.IPAddress(id)
		if err != nil {
			return err
		}
		if addr.Life() != state.Dead {
			continue
		}
		err = a.releaseIPAddress(addr)
		if err != nil {
			return err
		}
		err = addr.Remove()
		if err != nil {
			return err
		}
	}
	return nil
}

// releaseIPAddress asks the provider to release addr, retrying per
// shortAttempt. On persistent failure the address is left in state so
// release can be retried later.
func (a *addresserHandler) releaseIPAddress(addr *state.IPAddress) (err error) {
	defer errors.DeferredAnnotatef(&err, "failed to release address %v", addr.Value)
	var machine *state.Machine
	machine, err = a.st.Machine(addr.MachineId())
	if err != nil {
		return errors.Annotatef(err, "cannot get allocated machine %q", addr.MachineId())
	}
	var instId instance.Id
	instId, err = machine.InstanceId()
	if err != nil {
		return errors.Annotatef(err, "cannot get machine %q instance ID", addr.MachineId())
	}
	netId := network.Id(addr.SubnetId())
	for attempt := shortAttempt.Start(); attempt.Next(); {
		err = a.releaser.ReleaseAddress(instId, netId, addr.Address())
		if err == nil {
			return nil
		}
	}
	// Don't remove the address from state so we
	// can retry releasing the address later.
	logger.Warningf("cannot release address %q: %v (will retry)", addr.Value(), err)
	return errors.Trace(err)
}

// SetUp is part of the StringsWorker interface.
func (a *addresserHandler) SetUp() (apiWatcher.StringsWatcher, error) {
	dead, err := a.st.DeadIPAddresses()
	if err != nil {
		return nil, errors.Trace(err)
	}
	w := a.st.WatchIPAddresses()
	deadQueue := make(chan *state.IPAddress, len(dead))
	for _, deadAddr := range dead {
		deadQueue <- deadAddr
	}
	// NOTE(review): this goroutine's select (with default) processes at most
	// one queued dead address before returning — confirm that is intended.
	go func() {
		select {
		case addr := <-deadQueue:
			err := a.releaseIPAddress(addr)
			if err != nil {
				logger.Warningf("error releasing dead IP address %q: %v", addr, err)
			} else {
				err = addr.Remove()
				if err != nil {
					logger.Warningf("error removing dead IP address %q: %v", addr, err)
				}
			}
		case <-a.dying:
			return
		default:
			return
		}
	}()
	return w, nil
}

// TearDown is part of the StringsWorker interface.
func (a *addresserHandler) TearDown() error {
	close(a.dying)
	return nil
}

Kill worker on error removing dead addresses

// Second revision: addresserHandler gains a kill callback wired to the
// StringsWorker, invoked when releasing/removing a dead address fails.

// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package addresser

import (
	"time"

	"github.com/juju/errors"
	"github.com/juju/loggo"
	"github.com/juju/utils"

	apiWatcher "github.com/juju/juju/api/watcher"
	"github.com/juju/juju/environs"
	"github.com/juju/juju/environs/config"
	"github.com/juju/juju/instance"
	"github.com/juju/juju/network"
	"github.com/juju/juju/state"
	"github.com/juju/juju/worker"
)

var logger = loggo.GetLogger("juju.worker.addresser")

var shortAttempt = utils.AttemptStrategy{
	Total: 5 * time.Second,
	Delay: 300 * time.Millisecond,
}

type releaser interface {
	// ReleaseAddress has the same signature as the same method in the
	// environs.Networking interface.
	ReleaseAddress(instance.Id, network.Id, network.Address) error
}

// stateAddresser defines the State methods used by the addresserHandler
type stateAddresser interface {
	DeadIPAddresses() ([]*state.IPAddress, error)
	EnvironConfig() (*config.Config, error)
	IPAddress(string) (*state.IPAddress, error)
	Machine(string) (*state.Machine, error)
	WatchIPAddresses() state.StringsWatcher
}

type addresserHandler struct {
	dying    chan struct{}
	st       stateAddresser
	releaser releaser
	// kill stops the owning StringsWorker; set by newWorkerWithReleaser.
	kill func()
}

// NewWorker returns a worker that keeps track of
// IP address lifecycles, releaseing and removing Dead addresses.
func NewWorker(st stateAddresser) (worker.Worker, error) {
	config, err := st.EnvironConfig()
	if err != nil {
		return nil, errors.Trace(err)
	}
	environ, err := environs.New(config)
	if err != nil {
		return nil, errors.Trace(err)
	}
	netEnviron, ok := environs.SupportsNetworking(environ)
	if !ok {
		return nil, errors.New("environment does not support networking")
	}
	a := newWorkerWithReleaser(st, netEnviron)
	return a, nil
}

func newWorkerWithReleaser(st stateAddresser, releaser releaser) worker.Worker {
	a := &addresserHandler{
		st:       st,
		releaser: releaser,
		dying:    make(chan struct{}),
	}
	w := worker.NewStringsWorker(a)
	a.kill = func() {
		w.Kill()
	}
	return w
}

// Handle is part of the StringsWorker interface.
func (a *addresserHandler) Handle(ids []string) error {
	for _, id := range ids {
		addr, err := a.st.IPAddress(id)
		if err != nil {
			return err
		}
		if addr.Life() != state.Dead {
			continue
		}
		err = a.releaseIPAddress(addr)
		if err != nil {
			return err
		}
		err = addr.Remove()
		if err != nil {
			return err
		}
	}
	return nil
}

func (a *addresserHandler) releaseIPAddress(addr *state.IPAddress) (err error) {
	defer errors.DeferredAnnotatef(&err, "failed to release address %v", addr.Value)
	var machine *state.Machine
	machine, err = a.st.Machine(addr.MachineId())
	if err != nil {
		return errors.Annotatef(err, "cannot get allocated machine %q", addr.MachineId())
	}
	var instId instance.Id
	instId, err = machine.InstanceId()
	if err != nil {
		return errors.Annotatef(err, "cannot get machine %q instance ID", addr.MachineId())
	}
	netId := network.Id(addr.SubnetId())
	for attempt := shortAttempt.Start(); attempt.Next(); {
		err = a.releaser.ReleaseAddress(instId, netId, addr.Address())
		if err == nil {
			return nil
		}
	}
	// Don't remove the address from state so we
	// can retry releasing the address later.
	logger.Warningf("cannot release address %q: %v (will retry)", addr.Value(), err)
	return errors.Trace(err)
}

// SetUp is part of the StringsWorker interface.
func (a *addresserHandler) SetUp() (apiWatcher.StringsWatcher, error) {
	dead, err := a.st.DeadIPAddresses()
	if err != nil {
		return nil, errors.Trace(err)
	}
	w := a.st.WatchIPAddresses()
	deadQueue := make(chan *state.IPAddress, len(dead))
	for _, deadAddr := range dead {
		deadQueue <- deadAddr
	}
	go func() {
		select {
		case addr := <-deadQueue:
			err := a.releaseIPAddress(addr)
			if err != nil {
				logger.Warningf("error releasing dead IP address %q: %v", addr, err)
				// Kill the worker on failure so the error is surfaced.
				a.kill()
				close(a.dying)
			} else {
				err = addr.Remove()
				if err != nil {
					logger.Warningf("error removing dead IP address %q: %v", addr, err)
					a.kill()
					close(a.dying)
				}
			}
		case <-a.dying:
			return
		default:
			return
		}
	}()
	return w, nil
}

// TearDown is part of the StringsWorker interface.
// NOTE(review): TearDown also closes a.dying — if the SetUp goroutine already
// closed it after a failure, this double-close would panic; verify upstream.
func (a *addresserHandler) TearDown() error {
	close(a.dying)
	return nil
}
// NOTE(review): this chunk holds two concatenated revisions of the zooko
// names manager, separated by the raw commit-message text "zooko/server: fix
// error where we expect the wrong result type". The only change is the type
// asserted in broadcastAndGetID (TxRawResult -> TxRawDecodeResult).

package server

import (
	"encoding/hex"
	"errors"
	"fmt"
	"time"

	"getmelange.com/zooko/message"
	"getmelange.com/zooko/rpc"

	"github.com/melange-app/nmcd/btcjson"
)

const (
	nameNewConfirmations         = 12
	nameFirstUpdateConfirmations = 1
	confirmationTime             = 10 * time.Minute
	checkConfirmationTime        = 5 * time.Minute
)

// lookupRequest asks the manager loop for a cached value; a nil pointer on
// Response means the name is not cached.
type lookupRequest struct {
	Name     string
	Response chan *[]byte
}

type registerRequest struct {
	Name  string
	Value []byte

	// TxID is the transaction identifier of the name_new
	// transaction. We will watch this for updates and send
	// name_firstupdate when we need to.
	TxID            string
	NameFirstUpdate []byte

	// This is how we keep track of transaction that will not
	// actually make it into the blockchain and purge them from
	// the cache if necessary.
	Ticks    int
	Attempts int
}

// NamesManager is a system that keeps track of all of the Namecoin
// registrations currently waiting to be processed.
type NamesManager struct {
	*rpc.Server

	// Storage for the cached names.
	cached map[string]*registerRequest

	// Actions that we can receive in the loop.
	lookup   chan lookupRequest
	register chan *registerRequest

	confirmationTimer *time.Ticker
}

// CreateNamesManager will create a new Name Management system that
// interfaces with Namecoin and implements a cache for when names
// aren't yet in the blockchain.
func CreateNamesManager(s *rpc.Server) *NamesManager {
	n := &NamesManager{
		Server: s,

		// Make the maps and channels
		cached:   make(map[string]*registerRequest),
		lookup:   make(chan lookupRequest),
		register: make(chan *registerRequest),

		// Make the timer
		confirmationTimer: time.NewTicker(checkConfirmationTime),
	}

	// Start the channel looking loop.
	go n.loop()

	return n
}

// loop serializes all cache access on one goroutine.
func (n *NamesManager) loop() {
	for {
		select {
		case l := <-n.lookup:
			val, ok := n.cached[l.Name]
			if !ok {
				l.Response <- nil
				continue
			}
			l.Response <- &val.Value
		case r := <-n.register:
			n.cached[r.Name] = r
		case <-n.confirmationTimer.C:
			n.checkForConfirmations()
		}
	}
}

// checkForConfirmations walks the cache, broadcasting name_firstupdate once a
// name_new is confirmed, and evicting entries that repeatedly fail.
func (n *NamesManager) checkForConfirmations() {
	newCache := make(map[string]*registerRequest)
	for name, reg := range n.cached {
		confirmations, err := n.Server.Confirmations(reg.TxID)
		if err != nil {
			fmt.Println(
				"Got error checking for confirmation on", name, err)
			continue
		}

		if reg.NameFirstUpdate == nil && confirmations >= nameFirstUpdateConfirmations {
			// In this situation, we can simply remove the
			// transaction from the cache.
		} else if confirmations >= nameNewConfirmations {
			// In this situation, we need to broadcast the
			// new transaction and get the txid of it to
			// place back in the cache and wait more... :/
			rawTx := hex.EncodeToString(reg.NameFirstUpdate)
			txId, err := n.broadcastAndGetID(rawTx)
			if err != nil {
				// If we have consistently gotten an
				// error on the past 5 attempts, we
				// will not continue taking up space
				// in the cache.
				if reg.Attempts > 5 {
					fmt.Println("[ZOOKO] Removing", name, "from the cache for getting 5 successive errors on broadcast transaction.")
					continue
				}

				fmt.Println("[ZOOKO] Received error while broadcast name_firstupdate for", name, err)

				// If we get an error at this point,
				// we are pretty screwed. We are going
				// to increase a marker, and we will
				// remove from the cache on too many
				// attempts.
				reg.Ticks++
				reg.Attempts++
				newCache[name] = reg
				continue
			}

			// Input the new request.
			reg.Ticks = 0
			reg.TxID = txId
			newCache[name] = reg
		} else {
			// Otherwise we populate the new cache with
			// the name immediately.
			reg.Ticks++

			// In this situation, we have waited a LONG
			// time to get a confirmation, we should
			// assume that the network will not accept
			// this transaction and remove it from the
			// chain.
			if time.Duration(reg.Ticks)*checkConfirmationTime > (2*nameNewConfirmations*confirmationTime) && confirmations == 0 {
				fmt.Println("[ZOOKO] Removing", name, "from the cache for failing to get a confirmation.")
				continue
			}

			newCache[name] = reg
		}
	}

	// Overwrite the cache.
	if len(newCache) != 0 {
		n.cached = newCache
	}
}

// broadcastAndGetID broadcasts a raw transaction and decodes it to recover
// its transaction id.
func (n *NamesManager) broadcastAndGetID(rawTx string) (string, error) {
	// Broadcast the Transaction
	if err := n.Server.Broadcast(rawTx); err != nil {
		return "", err
	}

	// Get the TxID from the raw transaction
	cmd, err := btcjson.NewDecodeRawTransactionCmd(nil, rawTx)
	if err != nil {
		return "", err
	}

	result, err := n.Server.Send(cmd)
	if err != nil {
		return "", err
	} else if result.Error != nil {
		return "", *result.Error
	}

	// Get the Transaction ID
	// NOTE(review): this assertion is the bug fixed by the second revision
	// below — decoderawtransaction returns TxRawDecodeResult.
	return result.Result.(*btcjson.TxRawResult).Txid, nil
}

// Register broadcasts a name_new and caches the pending registration. The
// bool return reports whether the name was not already registered.
func (n *NamesManager) Register(msg *message.RegisterName) (bool, error) {
	if _, ok, err := n.Lookup(*msg.Name); err != nil || ok {
		return false, err
	}

	rawTx := hex.EncodeToString(msg.NameNew)
	txId, err := n.broadcastAndGetID(rawTx)
	if err != nil {
		return true, err
	}

	n.register <- &registerRequest{
		Name:            *msg.Name,
		Value:           msg.Value,
		TxID:            txId,
		NameFirstUpdate: msg.NameFirstupdate,
	}

	return true, nil
}

// Renew broadcasts a name_update for an existing name.
func (n *NamesManager) Renew(msg *message.RenewName) error {
	if _, ok, err := n.Lookup(*msg.Name); err != nil || !ok {
		return errors.New("zooko/server: name lookup failed before renew")
	}

	rawTx := hex.EncodeToString(msg.NameUpdate)
	txId, err := n.broadcastAndGetID(rawTx)
	if err != nil {
		return err
	}

	n.register <- &registerRequest{
		Name:  *msg.Name,
		Value: msg.Value,
		TxID:  txId,
	}

	return nil
}

// Lookup checks the pending-registration cache first, then the blockchain.
func (n *NamesManager) Lookup(name string) ([]byte, bool, error) {
	resp := make(chan *[]byte)
	n.lookup <- lookupRequest{
		Name:     name,
		Response: resp,
	}

	data := <-resp
	if data != nil {
		return *data, true, nil
	}

	val, found, err := n.Server.LookupName(name)
	if err != nil || !found {
		return nil, false, err
	}

	return []byte(val), true, nil
}

zooko/server: fix error where we expect the wrong result type

// Second revision: identical except broadcastAndGetID asserts the correct
// *btcjson.TxRawDecodeResult type.

package server

import (
	"encoding/hex"
	"errors"
	"fmt"
	"time"

	"getmelange.com/zooko/message"
	"getmelange.com/zooko/rpc"

	"github.com/melange-app/nmcd/btcjson"
)

const (
	nameNewConfirmations         = 12
	nameFirstUpdateConfirmations = 1
	confirmationTime             = 10 * time.Minute
	checkConfirmationTime        = 5 * time.Minute
)

type lookupRequest struct {
	Name     string
	Response chan *[]byte
}

type registerRequest struct {
	Name  string
	Value []byte

	// TxID is the transaction identifier of the name_new
	// transaction. We will watch this for updates and send
	// name_firstupdate when we need to.
	TxID            string
	NameFirstUpdate []byte

	// This is how we keep track of transaction that will not
	// actually make it into the blockchain and purge them from
	// the cache if necessary.
	Ticks    int
	Attempts int
}

// NamesManager is a system that keeps track of all of the Namecoin
// registrations currently waiting to be processed.
type NamesManager struct {
	*rpc.Server

	// Storage for the cached names.
	cached map[string]*registerRequest

	// Actions that we can receive in the loop.
	lookup   chan lookupRequest
	register chan *registerRequest

	confirmationTimer *time.Ticker
}

// CreateNamesManager will create a new Name Management system that
// interfaces with Namecoin and implements a cache for when names
// aren't yet in the blockchain.
func CreateNamesManager(s *rpc.Server) *NamesManager {
	n := &NamesManager{
		Server: s,

		// Make the maps and channels
		cached:   make(map[string]*registerRequest),
		lookup:   make(chan lookupRequest),
		register: make(chan *registerRequest),

		// Make the timer
		confirmationTimer: time.NewTicker(checkConfirmationTime),
	}

	// Start the channel looking loop.
	go n.loop()

	return n
}

func (n *NamesManager) loop() {
	for {
		select {
		case l := <-n.lookup:
			val, ok := n.cached[l.Name]
			if !ok {
				l.Response <- nil
				continue
			}
			l.Response <- &val.Value
		case r := <-n.register:
			n.cached[r.Name] = r
		case <-n.confirmationTimer.C:
			n.checkForConfirmations()
		}
	}
}

func (n *NamesManager) checkForConfirmations() {
	newCache := make(map[string]*registerRequest)
	for name, reg := range n.cached {
		confirmations, err := n.Server.Confirmations(reg.TxID)
		if err != nil {
			fmt.Println(
				"Got error checking for confirmation on", name, err)
			continue
		}

		if reg.NameFirstUpdate == nil && confirmations >= nameFirstUpdateConfirmations {
			// In this situation, we can simply remove the
			// transaction from the cache.
		} else if confirmations >= nameNewConfirmations {
			// In this situation, we need to broadcast the
			// new transaction and get the txid of it to
			// place back in the cache and wait more... :/
			rawTx := hex.EncodeToString(reg.NameFirstUpdate)
			txId, err := n.broadcastAndGetID(rawTx)
			if err != nil {
				// If we have consistently gotten an
				// error on the past 5 attempts, we
				// will not continue taking up space
				// in the cache.
				if reg.Attempts > 5 {
					fmt.Println("[ZOOKO] Removing", name, "from the cache for getting 5 successive errors on broadcast transaction.")
					continue
				}

				fmt.Println("[ZOOKO] Received error while broadcast name_firstupdate for", name, err)

				// If we get an error at this point,
				// we are pretty screwed. We are going
				// to increase a marker, and we will
				// remove from the cache on too many
				// attempts.
				reg.Ticks++
				reg.Attempts++
				newCache[name] = reg
				continue
			}

			// Input the new request.
			reg.Ticks = 0
			reg.TxID = txId
			newCache[name] = reg
		} else {
			// Otherwise we populate the new cache with
			// the name immediately.
			reg.Ticks++

			// In this situation, we have waited a LONG
			// time to get a confirmation, we should
			// assume that the network will not accept
			// this transaction and remove it from the
			// chain.
			if time.Duration(reg.Ticks)*checkConfirmationTime > (2*nameNewConfirmations*confirmationTime) && confirmations == 0 {
				fmt.Println("[ZOOKO] Removing", name, "from the cache for failing to get a confirmation.")
				continue
			}

			newCache[name] = reg
		}
	}

	// Overwrite the cache.
	if len(newCache) != 0 {
		n.cached = newCache
	}
}

func (n *NamesManager) broadcastAndGetID(rawTx string) (string, error) {
	// Broadcast the Transaction
	if err := n.Server.Broadcast(rawTx); err != nil {
		return "", err
	}

	// Get the TxID from the raw transaction
	cmd, err := btcjson.NewDecodeRawTransactionCmd(nil, rawTx)
	if err != nil {
		return "", err
	}

	result, err := n.Server.Send(cmd)
	if err != nil {
		return "", err
	} else if result.Error != nil {
		return "", *result.Error
	}

	// Get the Transaction ID
	return result.Result.(*btcjson.TxRawDecodeResult).Txid, nil
}

func (n *NamesManager) Register(msg *message.RegisterName) (bool, error) {
	if _, ok, err := n.Lookup(*msg.Name); err != nil || ok {
		return false, err
	}

	rawTx := hex.EncodeToString(msg.NameNew)
	txId, err := n.broadcastAndGetID(rawTx)
	if err != nil {
		return true, err
	}

	n.register <- &registerRequest{
		Name:            *msg.Name,
		Value:           msg.Value,
		TxID:            txId,
		NameFirstUpdate: msg.NameFirstupdate,
	}

	return true, nil
}

func (n *NamesManager) Renew(msg *message.RenewName) error {
	if _, ok, err := n.Lookup(*msg.Name); err != nil || !ok {
		return errors.New("zooko/server: name lookup failed before renew")
	}

	rawTx := hex.EncodeToString(msg.NameUpdate)
	txId, err := n.broadcastAndGetID(rawTx)
	if err != nil {
		return err
	}

	n.register <- &registerRequest{
		Name:  *msg.Name,
		Value: msg.Value,
		TxID:  txId,
	}

	return nil
}

func (n *NamesManager) Lookup(name string) ([]byte, bool, error) {
	resp := make(chan *[]byte)
	n.lookup <- lookupRequest{
		Name:     name,
		Response: resp,
	}

	data := <-resp
	if data != nil {
		return *data, true, nil
	}

	val, found, err := n.Server.LookupName(name)
	if err != nil || !found {
		return nil, false, err
	}

	return []byte(val), true, nil
}
// NOTE(review): this chunk holds two concatenated revisions of the torrent
// stop code, separated by the raw commit-message text "add todo". The second
// revision only adds a TODO comment in stopStatsWriter.

package session

import (
	"github.com/cenkalti/rain/internal/announcer"
	"github.com/cenkalti/rain/internal/handshaker/incominghandshaker"
	"github.com/cenkalti/rain/internal/handshaker/outgoinghandshaker"
	"github.com/cenkalti/rain/internal/tracker"
)

// stop tears the torrent down in dependency order: acceptor, peers,
// downloaders, timers, data/allocator/verifier, handshakers, stats, caches,
// announcers — and finally fires a one-shot "stopped" announce.
func (t *torrent) stop(err error) {
	s := t.status()
	if s == Stopping || s == Stopped {
		return
	}
	t.log.Info("stopping torrent")
	t.lastError = err
	if err != nil && err != errClosed {
		t.log.Error(err)
	}
	t.log.Debugln("stopping acceptor")
	t.stopAcceptor()
	t.log.Debugln("closing peer connections")
	t.stopPeers()
	t.log.Debugln("stopping piece downloaders")
	t.stopPiecedownloaders()
	t.log.Debugln("stopping info downloaders")
	t.stopInfoDownloaders()
	if t.resume != nil && t.bitfield != nil {
		t.writeBitfield(false)
	}
	t.log.Debugln("stopping unchoke timers")
	t.stopUnchokeTimers()

	// Closing data is necessary to cancel ongoing IO operations on files.
	t.log.Debugln("closing open files")
	t.closeData()

	// Data must be closed before closing Allocator.
	t.log.Debugln("stopping allocator")
	if t.allocator != nil {
		t.allocator.Close()
		t.allocator = nil
	}

	// Data must be closed before closing Verifier.
	t.log.Debugln("stopping verifier")
	if t.verifier != nil {
		t.verifier.Close()
		t.verifier = nil
	}

	t.log.Debugln("stopping outgoing handshakers")
	t.stopOutgoingHandshakers()
	t.log.Debugln("stopping incoming handshakers")
	t.stopIncomingHandshakers()

	t.log.Debugln("stopping stats writer")
	t.stopStatsWriter()

	t.log.Debugln("clearing piece cache")
	t.pieceCache.Clear()

	// Stop periodical announcers first.
	t.log.Debugln("stopping announcers")
	announcers := t.announcers // keep a reference to the list before nilling in order to start StopAnnouncer
	t.stopPeriodicalAnnouncers()

	// Then start another announcer to announce Stopped event to the trackers.
	// The torrent enters "Stopping" state.
	// This announcer times out in 5 seconds. After it's done the torrent is in "Stopped" status.
	trackers := make([]tracker.Tracker, 0, len(announcers))
	for _, an := range announcers {
		if an.HasAnnounced {
			trackers = append(trackers, an.Tracker)
		}
	}
	if t.stoppedEventAnnouncer != nil {
		panic("stopped event announcer exists")
	}
	t.stoppedEventAnnouncer = announcer.NewStopAnnouncer(trackers, t.announcerFields(), t.config.TrackerStopTimeout, t.announcersStoppedC, t.log)
	go t.stoppedEventAnnouncer.Run()

	t.addrList.Reset()
}

func (t *torrent) stopStatsWriter() {
	t.statsWriteTicker.Stop()
	t.statsWriteTicker = nil
	t.statsWriteTickerC = nil
}

func (t *torrent) stopOutgoingHandshakers() {
	for oh := range t.outgoingHandshakers {
		oh.Close()
	}
	t.outgoingHandshakers = make(map[*outgoinghandshaker.OutgoingHandshaker]struct{})
}

func (t *torrent) stopIncomingHandshakers() {
	for ih := range t.incomingHandshakers {
		ih.Close()
	}
	t.incomingHandshakers = make(map[*incominghandshaker.IncomingHandshaker]struct{})
}

// closeData closes all open files and drops per-piece state.
func (t *torrent) closeData() {
	for _, f := range t.files {
		err := f.Close()
		if err != nil {
			t.log.Error(err)
		}
	}
	t.files = nil
	t.pieces = nil
	t.piecePicker = nil
	t.bytesAllocated = 0
	t.checkedPieces = 0
}

func (t *torrent) stopPeriodicalAnnouncers() {
	for _, an := range t.announcers {
		an.Close()
	}
	t.announcers = nil
	if t.dhtAnnouncer != nil {
		t.dhtAnnouncer.Close()
		t.dhtAnnouncer = nil
	}
}

func (t *torrent) stopAcceptor() {
	if t.acceptor != nil {
		t.acceptor.Close()
	}
	t.acceptor = nil
}

func (t *torrent) stopPeers() {
	for p := range t.peers {
		t.closePeer(p)
	}
}

func (t *torrent) stopUnchokeTimers() {
	if t.unchokeTimer != nil {
		t.unchokeTimer.Stop()
		t.unchokeTimer = nil
		t.unchokeTimerC = nil
	}
	if t.optimisticUnchokeTimer != nil {
		t.optimisticUnchokeTimer.Stop()
		t.optimisticUnchokeTimer = nil
		t.optimisticUnchokeTimerC = nil
	}
}

func (t *torrent) stopInfoDownloaders() {
	for _, id := range t.infoDownloaders {
		t.closeInfoDownloader(id)
	}
}

func (t *torrent) stopPiecedownloaders() {
	for _, pd := range t.pieceDownloaders {
		t.closePieceDownloader(pd)
	}
}

add todo

// Second revision: identical except for the TODO in stopStatsWriter.

package session

import (
	"github.com/cenkalti/rain/internal/announcer"
	"github.com/cenkalti/rain/internal/handshaker/incominghandshaker"
	"github.com/cenkalti/rain/internal/handshaker/outgoinghandshaker"
	"github.com/cenkalti/rain/internal/tracker"
)

func (t *torrent) stop(err error) {
	s := t.status()
	if s == Stopping || s == Stopped {
		return
	}
	t.log.Info("stopping torrent")
	t.lastError = err
	if err != nil && err != errClosed {
		t.log.Error(err)
	}
	t.log.Debugln("stopping acceptor")
	t.stopAcceptor()
	t.log.Debugln("closing peer connections")
	t.stopPeers()
	t.log.Debugln("stopping piece downloaders")
	t.stopPiecedownloaders()
	t.log.Debugln("stopping info downloaders")
	t.stopInfoDownloaders()
	if t.resume != nil && t.bitfield != nil {
		t.writeBitfield(false)
	}
	t.log.Debugln("stopping unchoke timers")
	t.stopUnchokeTimers()

	// Closing data is necessary to cancel ongoing IO operations on files.
	t.log.Debugln("closing open files")
	t.closeData()

	// Data must be closed before closing Allocator.
	t.log.Debugln("stopping allocator")
	if t.allocator != nil {
		t.allocator.Close()
		t.allocator = nil
	}

	// Data must be closed before closing Verifier.
	t.log.Debugln("stopping verifier")
	if t.verifier != nil {
		t.verifier.Close()
		t.verifier = nil
	}

	t.log.Debugln("stopping outgoing handshakers")
	t.stopOutgoingHandshakers()
	t.log.Debugln("stopping incoming handshakers")
	t.stopIncomingHandshakers()

	t.log.Debugln("stopping stats writer")
	t.stopStatsWriter()

	t.log.Debugln("clearing piece cache")
	t.pieceCache.Clear()

	// Stop periodical announcers first.
	t.log.Debugln("stopping announcers")
	announcers := t.announcers // keep a reference to the list before nilling in order to start StopAnnouncer
	t.stopPeriodicalAnnouncers()

	// Then start another announcer to announce Stopped event to the trackers.
	// The torrent enters "Stopping" state.
	// This announcer times out in 5 seconds. After it's done the torrent is in "Stopped" status.
	trackers := make([]tracker.Tracker, 0, len(announcers))
	for _, an := range announcers {
		if an.HasAnnounced {
			trackers = append(trackers, an.Tracker)
		}
	}
	if t.stoppedEventAnnouncer != nil {
		panic("stopped event announcer exists")
	}
	t.stoppedEventAnnouncer = announcer.NewStopAnnouncer(trackers, t.announcerFields(), t.config.TrackerStopTimeout, t.announcersStoppedC, t.log)
	go t.stoppedEventAnnouncer.Run()

	t.addrList.Reset()
}

func (t *torrent) stopStatsWriter() {
	// TODO write stats before stopping the ticker
	t.statsWriteTicker.Stop()
	t.statsWriteTicker = nil
	t.statsWriteTickerC = nil
}

func (t *torrent) stopOutgoingHandshakers() {
	for oh := range t.outgoingHandshakers {
		oh.Close()
	}
	t.outgoingHandshakers = make(map[*outgoinghandshaker.OutgoingHandshaker]struct{})
}

func (t *torrent) stopIncomingHandshakers() {
	for ih := range t.incomingHandshakers {
		ih.Close()
	}
	t.incomingHandshakers = make(map[*incominghandshaker.IncomingHandshaker]struct{})
}

func (t *torrent) closeData() {
	for _, f := range t.files {
		err := f.Close()
		if err != nil {
			t.log.Error(err)
		}
	}
	t.files = nil
	t.pieces = nil
	t.piecePicker = nil
	t.bytesAllocated = 0
	t.checkedPieces = 0
}

func (t *torrent) stopPeriodicalAnnouncers() {
	for _, an := range t.announcers {
		an.Close()
	}
	t.announcers = nil
	if t.dhtAnnouncer != nil {
		t.dhtAnnouncer.Close()
		t.dhtAnnouncer = nil
	}
}

func (t *torrent) stopAcceptor() {
	if t.acceptor != nil {
		t.acceptor.Close()
	}
	t.acceptor = nil
}

func (t *torrent) stopPeers() {
	for p := range t.peers {
		t.closePeer(p)
	}
}

func (t *torrent) stopUnchokeTimers() {
	if t.unchokeTimer != nil {
		t.unchokeTimer.Stop()
		t.unchokeTimer = nil
		t.unchokeTimerC = nil
	}
	if t.optimisticUnchokeTimer != nil {
		t.optimisticUnchokeTimer.Stop()
		t.optimisticUnchokeTimer = nil
		t.optimisticUnchokeTimerC = nil
	}
}

func (t *torrent) stopInfoDownloaders() {
	for _, id := range t.infoDownloaders {
		t.closeInfoDownloader(id)
	}
}

func (t *torrent) stopPiecedownloaders() {
	for _, pd := range t.pieceDownloaders {
		t.closePieceDownloader(pd)
	}
}
// Copyright 2013 The ql Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSES/QL-LICENSE file. // Copyright 2015 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package session import ( "context" "sync" "time" "github.com/opentracing/opentracing-go" "github.com/pingcap/errors" "github.com/pingcap/parser" "github.com/pingcap/parser/ast" "github.com/pingcap/parser/mysql" "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/sqlexec" "go.uber.org/zap" ) type domainMap struct { domains map[string]*domain.Domain mu sync.Mutex } func (dm *domainMap) Get(store kv.Storage) (d *domain.Domain, err error) { dm.mu.Lock() defer dm.mu.Unlock() // If this is the only domain instance, and the caller doesn't provide store. 
if len(dm.domains) == 1 && store == nil { for _, r := range dm.domains { return r, nil } } key := store.UUID() d = dm.domains[key] if d != nil { return } ddlLease := schemaLease statisticLease := statsLease err = util.RunWithRetry(util.DefaultMaxRetries, util.RetryInterval, func() (retry bool, err1 error) { logutil.BgLogger().Info("new domain", zap.String("store", store.UUID()), zap.Stringer("ddl lease", ddlLease), zap.Stringer("stats lease", statisticLease)) factory := createSessionFunc(store) sysFactory := createSessionWithDomainFunc(store) d = domain.NewDomain(store, ddlLease, statisticLease, factory) err1 = d.Init(ddlLease, sysFactory) if err1 != nil { // If we don't clean it, there are some dirty data when retrying the function of Init. d.Close() logutil.BgLogger().Error("[ddl] init domain failed", zap.Error(err1)) } return true, err1 }) if err != nil { return nil, err } dm.domains[key] = d return } func (dm *domainMap) Delete(store kv.Storage) { dm.mu.Lock() delete(dm.domains, store.UUID()) dm.mu.Unlock() } var ( domap = &domainMap{ domains: map[string]*domain.Domain{}, } // store.UUID()-> IfBootstrapped storeBootstrapped = make(map[string]bool) storeBootstrappedLock sync.Mutex // schemaLease is the time for re-updating remote schema. // In online DDL, we must wait 2 * SchemaLease time to guarantee // all servers get the neweset schema. // Default schema lease time is 1 second, you can change it with a proper time, // but you must know that too little may cause badly performance degradation. // For production, you should set a big schema lease, like 300s+. schemaLease = 1 * time.Second // statsLease is the time for reload stats table. statsLease = 3 * time.Second ) // SetSchemaLease changes the default schema lease time for DDL. // This function is very dangerous, don't use it if you really know what you do. // SetSchemaLease only affects not local storage after bootstrapped. 
func SetSchemaLease(lease time.Duration) { schemaLease = lease } // SetStatsLease changes the default stats lease time for loading stats info. func SetStatsLease(lease time.Duration) { statsLease = lease } // DisableStats4Test disables the stats for tests. func DisableStats4Test() { statsLease = -1 } // Parse parses a query string to raw ast.StmtNode. func Parse(ctx sessionctx.Context, src string) ([]ast.StmtNode, error) { logutil.BgLogger().Debug("compiling", zap.String("source", src)) charset, collation := ctx.GetSessionVars().GetCharsetInfo() p := parser.New() p.EnableWindowFunc(ctx.GetSessionVars().EnableWindowFunction) p.SetSQLMode(ctx.GetSessionVars().SQLMode) stmts, warns, err := p.Parse(src, charset, collation) for _, warn := range warns { ctx.GetSessionVars().StmtCtx.AppendWarning(warn) } if err != nil { logutil.BgLogger().Warn("compiling", zap.String("source", src), zap.Error(err)) return nil, err } return stmts, nil } // Compile is safe for concurrent use by multiple goroutines. func Compile(ctx context.Context, sctx sessionctx.Context, stmtNode ast.StmtNode) (sqlexec.Statement, error) { compiler := executor.Compiler{Ctx: sctx} stmt, err := compiler.Compile(ctx, stmtNode) return stmt, err } func finishStmt(ctx context.Context, sctx sessionctx.Context, se *session, sessVars *variable.SessionVars, meetsErr error) error { if meetsErr != nil { if !sessVars.InTxn() { logutil.BgLogger().Info("rollbackTxn for ddl/autocommit error.") se.RollbackTxn(ctx) } else if se.txn.Valid() && se.txn.IsPessimistic() && executor.ErrDeadlock.Equal(meetsErr) { logutil.BgLogger().Info("rollbackTxn for deadlock error", zap.Uint64("txn", se.txn.StartTS())) se.RollbackTxn(ctx) } return meetsErr } if !sessVars.InTxn() { return se.CommitTxn(ctx) } return checkStmtLimit(ctx, sctx, se) } func checkStmtLimit(ctx context.Context, sctx sessionctx.Context, se *session) error { // If the user insert, insert, insert ... but never commit, TiDB would OOM. 
// So we limit the statement count in a transaction here. var err error sessVars := se.GetSessionVars() history := GetHistory(sctx) if history.Count() > int(config.GetGlobalConfig().Performance.StmtCountLimit) { if !sessVars.BatchCommit { se.RollbackTxn(ctx) return errors.Errorf("statement count %d exceeds the transaction limitation, autocommit = %t", history.Count(), sctx.GetSessionVars().IsAutocommit()) } err = se.NewTxn(ctx) // The transaction does not committed yet, we need to keep it in transaction. // The last history could not be "commit"/"rollback" statement. // It means it is impossible to start a new transaction at the end of the transaction. // Because after the server executed "commit"/"rollback" statement, the session is out of the transaction. sessVars.SetStatusFlag(mysql.ServerStatusInTrans, true) } return err } // runStmt executes the sqlexec.Statement and commit or rollback the current transaction. func runStmt(ctx context.Context, sctx sessionctx.Context, s sqlexec.Statement) (rs sqlexec.RecordSet, err error) { if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil { span1 := span.Tracer().StartSpan("session.runStmt", opentracing.ChildOf(span.Context())) span1.LogKV("sql", s.OriginText()) defer span1.Finish() ctx = opentracing.ContextWithSpan(ctx, span1) } se := sctx.(*session) sessVars := se.GetSessionVars() // Save origTxnCtx here to avoid it reset in the transaction retry. origTxnCtx := sessVars.TxnCtx defer func() { // If it is not a select statement, we record its slow log here, // then it could include the transaction commit time. if rs == nil { s.(*executor.ExecStmt).LogSlowQuery(origTxnCtx.StartTS, err != nil) } }() err = se.checkTxnAborted(s) if err != nil { return nil, err } rs, err = s.Exec(ctx) sessVars.TxnCtx.StatementCount++ if !s.IsReadOnly(sessVars) { // All the history should be added here. 
if err == nil && sessVars.TxnCtx.CouldRetry { GetHistory(sctx).Add(s, sessVars.StmtCtx) } // Handle the stmt commit/rollback. if txn, err1 := sctx.Txn(false); err1 == nil { if txn.Valid() { if err != nil { sctx.StmtRollback() } else { err = sctx.StmtCommit() } } } else { logutil.BgLogger().Error("get txn error", zap.Error(err1)) } } err = finishStmt(ctx, sctx, se, sessVars, err) if se.txn.pending() { // After run statement finish, txn state is still pending means the // statement never need a Txn(), such as: // // set @@tidb_general_log = 1 // set @@autocommit = 0 // select 1 // // Reset txn state to invalid to dispose the pending start ts. se.txn.changeToInvalid() } return rs, err } // GetHistory get all stmtHistory in current txn. Exported only for test. func GetHistory(ctx sessionctx.Context) *StmtHistory { hist, ok := ctx.GetSessionVars().TxnCtx.History.(*StmtHistory) if ok { return hist } hist = new(StmtHistory) ctx.GetSessionVars().TxnCtx.History = hist return hist } // GetRows4Test gets all the rows from a RecordSet, only used for test. func GetRows4Test(ctx context.Context, sctx sessionctx.Context, rs sqlexec.RecordSet) ([]chunk.Row, error) { if rs == nil { return nil, nil } var rows []chunk.Row req := rs.NewChunk() // Must reuse `req` for imitating server.(*clientConn).writeChunks for { err := rs.Next(ctx, req) if err != nil { return nil, err } if req.NumRows() == 0 { break } iter := chunk.NewIterator4Chunk(req.CopyConstruct()) for row := iter.Begin(); row != iter.End(); row = iter.Next() { rows = append(rows, row) } } return rows, nil } // ResultSetToStringSlice changes the RecordSet to [][]string. 
func ResultSetToStringSlice(ctx context.Context, s Session, rs sqlexec.RecordSet) ([][]string, error) { rows, err := GetRows4Test(ctx, s, rs) if err != nil { return nil, err } err = rs.Close() if err != nil { return nil, err } sRows := make([][]string, len(rows)) for i := range rows { row := rows[i] iRow := make([]string, row.Len()) for j := 0; j < row.Len(); j++ { if row.IsNull(j) { iRow[j] = "<nil>" } else { d := row.GetDatum(j, &rs.Fields()[j].Column.FieldType) iRow[j], err = d.ToString() if err != nil { return nil, err } } } sRows[i] = iRow } return sRows, nil } // Session errors. var ( ErrForUpdateCantRetry = terror.ClassSession.New(codeForUpdateCantRetry, mysql.MySQLErrName[mysql.ErrForUpdateCantRetry]) ) const ( codeForUpdateCantRetry terror.ErrCode = mysql.ErrForUpdateCantRetry ) func init() { sessionMySQLErrCodes := map[terror.ErrCode]uint16{ codeForUpdateCantRetry: mysql.ErrForUpdateCantRetry, } terror.ErrClassToMySQLCodes[terror.ClassSession] = sessionMySQLErrCodes } session: fix log slow log succ field (#11867) // Copyright 2013 The ql Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSES/QL-LICENSE file. // Copyright 2015 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package session import ( "context" "sync" "time" "github.com/opentracing/opentracing-go" "github.com/pingcap/errors" "github.com/pingcap/parser" "github.com/pingcap/parser/ast" "github.com/pingcap/parser/mysql" "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/sqlexec" "go.uber.org/zap" ) type domainMap struct { domains map[string]*domain.Domain mu sync.Mutex } func (dm *domainMap) Get(store kv.Storage) (d *domain.Domain, err error) { dm.mu.Lock() defer dm.mu.Unlock() // If this is the only domain instance, and the caller doesn't provide store. if len(dm.domains) == 1 && store == nil { for _, r := range dm.domains { return r, nil } } key := store.UUID() d = dm.domains[key] if d != nil { return } ddlLease := schemaLease statisticLease := statsLease err = util.RunWithRetry(util.DefaultMaxRetries, util.RetryInterval, func() (retry bool, err1 error) { logutil.BgLogger().Info("new domain", zap.String("store", store.UUID()), zap.Stringer("ddl lease", ddlLease), zap.Stringer("stats lease", statisticLease)) factory := createSessionFunc(store) sysFactory := createSessionWithDomainFunc(store) d = domain.NewDomain(store, ddlLease, statisticLease, factory) err1 = d.Init(ddlLease, sysFactory) if err1 != nil { // If we don't clean it, there are some dirty data when retrying the function of Init. 
d.Close() logutil.BgLogger().Error("[ddl] init domain failed", zap.Error(err1)) } return true, err1 }) if err != nil { return nil, err } dm.domains[key] = d return } func (dm *domainMap) Delete(store kv.Storage) { dm.mu.Lock() delete(dm.domains, store.UUID()) dm.mu.Unlock() } var ( domap = &domainMap{ domains: map[string]*domain.Domain{}, } // store.UUID()-> IfBootstrapped storeBootstrapped = make(map[string]bool) storeBootstrappedLock sync.Mutex // schemaLease is the time for re-updating remote schema. // In online DDL, we must wait 2 * SchemaLease time to guarantee // all servers get the neweset schema. // Default schema lease time is 1 second, you can change it with a proper time, // but you must know that too little may cause badly performance degradation. // For production, you should set a big schema lease, like 300s+. schemaLease = 1 * time.Second // statsLease is the time for reload stats table. statsLease = 3 * time.Second ) // SetSchemaLease changes the default schema lease time for DDL. // This function is very dangerous, don't use it if you really know what you do. // SetSchemaLease only affects not local storage after bootstrapped. func SetSchemaLease(lease time.Duration) { schemaLease = lease } // SetStatsLease changes the default stats lease time for loading stats info. func SetStatsLease(lease time.Duration) { statsLease = lease } // DisableStats4Test disables the stats for tests. func DisableStats4Test() { statsLease = -1 } // Parse parses a query string to raw ast.StmtNode. 
func Parse(ctx sessionctx.Context, src string) ([]ast.StmtNode, error) { logutil.BgLogger().Debug("compiling", zap.String("source", src)) charset, collation := ctx.GetSessionVars().GetCharsetInfo() p := parser.New() p.EnableWindowFunc(ctx.GetSessionVars().EnableWindowFunction) p.SetSQLMode(ctx.GetSessionVars().SQLMode) stmts, warns, err := p.Parse(src, charset, collation) for _, warn := range warns { ctx.GetSessionVars().StmtCtx.AppendWarning(warn) } if err != nil { logutil.BgLogger().Warn("compiling", zap.String("source", src), zap.Error(err)) return nil, err } return stmts, nil } // Compile is safe for concurrent use by multiple goroutines. func Compile(ctx context.Context, sctx sessionctx.Context, stmtNode ast.StmtNode) (sqlexec.Statement, error) { compiler := executor.Compiler{Ctx: sctx} stmt, err := compiler.Compile(ctx, stmtNode) return stmt, err } func finishStmt(ctx context.Context, sctx sessionctx.Context, se *session, sessVars *variable.SessionVars, meetsErr error) error { if meetsErr != nil { if !sessVars.InTxn() { logutil.BgLogger().Info("rollbackTxn for ddl/autocommit error.") se.RollbackTxn(ctx) } else if se.txn.Valid() && se.txn.IsPessimistic() && executor.ErrDeadlock.Equal(meetsErr) { logutil.BgLogger().Info("rollbackTxn for deadlock error", zap.Uint64("txn", se.txn.StartTS())) se.RollbackTxn(ctx) } return meetsErr } if !sessVars.InTxn() { return se.CommitTxn(ctx) } return checkStmtLimit(ctx, sctx, se) } func checkStmtLimit(ctx context.Context, sctx sessionctx.Context, se *session) error { // If the user insert, insert, insert ... but never commit, TiDB would OOM. // So we limit the statement count in a transaction here. 
var err error sessVars := se.GetSessionVars() history := GetHistory(sctx) if history.Count() > int(config.GetGlobalConfig().Performance.StmtCountLimit) { if !sessVars.BatchCommit { se.RollbackTxn(ctx) return errors.Errorf("statement count %d exceeds the transaction limitation, autocommit = %t", history.Count(), sctx.GetSessionVars().IsAutocommit()) } err = se.NewTxn(ctx) // The transaction does not committed yet, we need to keep it in transaction. // The last history could not be "commit"/"rollback" statement. // It means it is impossible to start a new transaction at the end of the transaction. // Because after the server executed "commit"/"rollback" statement, the session is out of the transaction. sessVars.SetStatusFlag(mysql.ServerStatusInTrans, true) } return err } // runStmt executes the sqlexec.Statement and commit or rollback the current transaction. func runStmt(ctx context.Context, sctx sessionctx.Context, s sqlexec.Statement) (rs sqlexec.RecordSet, err error) { if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil { span1 := span.Tracer().StartSpan("session.runStmt", opentracing.ChildOf(span.Context())) span1.LogKV("sql", s.OriginText()) defer span1.Finish() ctx = opentracing.ContextWithSpan(ctx, span1) } se := sctx.(*session) sessVars := se.GetSessionVars() // Save origTxnCtx here to avoid it reset in the transaction retry. origTxnCtx := sessVars.TxnCtx defer func() { // If it is not a select statement, we record its slow log here, // then it could include the transaction commit time. if rs == nil { s.(*executor.ExecStmt).LogSlowQuery(origTxnCtx.StartTS, err == nil) } }() err = se.checkTxnAborted(s) if err != nil { return nil, err } rs, err = s.Exec(ctx) sessVars.TxnCtx.StatementCount++ if !s.IsReadOnly(sessVars) { // All the history should be added here. if err == nil && sessVars.TxnCtx.CouldRetry { GetHistory(sctx).Add(s, sessVars.StmtCtx) } // Handle the stmt commit/rollback. 
if txn, err1 := sctx.Txn(false); err1 == nil { if txn.Valid() { if err != nil { sctx.StmtRollback() } else { err = sctx.StmtCommit() } } } else { logutil.BgLogger().Error("get txn error", zap.Error(err1)) } } err = finishStmt(ctx, sctx, se, sessVars, err) if se.txn.pending() { // After run statement finish, txn state is still pending means the // statement never need a Txn(), such as: // // set @@tidb_general_log = 1 // set @@autocommit = 0 // select 1 // // Reset txn state to invalid to dispose the pending start ts. se.txn.changeToInvalid() } return rs, err } // GetHistory get all stmtHistory in current txn. Exported only for test. func GetHistory(ctx sessionctx.Context) *StmtHistory { hist, ok := ctx.GetSessionVars().TxnCtx.History.(*StmtHistory) if ok { return hist } hist = new(StmtHistory) ctx.GetSessionVars().TxnCtx.History = hist return hist } // GetRows4Test gets all the rows from a RecordSet, only used for test. func GetRows4Test(ctx context.Context, sctx sessionctx.Context, rs sqlexec.RecordSet) ([]chunk.Row, error) { if rs == nil { return nil, nil } var rows []chunk.Row req := rs.NewChunk() // Must reuse `req` for imitating server.(*clientConn).writeChunks for { err := rs.Next(ctx, req) if err != nil { return nil, err } if req.NumRows() == 0 { break } iter := chunk.NewIterator4Chunk(req.CopyConstruct()) for row := iter.Begin(); row != iter.End(); row = iter.Next() { rows = append(rows, row) } } return rows, nil } // ResultSetToStringSlice changes the RecordSet to [][]string. 
func ResultSetToStringSlice(ctx context.Context, s Session, rs sqlexec.RecordSet) ([][]string, error) { rows, err := GetRows4Test(ctx, s, rs) if err != nil { return nil, err } err = rs.Close() if err != nil { return nil, err } sRows := make([][]string, len(rows)) for i := range rows { row := rows[i] iRow := make([]string, row.Len()) for j := 0; j < row.Len(); j++ { if row.IsNull(j) { iRow[j] = "<nil>" } else { d := row.GetDatum(j, &rs.Fields()[j].Column.FieldType) iRow[j], err = d.ToString() if err != nil { return nil, err } } } sRows[i] = iRow } return sRows, nil } // Session errors. var ( ErrForUpdateCantRetry = terror.ClassSession.New(codeForUpdateCantRetry, mysql.MySQLErrName[mysql.ErrForUpdateCantRetry]) ) const ( codeForUpdateCantRetry terror.ErrCode = mysql.ErrForUpdateCantRetry ) func init() { sessionMySQLErrCodes := map[terror.ErrCode]uint16{ codeForUpdateCantRetry: mysql.ErrForUpdateCantRetry, } terror.ErrClassToMySQLCodes[terror.ClassSession] = sessionMySQLErrCodes }
/* Copyright 2016 Tamás Gulácsi Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package oracall import ( "encoding/hex" "fmt" "hash/fnv" "io" "strings" ) type PlsType struct { ora string } func (arg PlsType) String() string { return arg.ora } // NewArg returns a new argument to ease arument conversions. func NewPlsType(ora string) PlsType { return PlsType{ora: ora} } // FromOra retrieves the value of the argument with arg type, from src variable to dst variable. func (arg PlsType) FromOra(dst, src, varName string) string { if Gogo { if varName != "" { switch arg.ora { case "DATE": return fmt.Sprintf("%s.Set(%s)", dst, varName) } } } switch arg.ora { case "DATE": return fmt.Sprintf("%s = string(%s)", dst, src) case "PLS_INTEGER": return fmt.Sprintf("%s = %s.Value", dst, src) case "NUMBER": return fmt.Sprintf("%s = %s.Value", dst, src) } return fmt.Sprintf("%s = %s // %s", dst, src, arg.ora) } func (arg PlsType) GetOra(src, varName string) string { if Gogo { switch arg.ora { case "DATE": if varName != "" { return fmt.Sprintf("custom.NewDate(%s)", varName) } return fmt.Sprintf("custom.AsDate(%s)", src) } } return src } // ToOra adds the value of the argument with arg type, from src variable to dst variable. 
func (arg PlsType) ToOra(dst, src string) (expr string, variable string) { dstVar := mkVarName(dst) if Gogo { switch arg.ora { case "DATE": // custom.Date var pointer string if src[0] == '&' { pointer = "&" } return fmt.Sprintf(`%s := %s.Get() // toOra D %s = %s%s`, dstVar, strings.TrimPrefix(src, "&"), dst, pointer, dstVar, ), dstVar } } switch arg.ora { case "PLS_INTEGER": if src[0] != '&' { return fmt.Sprintf("%s := ora.Int32{IsNull:true}; if %s != 0 { %s.Value, %s.IsNull = %s, false }; %s = %s", dstVar, src, dstVar, dstVar, src, dst, dstVar), dstVar } case "NUMBER": if src[0] != '&' { return fmt.Sprintf("%s := ora.Float64{IsNull:true}; if %s != 0 { %s.Value, %s.IsNull = %s, false }; %s = %s", dstVar, src, dstVar, dstVar, src, dst, dstVar), dstVar } } return fmt.Sprintf("%s = %s // %s", dst, src, arg.ora), "" } func mkVarName(dst string) string { h := fnv.New64() io.WriteString(h, dst) var raw [8]byte var enc [8 * 2]byte hex.Encode(enc[:], h.Sum(raw[:0])) return fmt.Sprintf("var_%s", enc[:]) } string(custom.AsDate) /* Copyright 2016 Tamás Gulácsi Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package oracall import ( "encoding/hex" "fmt" "hash/fnv" "io" "strings" ) type PlsType struct { ora string } func (arg PlsType) String() string { return arg.ora } // NewArg returns a new argument to ease arument conversions. func NewPlsType(ora string) PlsType { return PlsType{ora: ora} } // FromOra retrieves the value of the argument with arg type, from src variable to dst variable. 
func (arg PlsType) FromOra(dst, src, varName string) string { if Gogo { if varName != "" { switch arg.ora { case "DATE": return fmt.Sprintf("%s = string(custom.NewDate(%s))", dst, varName) } } } switch arg.ora { case "DATE": return fmt.Sprintf("%s = string(%s)", dst, src) case "PLS_INTEGER": return fmt.Sprintf("%s = %s.Value", dst, src) case "NUMBER": return fmt.Sprintf("%s = %s.Value", dst, src) } return fmt.Sprintf("%s = %s // %s", dst, src, arg.ora) } func (arg PlsType) GetOra(src, varName string) string { if Gogo { switch arg.ora { case "DATE": if varName != "" { return fmt.Sprintf("string(custom.NewDate(%s))", varName) } return fmt.Sprintf("string(custom.AsDate(%s))", src) } } return src } // ToOra adds the value of the argument with arg type, from src variable to dst variable. func (arg PlsType) ToOra(dst, src string) (expr string, variable string) { dstVar := mkVarName(dst) if Gogo { switch arg.ora { case "DATE": // custom.Date var pointer string if src[0] == '&' { pointer = "&" } return fmt.Sprintf(`%s := custom.Date(%s).Get() // toOra D %s = %s%s`, dstVar, strings.TrimPrefix(src, "&"), dst, pointer, dstVar, ), dstVar } } switch arg.ora { case "PLS_INTEGER": if src[0] != '&' { return fmt.Sprintf("%s := ora.Int32{IsNull:true}; if %s != 0 { %s.Value, %s.IsNull = %s, false }; %s = %s", dstVar, src, dstVar, dstVar, src, dst, dstVar), dstVar } case "NUMBER": if src[0] != '&' { return fmt.Sprintf("%s := ora.Float64{IsNull:true}; if %s != 0 { %s.Value, %s.IsNull = %s, false }; %s = %s", dstVar, src, dstVar, dstVar, src, dst, dstVar), dstVar } } return fmt.Sprintf("%s = %s // %s", dst, src, arg.ora), "" } func mkVarName(dst string) string { h := fnv.New64() io.WriteString(h, dst) var raw [8]byte var enc [8 * 2]byte hex.Encode(enc[:], h.Sum(raw[:0])) return fmt.Sprintf("var_%s", enc[:]) }
/* Nging is a toolbox for webmasters Copyright (C) 2018-present Wenhui Shen <swh@admpub.com> This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. */ package seaweedfs import ( "fmt" "os" "path" "testing" "github.com/stretchr/testify/assert" ) //step1. weed master //step2. weed volume -port=9001 -dir=./_test //step3. weed filer -collection=test -port=8888 -port.readonly=8989 -master=localhost:9333 // or weed filer -collection=test -port=8888 -port.readonly=8989 -master=localhost:9333,localhost:9334 //============================================================================ // 映射为本地虚拟目录(此时可以使用filesystem驱动像操作本地文件一样操作seaweedfs中的文件) //============================================================================ //挂载:sudo weed mount -filer=localhost:8888 -dir=./public/upload/test -filer.path=/ -collection=test //取消挂载:关闭服务后执行 sudo umount -l ./public/upload/test // SeaweedFS对图片有很好的支持,可以指定图片显示的长度、宽度、模式,如: // http://localhost:8989/3/01637037d6.jpg?height=200&width=200 // http://localhost:8989/3/01637037d6.jpg?height=200&width=200&mode=fit // http://localhost:8989/3/01637037d6.jpg?height=200&width=200&mode=fill func TestSeaweedfs(t *testing.T) { return r := NewSeaweedfs(`test`) f, err := os.Open(`./config.go`) if err != nil { t.Fatal(err) } defer f.Close() fi, err := f.Stat() if err != nil { t.Fatal(err) } purl, err := r.Put(`/config.go`, f, fi.Size()) if err != nil { t.Fatal(err) } fmt.Println(purl) return 
err = r.Delete(path.Base(purl)) if err != nil { t.Fatal(err) } var html string assert.Equal(t, "<h2>安装 Go 第三方包 go-sqlite3</h2>", html) // 成功取得HTML内容进行后续处理 fmt.Println(html) } update /* Nging is a toolbox for webmasters Copyright (C) 2018-present Wenhui Shen <swh@admpub.com> This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. */ package seaweedfs import ( "fmt" "os" "path" "testing" "github.com/stretchr/testify/assert" ) //step1. weed master //step2. weed volume -port=9001 -dir=./_test //step3. weed filer -collection=test -port=8888 -port.readonly=8989 -master=localhost:9333 // or weed filer -collection=test -port=8888 -port.readonly=8989 -master=localhost:9333,localhost:9334 //============================================================================ // 映射为本地虚拟目录(此时可以使用filesystem驱动像操作本地文件一样操作seaweedfs中的文件) //============================================================================ //挂载:sudo weed mount -filer=localhost:8888 -dir=./public/upload/test -filer.path=/ -collection=test //取消挂载:关闭服务后执行 sudo umount -l ./public/upload/test // SeaweedFS对图片有很好的支持,可以指定图片显示的长度、宽度、模式,如: // http://localhost:8989/3/01637037d6.jpg?height=200&width=200 // http://localhost:8989/3/01637037d6.jpg?height=200&width=200&mode=fit // http://localhost:8989/3/01637037d6.jpg?height=200&width=200&mode=fill // 其它fuse方案: // https://github.com/kahing/goofys // https://github.com/ncw/rclone : // 1. 
rclone配置。执行 rclone config 开始配置Rclone // 2. rclone挂载网盘。命令示例:rclone mount config2:image-dir /root/image-dir --allow-non-empty,此命令会把本地目录/root/image-dir ,挂载到网盘(config2配置文件名)目录image-dir 下。如果你的目标目录中有文件,可以使用参数:--allow-non-empty,但是注意它会清空原目录中的文件。使用rclone mount 命令后,如果不守护进程的话,会话结束后rclone mount 进程就会终止。我们可以用screen来让rclone保持在后台运行。在执行Rclone挂载命令前,选择执行:screen –S config2,挂载完成后再用快捷键CTRL-a d 来暂时断开当前会话。最后用screen -r <screen_pid>重新连接上。 func TestSeaweedfs(t *testing.T) { return r := NewSeaweedfs(nil, `test`) f, err := os.Open(`./config.go`) if err != nil { t.Fatal(err) } defer f.Close() fi, err := f.Stat() if err != nil { t.Fatal(err) } _, purl, err := r.Put(`/config.go`, f, fi.Size()) if err != nil { t.Fatal(err) } fmt.Println(purl) return err = r.Delete(path.Base(purl)) if err != nil { t.Fatal(err) } var html string assert.Equal(t, "<h2>安装 Go 第三方包 go-sqlite3</h2>", html) // 成功取得HTML内容进行后续处理 fmt.Println(html) }
// Copyright (c) 2016-2020 Tigera, Inc. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resources import ( "context" "encoding/json" "errors" "fmt" "strings" log "github.com/sirupsen/logrus" kapiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/client-go/kubernetes" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/api/errors" apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3" "github.com/projectcalico/libcalico-go/lib/backend/api" "github.com/projectcalico/libcalico-go/lib/backend/k8s/conversion" "github.com/projectcalico/libcalico-go/lib/backend/model" cerrors "github.com/projectcalico/libcalico-go/lib/errors" ) func NewWorkloadEndpointClient(c kubernetes.Interface) K8sResourceClient { return &WorkloadEndpointClient{ clientSet: c, converter: conversion.NewConverter(), } } // Implements the api.Client interface for WorkloadEndpoints. type WorkloadEndpointClient struct { clientSet kubernetes.Interface converter conversion.Converter } func (c *WorkloadEndpointClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) { log.Debug("Received Create request on WorkloadEndpoint type") // As a special case for the CNI plugin, try to patch the Pod with the IP that we've calculated. // This works around a bug in kubelet that causes it to delay writing the Pod IP for a long time: // https://github.com/kubernetes/kubernetes/issues/39113. 
// // Note: it's a bit odd to do this in the Create, but the CNI plugin uses CreateOrUpdate(). Doing it // here makes sure that, if the update fails: we retry here, and, we don't report success without // making the patch. return c.patchInPodIPs(ctx, kvp) } func (c *WorkloadEndpointClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) { log.Debug("Received Update request on WorkloadEndpoint type") // As a special case for the CNI plugin, try to patch the Pod with the IP that we've calculated. // This works around a bug in kubelet that causes it to delay writing the Pod IP for a long time: // https://github.com/kubernetes/kubernetes/issues/39113. return c.patchInPodIPs(ctx, kvp) } func (c *WorkloadEndpointClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) { return c.Delete(ctx, kvp.Key, kvp.Revision, kvp.UID) } func (c *WorkloadEndpointClient) Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) { log.Debug("Delete for WorkloadEndpoint, patching out annotations.") return c.patchOutPodIPs(ctx, key, revision, uid) } // patchInPodIPs PATCHes the Kubernetes Pod associated with the given KVPair with the IP addresses it contains. // This is a no-op if there is no IP address. // // We store the IP addresses in annotations because patching the PodStatus directly races with changes that // kubelet makes so kubelet can undo our changes. func (c *WorkloadEndpointClient) patchInPodIPs(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) { ips := kvp.Value.(*apiv3.WorkloadEndpoint).Spec.IPNetworks if len(ips) == 0 { return kvp, nil } log.Debugf("PATCHing pod with IPs: %v", ips) key := kvp.Key return c.patchPodIPAnnotations(key, kvp.Revision, kvp.UID, ips) } // patchOutPodIPs sets our pod IP annotations to empty strings; this is used to signal that the IP has been removed // from the pod at teardown. 
func (c *WorkloadEndpointClient) patchOutPodIPs(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) { return c.patchPodIPAnnotations(key, revision, uid, nil) } func (c *WorkloadEndpointClient) patchPodIPAnnotations(key model.Key, revision string, uid *types.UID, ips []string) (*model.KVPair, error) { wepID, err := c.converter.ParseWorkloadEndpointName(key.(model.ResourceKey).Name) if err != nil { return nil, err } if wepID.Pod == "" { return nil, cerrors.ErrorInsufficientIdentifiers{Name: key.(model.ResourceKey).Name} } // Write the IP addresses into annotations. This generates an event more quickly than // waiting for kubelet to update the PodStatus PodIP and PodIPs fields. ns := key.(model.ResourceKey).Namespace firstIP := "" if len(ips) > 0 { firstIP = ips[0] } patch, err := calculateAnnotationPatch( revision, uid, conversion.AnnotationPodIP, firstIP, conversion.AnnotationPodIPs, strings.Join(ips, ","), ) if err != nil { log.WithError(err).Error("failed to calculate Pod patch.") return nil, err } log.WithField("patch", string(patch)).Debug("Calculated pod patch.") pod, err := c.clientSet.CoreV1().Pods(ns).Patch(wepID.Pod, types.StrategicMergePatchType, patch, "status") if err != nil { return nil, K8sErrorToCalico(err, key) } log.Debugf("Successfully PATCHed pod to set podIP annotation: %+v", pod) kvps, err := c.converter.PodToWorkloadEndpoints(pod) if err != nil { return nil, err } return kvps[0], nil } func calculateAnnotationPatch(revision string, uid *types.UID, namesAndValues ...string) ([]byte, error) { patch := map[string]interface{}{} metadata := map[string]interface{}{} patch["metadata"] = metadata annotations := map[string]interface{}{} metadata["annotations"] = annotations for i := 0; i < len(namesAndValues); i += 2 { annotations[namesAndValues[i]] = namesAndValues[i+1] } if revision != "" { // We have a revision. Since the revision is immutable, if our patch revision doesn't match then the // patch will fail. 
		log.WithField("rev", revision).Debug("Generating patch for specific rev")
		metadata["resourceVersion"] = revision
	}
	if uid != nil {
		// We have a UID, which identifies a particular instance of a pod with a particular name; add that to
		// the patch. Since the UID is immutable, if our patch UID doesn't match then the patch will fail.
		log.WithField("uid", *uid).Debug("Generating patch for specific UID")
		metadata["uid"] = uid
	}
	return json.Marshal(patch)
}

// Get retrieves the WorkloadEndpoint with the name given in the key by fetching
// the backing Pod and converting it to WorkloadEndpoints.
func (c *WorkloadEndpointClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
	log.Debug("Received Get request on WorkloadEndpoint type")
	k := key.(model.ResourceKey)

	// Parse resource name so we can get the podName
	wepID, err := c.converter.ParseWorkloadEndpointName(key.(model.ResourceKey).Name)
	if err != nil {
		return nil, err
	}
	if wepID.Pod == "" {
		return nil, cerrors.ErrorResourceDoesNotExist{
			Identifier: key,
			Err:        errors.New("malformed WorkloadEndpoint name - unable to determine Pod name"),
		}
	}

	pod, err := c.clientSet.CoreV1().Pods(k.Namespace).Get(wepID.Pod, metav1.GetOptions{ResourceVersion: revision})
	if err != nil {
		return nil, K8sErrorToCalico(err, k)
	}

	// Decide if this pod should be displayed.
if !c.converter.IsValidCalicoWorkloadEndpoint(pod) { return nil, cerrors.ErrorResourceDoesNotExist{Identifier: k} } kvps, err := c.converter.PodToWorkloadEndpoints(pod) if err != nil { return nil, err } // Find the WorkloadEndpoint that has a name matching the name in the given key for _, kvp := range kvps { wep := kvp.Value.(*apiv3.WorkloadEndpoint) if wep.Name == key.(model.ResourceKey).Name { return kvp, nil } } return nil, kerrors.NewNotFound(apiv3.Resource("WorkloadEndpoint"), key.String()) } func (c *WorkloadEndpointClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) { log.Debug("Received List request on WorkloadEndpoint type") l := list.(model.ResourceListOptions) // If a "Name" is provided, we may be able to get the exact WorkloadEndpoint or narrow the WorkloadEndpoints to a // single Pod. if l.Name != "" { return c.listUsingName(ctx, l, revision) } return c.list(l, revision) } // listUsingName uses the name in the listOptions to retrieve the WorkloadEndpoints. The name, at the very least, must identify // a single Pod, otherwise an error will occur. 
func (c *WorkloadEndpointClient) listUsingName(ctx context.Context, listOptions model.ResourceListOptions, revision string) (*model.KVPairList, error) { wepID, err := c.converter.ParseWorkloadEndpointName(listOptions.Name) if err != nil { return nil, err } if wepID.Pod == "" { return nil, cerrors.ErrorResourceDoesNotExist{ Identifier: listOptions, Err: errors.New("malformed WorkloadEndpoint name - unable to determine Pod name"), } } pod, err := c.clientSet.CoreV1().Pods(listOptions.Namespace).Get(wepID.Pod, metav1.GetOptions{ResourceVersion: revision}) if err != nil { if kerrors.IsNotFound(err) { return &model.KVPairList{ KVPairs: []*model.KVPair{}, Revision: revision, }, nil } else { return nil, err } } kvps, err := c.converter.PodToWorkloadEndpoints(pod) if err != nil { return nil, err } // If Endpoint is available get the single WorkloadEndpoint if wepID.Endpoint != "" { // Set to an empty list in case a match isn't found var tmpKVPs []*model.KVPair wepName, err := wepID.CalculateWorkloadEndpointName(false) if err != nil { return nil, err } // Find the WorkloadEndpoint that has a name matching the name in the given key for _, kvp := range kvps { wep := kvp.Value.(*apiv3.WorkloadEndpoint) if wep.Name == wepName { tmpKVPs = []*model.KVPair{kvp} break } } kvps = tmpKVPs } return &model.KVPairList{ KVPairs: kvps, Revision: revision, }, nil } // list lists all the Workload endpoints for the namespace given in listOptions. func (c *WorkloadEndpointClient) list(listOptions model.ResourceListOptions, revision string) (*model.KVPairList, error) { podList, err := c.clientSet.CoreV1().Pods(listOptions.Namespace).List(metav1.ListOptions{ResourceVersion: revision}) if err != nil { return nil, K8sErrorToCalico(err, listOptions) } // For each Pod, return a workload endpoint. var ret []*model.KVPair for _, pod := range podList.Items { // Decide if this pod should be included. 
if !c.converter.IsValidCalicoWorkloadEndpoint(&pod) { continue } kvps, err := c.converter.PodToWorkloadEndpoints(&pod) if err != nil { return nil, err } ret = append(ret, kvps...) } return &model.KVPairList{ KVPairs: ret, Revision: revision, }, nil } func (c *WorkloadEndpointClient) EnsureInitialized() error { return nil } func (c *WorkloadEndpointClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) { // Build watch options to pass to k8s. opts := metav1.ListOptions{ResourceVersion: revision, Watch: true} rlo, ok := list.(model.ResourceListOptions) if !ok { return nil, fmt.Errorf("ListInterface is not a ResourceListOptions: %s", list) } if len(rlo.Name) != 0 { if len(rlo.Namespace) == 0 { return nil, errors.New("cannot watch a specific WorkloadEndpoint without a namespace") } // We've been asked to watch a specific workloadendpoint wepids, err := c.converter.ParseWorkloadEndpointName(rlo.Name) if err != nil { return nil, err } log.WithField("name", wepids.Pod).Debug("Watching a single workloadendpoint") opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", wepids.Pod).String() } ns := rlo.Namespace k8sWatch, err := c.clientSet.CoreV1().Pods(ns).Watch(opts) if err != nil { return nil, K8sErrorToCalico(err, list) } converter := func(r Resource) ([]*model.KVPair, error) { k8sPod, ok := r.(*kapiv1.Pod) if !ok { return nil, errors.New("Pod conversion with incorrect k8s resource type") } if !c.converter.IsValidCalicoWorkloadEndpoint(k8sPod) { // If this is not a valid Calico workload endpoint then don't return in the watch. // Returning a nil KVP and a nil error swallows the event. return nil, nil } return c.converter.PodToWorkloadEndpoints(k8sPod) } return newK8sWatcherConverterOneToMany(ctx, "Pod", converter, k8sWatch), nil } Work around: Drop the resource version when patching pods to add IPs. 
The CNI plugin can't handle the version conflict and I think kubelet ensures that more than one CNI plugin can't run at once so any conflicts are spurious. // Copyright (c) 2016-2020 Tigera, Inc. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resources import ( "context" "encoding/json" "errors" "fmt" "strings" log "github.com/sirupsen/logrus" kapiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/client-go/kubernetes" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/api/errors" apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3" "github.com/projectcalico/libcalico-go/lib/backend/api" "github.com/projectcalico/libcalico-go/lib/backend/k8s/conversion" "github.com/projectcalico/libcalico-go/lib/backend/model" cerrors "github.com/projectcalico/libcalico-go/lib/errors" ) func NewWorkloadEndpointClient(c kubernetes.Interface) K8sResourceClient { return &WorkloadEndpointClient{ clientSet: c, converter: conversion.NewConverter(), } } // Implements the api.Client interface for WorkloadEndpoints. type WorkloadEndpointClient struct { clientSet kubernetes.Interface converter conversion.Converter } func (c *WorkloadEndpointClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) { log.Debug("Received Create request on WorkloadEndpoint type") // As a special case for the CNI plugin, try to patch the Pod with the IP that we've calculated. 
// This works around a bug in kubelet that causes it to delay writing the Pod IP for a long time: // https://github.com/kubernetes/kubernetes/issues/39113. // // Note: it's a bit odd to do this in the Create, but the CNI plugin uses CreateOrUpdate(). Doing it // here makes sure that, if the update fails: we retry here, and, we don't report success without // making the patch. return c.patchInPodIPs(ctx, kvp) } func (c *WorkloadEndpointClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) { log.Debug("Received Update request on WorkloadEndpoint type") // As a special case for the CNI plugin, try to patch the Pod with the IP that we've calculated. // This works around a bug in kubelet that causes it to delay writing the Pod IP for a long time: // https://github.com/kubernetes/kubernetes/issues/39113. return c.patchInPodIPs(ctx, kvp) } func (c *WorkloadEndpointClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) { return c.Delete(ctx, kvp.Key, kvp.Revision, kvp.UID) } func (c *WorkloadEndpointClient) Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) { log.Debug("Delete for WorkloadEndpoint, patching out annotations.") return c.patchOutPodIPs(ctx, key, revision, uid) } // patchInPodIPs PATCHes the Kubernetes Pod associated with the given KVPair with the IP addresses it contains. // This is a no-op if there is no IP address. // // We store the IP addresses in annotations because patching the PodStatus directly races with changes that // kubelet makes so kubelet can undo our changes. 
func (c *WorkloadEndpointClient) patchInPodIPs(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) { ips := kvp.Value.(*apiv3.WorkloadEndpoint).Spec.IPNetworks if len(ips) == 0 { return kvp, nil } log.Debugf("PATCHing pod with IPs: %v", ips) key := kvp.Key // Note: we drop the revision here because the CNI plugin can't handle a retry right now (and the kubelet // ensures that only one CNI ADD for a given UID can be in progress). return c.patchPodIPAnnotations(key, "", kvp.UID, ips) } // patchOutPodIPs sets our pod IP annotations to empty strings; this is used to signal that the IP has been removed // from the pod at teardown. func (c *WorkloadEndpointClient) patchOutPodIPs(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) { return c.patchPodIPAnnotations(key, revision, uid, nil) } func (c *WorkloadEndpointClient) patchPodIPAnnotations(key model.Key, revision string, uid *types.UID, ips []string) (*model.KVPair, error) { wepID, err := c.converter.ParseWorkloadEndpointName(key.(model.ResourceKey).Name) if err != nil { return nil, err } if wepID.Pod == "" { return nil, cerrors.ErrorInsufficientIdentifiers{Name: key.(model.ResourceKey).Name} } // Write the IP addresses into annotations. This generates an event more quickly than // waiting for kubelet to update the PodStatus PodIP and PodIPs fields. 
ns := key.(model.ResourceKey).Namespace firstIP := "" if len(ips) > 0 { firstIP = ips[0] } patch, err := calculateAnnotationPatch( revision, uid, conversion.AnnotationPodIP, firstIP, conversion.AnnotationPodIPs, strings.Join(ips, ","), ) if err != nil { log.WithError(err).Error("failed to calculate Pod patch.") return nil, err } log.WithField("patch", string(patch)).Debug("Calculated pod patch.") pod, err := c.clientSet.CoreV1().Pods(ns).Patch(wepID.Pod, types.StrategicMergePatchType, patch, "status") if err != nil { return nil, K8sErrorToCalico(err, key) } log.Debugf("Successfully PATCHed pod to set podIP annotation: %+v", pod) kvps, err := c.converter.PodToWorkloadEndpoints(pod) if err != nil { return nil, err } return kvps[0], nil } func calculateAnnotationPatch(revision string, uid *types.UID, namesAndValues ...string) ([]byte, error) { patch := map[string]interface{}{} metadata := map[string]interface{}{} patch["metadata"] = metadata annotations := map[string]interface{}{} metadata["annotations"] = annotations for i := 0; i < len(namesAndValues); i += 2 { annotations[namesAndValues[i]] = namesAndValues[i+1] } if revision != "" { // We have a revision. Since the revision is immutable, if our patch revision doesn't match then the // patch will fail. log.WithField("rev", revision).Debug("Generating patch for specific rev") metadata["resourceVersion"] = revision } if uid != nil { // We have a UID, which identifies a particular instance of a pod with a particular name; add that to // the patch. Since the UID is immutable, if our patch UID doesn't match then the patch will fail. 
		log.WithField("uid", *uid).Debug("Generating patch for specific UID")
		metadata["uid"] = uid
	}
	return json.Marshal(patch)
}

// Get retrieves the WorkloadEndpoint with the name given in the key by fetching
// the backing Pod and converting it to WorkloadEndpoints.
func (c *WorkloadEndpointClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
	log.Debug("Received Get request on WorkloadEndpoint type")
	k := key.(model.ResourceKey)

	// Parse resource name so we can get the podName
	wepID, err := c.converter.ParseWorkloadEndpointName(key.(model.ResourceKey).Name)
	if err != nil {
		return nil, err
	}
	if wepID.Pod == "" {
		return nil, cerrors.ErrorResourceDoesNotExist{
			Identifier: key,
			Err:        errors.New("malformed WorkloadEndpoint name - unable to determine Pod name"),
		}
	}

	pod, err := c.clientSet.CoreV1().Pods(k.Namespace).Get(wepID.Pod, metav1.GetOptions{ResourceVersion: revision})
	if err != nil {
		return nil, K8sErrorToCalico(err, k)
	}

	// Decide if this pod should be displayed.
	if !c.converter.IsValidCalicoWorkloadEndpoint(pod) {
		return nil, cerrors.ErrorResourceDoesNotExist{Identifier: k}
	}

	kvps, err := c.converter.PodToWorkloadEndpoints(pod)
	if err != nil {
		return nil, err
	}

	// Find the WorkloadEndpoint that has a name matching the name in the given key
	for _, kvp := range kvps {
		wep := kvp.Value.(*apiv3.WorkloadEndpoint)
		if wep.Name == key.(model.ResourceKey).Name {
			return kvp, nil
		}
	}

	return nil, kerrors.NewNotFound(apiv3.Resource("WorkloadEndpoint"), key.String())
}

// List lists WorkloadEndpoints for the namespace in listOptions, optionally
// narrowed to a single Pod (or single endpoint) when a Name is provided.
func (c *WorkloadEndpointClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
	log.Debug("Received List request on WorkloadEndpoint type")
	l := list.(model.ResourceListOptions)

	// If a "Name" is provided, we may be able to get the exact WorkloadEndpoint or narrow the WorkloadEndpoints to a
	// single Pod.
	if l.Name != "" {
		return c.listUsingName(ctx, l, revision)
	}

	return c.list(l, revision)
}

// listUsingName uses the name in the listOptions to retrieve the WorkloadEndpoints.
The name, at the very least, must identify // a single Pod, otherwise an error will occur. func (c *WorkloadEndpointClient) listUsingName(ctx context.Context, listOptions model.ResourceListOptions, revision string) (*model.KVPairList, error) { wepID, err := c.converter.ParseWorkloadEndpointName(listOptions.Name) if err != nil { return nil, err } if wepID.Pod == "" { return nil, cerrors.ErrorResourceDoesNotExist{ Identifier: listOptions, Err: errors.New("malformed WorkloadEndpoint name - unable to determine Pod name"), } } pod, err := c.clientSet.CoreV1().Pods(listOptions.Namespace).Get(wepID.Pod, metav1.GetOptions{ResourceVersion: revision}) if err != nil { if kerrors.IsNotFound(err) { return &model.KVPairList{ KVPairs: []*model.KVPair{}, Revision: revision, }, nil } else { return nil, err } } kvps, err := c.converter.PodToWorkloadEndpoints(pod) if err != nil { return nil, err } // If Endpoint is available get the single WorkloadEndpoint if wepID.Endpoint != "" { // Set to an empty list in case a match isn't found var tmpKVPs []*model.KVPair wepName, err := wepID.CalculateWorkloadEndpointName(false) if err != nil { return nil, err } // Find the WorkloadEndpoint that has a name matching the name in the given key for _, kvp := range kvps { wep := kvp.Value.(*apiv3.WorkloadEndpoint) if wep.Name == wepName { tmpKVPs = []*model.KVPair{kvp} break } } kvps = tmpKVPs } return &model.KVPairList{ KVPairs: kvps, Revision: revision, }, nil } // list lists all the Workload endpoints for the namespace given in listOptions. func (c *WorkloadEndpointClient) list(listOptions model.ResourceListOptions, revision string) (*model.KVPairList, error) { podList, err := c.clientSet.CoreV1().Pods(listOptions.Namespace).List(metav1.ListOptions{ResourceVersion: revision}) if err != nil { return nil, K8sErrorToCalico(err, listOptions) } // For each Pod, return a workload endpoint. var ret []*model.KVPair for _, pod := range podList.Items { // Decide if this pod should be included. 
if !c.converter.IsValidCalicoWorkloadEndpoint(&pod) { continue } kvps, err := c.converter.PodToWorkloadEndpoints(&pod) if err != nil { return nil, err } ret = append(ret, kvps...) } return &model.KVPairList{ KVPairs: ret, Revision: revision, }, nil } func (c *WorkloadEndpointClient) EnsureInitialized() error { return nil } func (c *WorkloadEndpointClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) { // Build watch options to pass to k8s. opts := metav1.ListOptions{ResourceVersion: revision, Watch: true} rlo, ok := list.(model.ResourceListOptions) if !ok { return nil, fmt.Errorf("ListInterface is not a ResourceListOptions: %s", list) } if len(rlo.Name) != 0 { if len(rlo.Namespace) == 0 { return nil, errors.New("cannot watch a specific WorkloadEndpoint without a namespace") } // We've been asked to watch a specific workloadendpoint wepids, err := c.converter.ParseWorkloadEndpointName(rlo.Name) if err != nil { return nil, err } log.WithField("name", wepids.Pod).Debug("Watching a single workloadendpoint") opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", wepids.Pod).String() } ns := rlo.Namespace k8sWatch, err := c.clientSet.CoreV1().Pods(ns).Watch(opts) if err != nil { return nil, K8sErrorToCalico(err, list) } converter := func(r Resource) ([]*model.KVPair, error) { k8sPod, ok := r.(*kapiv1.Pod) if !ok { return nil, errors.New("Pod conversion with incorrect k8s resource type") } if !c.converter.IsValidCalicoWorkloadEndpoint(k8sPod) { // If this is not a valid Calico workload endpoint then don't return in the watch. // Returning a nil KVP and a nil error swallows the event. return nil, nil } return c.converter.PodToWorkloadEndpoints(k8sPod) } return newK8sWatcherConverterOneToMany(ctx, "Pod", converter, k8sWatch), nil }
package instana import ( "strings" "time" "github.com/instana/go-sensor/w3ctrace" "github.com/opentracing/opentracing-go/ext" ) type typedSpanData interface { Type() RegisteredSpanType Kind() SpanKind } // Registered types supported by Instana. The span type is determined based on // the operation name passed to the `StartSpan()` call of a tracer. // // It is NOT RECOMMENDED to use operation names that match any of these constants in your // custom instrumentation code unless you explicitly wish to send data as a registered span. // The conversion will result in loss of custom tags that are not supported for this span type. // The list of supported tags can be found in the godoc of the respective span tags type below. const ( // SDK span, a generic span containing arbitrary data. Spans with operation name // not listed in the subsequent list will be sent as an SDK spans forwarding all // attached tags to the agent SDKSpanType = RegisteredSpanType("sdk") // HTTP server and client spans HTTPServerSpanType = RegisteredSpanType("g.http") HTTPClientSpanType = RegisteredSpanType("http") // RPC server and client spans RPCServerSpanType = RegisteredSpanType("rpc-server") RPCClientSpanType = RegisteredSpanType("rpc-client") // Kafka consumer/producer span KafkaSpanType = RegisteredSpanType("kafka") // Google Cloud Storage client span GCPStorageSpanType = RegisteredSpanType("gcs") // Google Cloud PubSub client span GCPPubSubSpanType = RegisteredSpanType("gcps") // AWS Lambda entry span AWSLambdaEntrySpanType = RegisteredSpanType("aws.lambda.entry") ) // RegisteredSpanType represents the span type supported by Instana type RegisteredSpanType string // ExtractData is a factory method to create the `data` section for a typed span func (st RegisteredSpanType) ExtractData(span *spanS) typedSpanData { switch st { case HTTPServerSpanType, HTTPClientSpanType: return NewHTTPSpanData(span) case RPCServerSpanType, RPCClientSpanType: return NewRPCSpanData(span) case KafkaSpanType: 
return NewKafkaSpanData(span) case GCPStorageSpanType: return NewGCPStorageSpanData(span) case GCPPubSubSpanType: return NewGCPPubSubSpanData(span) case AWSLambdaEntrySpanType: return NewAWSLambdaSpanData(span) default: return NewSDKSpanData(span) } } // SpanKind represents values of field `k` in OpenTracing span representation. It represents // the direction of the call associated with a span. type SpanKind uint8 // Valid span kinds const ( // The kind of a span associated with an inbound call, this must be the first span in the trace. EntrySpanKind SpanKind = iota + 1 // The kind of a span associated with an outbound call, e.g. an HTTP client request, posting to a message bus, etc. ExitSpanKind // The default kind for a span that is associated with a call within the same service. IntermediateSpanKind ) // String returns string representation of a span kind suitable for use as a value for `data.sdk.type` // tag of an SDK span. By default all spans are intermediate unless they are explicitly set to be "entry" or "exit" func (k SpanKind) String() string { switch k { case EntrySpanKind: return "entry" case ExitSpanKind: return "exit" default: return "intermediate" } } // ForeignParent represents a related 3rd-party trace context, e.g. 
a W3C Trace Context type ForeignParent struct { TraceID string `json:"t"` ParentID string `json:"p"` LatestTraceState string `json:"lts,omitempty"` } func newForeignParent(p interface{}) *ForeignParent { switch p := p.(type) { case w3ctrace.Context: return newW3CForeignParent(p) default: return nil } } func newW3CForeignParent(trCtx w3ctrace.Context) *ForeignParent { p, s := trCtx.Parent(), trCtx.State() var lastVendorData string if len(s) > 0 { lastVendorData = s[0] } return &ForeignParent{ TraceID: p.TraceID, ParentID: p.ParentID, LatestTraceState: lastVendorData, } } // Span represents the OpenTracing span document to be sent to the agent type Span struct { TraceID int64 `json:"-"` TraceID128 string `json:"t"` ParentID int64 `json:"p,omitempty"` SpanID int64 `json:"s"` Timestamp uint64 `json:"ts"` Duration uint64 `json:"d"` Name string `json:"n"` From *fromS `json:"f"` Batch *batchInfo `json:"b,omitempty"` Kind int `json:"k"` Ec int `json:"ec,omitempty"` Data typedSpanData `json:"data"` Synthetic bool `json:"sy,omitempty"` ForeignParent *ForeignParent `json:"fp,omitempty"` CorrelationType string `json:"crtp,omitempty"` CorrelationID string `json:"crid,omitempty"` } func newSpan(span *spanS) Span { data := RegisteredSpanType(span.Operation).ExtractData(span) sp := Span{ TraceID: span.context.TraceID, TraceID128: FormatLongID(span.context.TraceIDHi, span.context.TraceID), ParentID: span.context.ParentID, SpanID: span.context.SpanID, Timestamp: uint64(span.Start.UnixNano()) / uint64(time.Millisecond), Duration: uint64(span.Duration) / uint64(time.Millisecond), Name: string(data.Type()), Ec: span.ErrorCount, ForeignParent: newForeignParent(span.context.ForeignParent), CorrelationType: span.Correlation.Type, CorrelationID: span.Correlation.ID, Kind: int(data.Kind()), Data: data, } if bs, ok := span.Tags[batchSizeTag].(int); ok { if bs > 1 { sp.Batch = &batchInfo{Size: bs} } delete(span.Tags, batchSizeTag) } if syn, ok := span.Tags[syntheticCallTag].(bool); ok { 
sp.Synthetic = syn delete(span.Tags, syntheticCallTag) } return sp } type batchInfo struct { Size int `json:"s"` } // SpanData contains fields to be sent in the `data` section of an OT span document. These fields are // common for all span types. type SpanData struct { Service string `json:"service,omitempty"` st RegisteredSpanType sk interface{} } // NewSpanData initializes a new span data from tracer span func NewSpanData(span *spanS, st RegisteredSpanType) SpanData { return SpanData{ Service: span.Service, st: st, sk: span.Tags[string(ext.SpanKind)], } } // Type returns the registered span type suitable for use as the value of `n` field. func (d SpanData) Type() RegisteredSpanType { return d.st } // Kind returns the kind of the span. It handles the github.com/opentracing/opentracing-go/ext.SpanKindEnum // values as well as generic "entry" and "exit" func (d SpanData) Kind() SpanKind { switch d.sk { case ext.SpanKindRPCServerEnum, string(ext.SpanKindRPCServerEnum), ext.SpanKindConsumerEnum, string(ext.SpanKindConsumerEnum), "entry": return EntrySpanKind case ext.SpanKindRPCClientEnum, string(ext.SpanKindRPCClientEnum), ext.SpanKindProducerEnum, string(ext.SpanKindProducerEnum), "exit": return ExitSpanKind default: return IntermediateSpanKind } } // SDKSpanData represents the `data` section of an SDK span sent within an OT span document type SDKSpanData struct { SpanData Tags SDKSpanTags `json:"sdk"` } // NewSDKSpanData initializes a new SDK span data from a tracer span func NewSDKSpanData(span *spanS) SDKSpanData { d := NewSpanData(span, SDKSpanType) return SDKSpanData{ SpanData: d, Tags: NewSDKSpanTags(span, d.Kind().String()), } } // SDKSpanTags contains fields within the `data.sdk` section of an OT span document type SDKSpanTags struct { Name string `json:"name"` Type string `json:"type,omitempty"` Arguments string `json:"arguments,omitempty"` Return string `json:"return,omitempty"` Custom map[string]interface{} `json:"custom,omitempty"` } // NewSDKSpanTags 
extracts SDK span tags from a tracer span func NewSDKSpanTags(span *spanS, spanType string) SDKSpanTags { tags := SDKSpanTags{ Name: span.Operation, Type: spanType, Custom: map[string]interface{}{}, } if len(span.Tags) != 0 { tags.Custom["tags"] = span.Tags } if logs := collectTracerSpanLogs(span); len(logs) > 0 { tags.Custom["logs"] = logs } if len(span.context.Baggage) != 0 { tags.Custom["baggage"] = span.context.Baggage } return tags } // HTTPSpanData represents the `data` section of an HTTP span sent within an OT span document type HTTPSpanData struct { SpanData Tags HTTPSpanTags `json:"http"` } // NewHTTPSpanData initializes a new HTTP span data from tracer span func NewHTTPSpanData(span *spanS) HTTPSpanData { data := HTTPSpanData{ SpanData: NewSpanData(span, RegisteredSpanType(span.Operation)), Tags: NewHTTPSpanTags(span), } return data } // HTTPSpanTags contains fields within the `data.http` section of an OT span document type HTTPSpanTags struct { // Full request/response URL URL string `json:"url,omitempty"` // The HTTP status code returned with client/server response Status int `json:"status,omitempty"` // The HTTP method of the request Method string `json:"method,omitempty"` // Path is the path part of the request URL Path string `json:"path,omitempty"` // Params are the request query string parameters Params string `json:"params,omitempty"` // Headers are the captured request/response headers Headers map[string]string `json:"header,omitempty"` // PathTemplate is the raw template string used to route the request PathTemplate string `json:"path_tpl,omitempty"` // The name:port of the host to which the request had been sent Host string `json:"host,omitempty"` // The name of the protocol used for request ("http" or "https") Protocol string `json:"protocol,omitempty"` // The message describing an error occurred during the request handling Error string `json:"error,omitempty"` } // NewHTTPSpanTags extracts HTTP-specific span tags from a tracer span func 
NewHTTPSpanTags(span *spanS) HTTPSpanTags { var tags HTTPSpanTags for k, v := range span.Tags { switch k { case "http.url", string(ext.HTTPUrl): readStringTag(&tags.URL, v) case "http.status", "http.status_code": readIntTag(&tags.Status, v) case "http.method", string(ext.HTTPMethod): readStringTag(&tags.Method, v) case "http.path": readStringTag(&tags.Path, v) case "http.params": readStringTag(&tags.Params, v) case "http.header": if m, ok := v.(map[string]string); ok { tags.Headers = m } case "http.path_tpl": readStringTag(&tags.PathTemplate, v) case "http.host": readStringTag(&tags.Host, v) case "http.protocol": readStringTag(&tags.Protocol, v) case "http.error": readStringTag(&tags.Error, v) } } return tags } // RPCSpanData represents the `data` section of an RPC span sent within an OT span document type RPCSpanData struct { SpanData Tags RPCSpanTags `json:"rpc"` } // NewRPCSpanData initializes a new RPC span data from tracer span func NewRPCSpanData(span *spanS) RPCSpanData { data := RPCSpanData{ SpanData: NewSpanData(span, RegisteredSpanType(span.Operation)), Tags: NewRPCSpanTags(span), } return data } // RPCSpanTags contains fields within the `data.rpc` section of an OT span document type RPCSpanTags struct { // The name of the remote host for an RPC call Host string `json:"host,omitempty"` // The port of the remote host for an RPC call Port string `json:"port,omitempty"` // The name of the remote method to invoke Call string `json:"call,omitempty"` // The type of an RPC call, e.g. either "unary" or "stream" for GRPC requests CallType string `json:"call_type,omitempty"` // The RPC flavor used for this call, e.g. 
"grpc" for GRPC requests Flavor string `json:"flavor,omitempty"` // The message describing an error occurred during the request handling Error string `json:"error,omitempty"` } // NewRPCSpanTags extracts RPC-specific span tags from a tracer span func NewRPCSpanTags(span *spanS) RPCSpanTags { var tags RPCSpanTags for k, v := range span.Tags { switch k { case "rpc.host": readStringTag(&tags.Host, v) case "rpc.port": readStringTag(&tags.Port, v) case "rpc.call": readStringTag(&tags.Call, v) case "rpc.call_type": readStringTag(&tags.CallType, v) case "rpc.flavor": readStringTag(&tags.Flavor, v) case "rpc.error": readStringTag(&tags.Error, v) } } return tags } // KafkaSpanData represents the `data` section of an Kafka span sent within an OT span document type KafkaSpanData struct { SpanData Tags KafkaSpanTags `json:"kafka"` } // NewKafkaSpanData initializes a new Kafka span data from tracer span func NewKafkaSpanData(span *spanS) KafkaSpanData { data := KafkaSpanData{ SpanData: NewSpanData(span, RegisteredSpanType(span.Operation)), Tags: NewKafkaSpanTags(span), } return data } // KafkaSpanTags contains fields within the `data.kafka` section of an OT span document type KafkaSpanTags struct { // Kafka topic Service string `json:"service"` // The access mode:, either "send" for publisher or "consume" for consumer Access string `json:"access"` } // NewKafkaSpanTags extracts Kafka-specific span tags from a tracer span func NewKafkaSpanTags(span *spanS) KafkaSpanTags { var tags KafkaSpanTags for k, v := range span.Tags { switch k { case "kafka.service": readStringTag(&tags.Service, v) case "kafka.access": readStringTag(&tags.Access, v) } } return tags } // GCPStorageSpanData represents the `data` section of a Google Cloud Storage span sent within an OT span document type GCPStorageSpanData struct { SpanData Tags GCPStorageSpanTags `json:"gcs"` } // NewGCPStorageSpanData initializes a new Google Cloud Storage span data from tracer span func NewGCPStorageSpanData(span *spanS) 
GCPStorageSpanData { data := GCPStorageSpanData{ SpanData: NewSpanData(span, GCPStorageSpanType), Tags: NewGCPStorageSpanTags(span), } return data } // Kind returns the span kind for a Google Cloud Storage span func (d GCPStorageSpanData) Kind() SpanKind { return ExitSpanKind } // GCPStorageSpanTags contains fields within the `data.gcs` section of an OT span document type GCPStorageSpanTags struct { Operation string `json:"op,omitempty"` Bucket string `json:"bucket,omitempty"` Object string `json:"object,omitempty"` Entity string `json:"entity,omitempty"` Range string `json:"range,omitempty"` SourceBucket string `json:"sourceBucket,omitempty"` SourceObject string `json:"sourceObject,omitempty"` DestinationBucket string `json:"destinationBucket,omitempty"` DestinationObject string `json:"destinationObject,omitempty"` NumberOfOperations string `json:"numberOfOperations,omitempty"` ProjectID string `json:"projectId,omitempty"` AccessID string `json:"accessId,omitempty"` } // NewGCPStorageSpanTags extracts Google Cloud Storage span tags from a tracer span func NewGCPStorageSpanTags(span *spanS) GCPStorageSpanTags { var tags GCPStorageSpanTags for k, v := range span.Tags { switch k { case "gcs.op": readStringTag(&tags.Operation, v) case "gcs.bucket": readStringTag(&tags.Bucket, v) case "gcs.object": readStringTag(&tags.Object, v) case "gcs.entity": readStringTag(&tags.Entity, v) case "gcs.range": readStringTag(&tags.Range, v) case "gcs.sourceBucket": readStringTag(&tags.SourceBucket, v) case "gcs.sourceObject": readStringTag(&tags.SourceObject, v) case "gcs.destinationBucket": readStringTag(&tags.DestinationBucket, v) case "gcs.destinationObject": readStringTag(&tags.DestinationObject, v) case "gcs.numberOfOperations": readStringTag(&tags.NumberOfOperations, v) case "gcs.projectId": readStringTag(&tags.ProjectID, v) case "gcs.accessId": readStringTag(&tags.AccessID, v) } } return tags } // GCPPubSubSpanData represents the `data` section of a Google Cloud Pub/Sub span 
sent within an OT span document type GCPPubSubSpanData struct { SpanData Tags GCPPubSubSpanTags `json:"gcps"` } // NewGCPPubSubSpanData initializes a new Google Cloud Pub/Span span data from tracer span func NewGCPPubSubSpanData(span *spanS) GCPPubSubSpanData { data := GCPPubSubSpanData{ SpanData: NewSpanData(span, GCPPubSubSpanType), Tags: NewGCPPubSubSpanTags(span), } return data } // Kind returns the span kind for a Google Cloud Pub/Sub span func (d GCPPubSubSpanData) Kind() SpanKind { switch strings.ToLower(d.Tags.Operation) { case "consume": return EntrySpanKind default: return ExitSpanKind } } // GCPPubSubSpanTags contains fields within the `data.gcps` section of an OT span document type GCPPubSubSpanTags struct { ProjectID string `json:"projid"` Operation string `json:"op"` Topic string `json:"top,omitempty"` Subscription string `json:"sub,omitempty"` MessageID string `json:"msgid,omitempty"` } // NewGCPPubSubSpanTags extracts Google Cloud Pub/Sub span tags from a tracer span func NewGCPPubSubSpanTags(span *spanS) GCPPubSubSpanTags { var tags GCPPubSubSpanTags for k, v := range span.Tags { switch k { case "gcps.projid": readStringTag(&tags.ProjectID, v) case "gcps.op": readStringTag(&tags.Operation, v) case "gcps.top": readStringTag(&tags.Topic, v) case "gcps.sub": readStringTag(&tags.Subscription, v) case "gcps.msgid": readStringTag(&tags.MessageID, v) } } return tags } // AWSLambdaCloudWatchSpanTags contains fields within the `data.lambda.cw` section of an OT span document type AWSLambdaCloudWatchSpanTags struct { Events *AWSLambdaCloudWatchEventTags `json:"events,omitempty"` Logs *AWSLambdaCloudWatchLogsTags `json:"logs,omitempty"` } // NewAWSLambdaCloudWatchSpanTags extracts CloudWatch tags for an AWS Lambda entry span func NewAWSLambdaCloudWatchSpanTags(span *spanS) AWSLambdaCloudWatchSpanTags { var tags AWSLambdaCloudWatchSpanTags if events := NewAWSLambdaCloudWatchEventTags(span); !events.IsZero() { tags.Events = &events } if logs := 
NewAWSLambdaCloudWatchLogsTags(span); !logs.IsZero() { tags.Logs = &logs } return tags } // IsZero returns true if an AWSLambdaCloudWatchSpanTags struct was populated with event data func (tags AWSLambdaCloudWatchSpanTags) IsZero() bool { return (tags.Events == nil || tags.Events.IsZero()) && (tags.Logs == nil || tags.Logs.IsZero()) } // AWSLambdaCloudWatchEventTags contains fields within the `data.lambda.cw.events` section of an OT span document type AWSLambdaCloudWatchEventTags struct { // ID is the ID of the event ID string `json:"id"` // Resources contains the event resources Resources []string `json:"resources"` // More is set to true if the event resources list was truncated More bool `json:"more,omitempty"` } // NewAWSLambdaCloudWatchEventTags extracts CloudWatch event tags for an AWS Lambda entry span. It truncates // the resources list to the first 3 items, populating the `data.lambda.cw.events.more` tag and limits each // resource string to the first 200 characters to reduce the payload. 
func NewAWSLambdaCloudWatchEventTags(span *spanS) AWSLambdaCloudWatchEventTags { var tags AWSLambdaCloudWatchEventTags if v, ok := span.Tags["cloudwatch.events.id"]; ok { readStringTag(&tags.ID, v) } if v, ok := span.Tags["cloudwatch.events.resources"]; ok { switch v := v.(type) { case []string: if len(v) > 3 { v = v[:3] tags.More = true } tags.Resources = v case string: tags.Resources = []string{v} case []byte: tags.Resources = []string{string(v)} } } // truncate resources if len(tags.Resources) > 3 { tags.Resources, tags.More = tags.Resources[:3], true } for i := range tags.Resources { if len(tags.Resources[i]) > 200 { tags.Resources[i] = tags.Resources[i][:200] } } return tags } // IsZero returns true if an AWSCloudWatchEventTags struct was populated with event data func (tags AWSLambdaCloudWatchEventTags) IsZero() bool { return tags.ID == "" } // AWSLambdaCloudWatchLogsTags contains fields within the `data.lambda.cw.logs` section of an OT span document type AWSLambdaCloudWatchLogsTags struct { Group string `json:"group"` Stream string `json:"stream"` Events []string `json:"events"` More bool `json:"more,omitempty"` DecodingError string `json:"decodingError,omitempty"` } // NewAWSLambdaCloudWatchLogsTags extracts CloudWatch Logs tags for an AWS Lambda entry span. It truncates // the log events list to the first 3 items, populating the `data.lambda.cw.logs.more` tag and limits each // log string to the first 200 characters to reduce the payload. 
func NewAWSLambdaCloudWatchLogsTags(span *spanS) AWSLambdaCloudWatchLogsTags { var tags AWSLambdaCloudWatchLogsTags if v, ok := span.Tags["cloudwatch.logs.group"]; ok { readStringTag(&tags.Group, v) } if v, ok := span.Tags["cloudwatch.logs.stream"]; ok { readStringTag(&tags.Stream, v) } if v, ok := span.Tags["cloudwatch.logs.decodingError"]; ok { switch v := v.(type) { case error: tags.DecodingError = v.Error() case string: tags.DecodingError = v } } if v, ok := span.Tags["cloudwatch.logs.events"]; ok { switch v := v.(type) { case []string: if len(v) > 3 { v = v[:3] tags.More = true } tags.Events = v case string: tags.Events = []string{v} case []byte: tags.Events = []string{string(v)} } } // truncate events if len(tags.Events) > 3 { tags.Events, tags.More = tags.Events[:3], true } for i := range tags.Events { if len(tags.Events[i]) > 200 { tags.Events[i] = tags.Events[i][:200] } } return tags } // IsZero returns true if an AWSLambdaCloudWatchLogsTags struct was populated with logs data func (tags AWSLambdaCloudWatchLogsTags) IsZero() bool { return tags.Group == "" && tags.Stream == "" && tags.DecodingError == "" } // AWSS3EventTags represens metadata for an S3 event type AWSS3EventTags struct { Name string `json:"event"` Bucket string `json:"bucket"` Object string `json:"object,omitempty"` } // AWSLambdaS3SpanTags contains fields within the `data.lambda.s3` section of an OT span document type AWSLambdaS3SpanTags struct { Events []AWSS3EventTags `json:"events,omitempty"` } // NewAWSLambdaS3SpanTags extracts S3 Event tags for an AWS Lambda entry span. It truncates // the events list to the first 3 items and limits each object names to the first 200 characters to reduce the payload. 
func NewAWSLambdaS3SpanTags(span *spanS) AWSLambdaS3SpanTags { var tags AWSLambdaS3SpanTags if events, ok := span.Tags["s3.events"]; ok { events, ok := events.([]AWSS3EventTags) if ok { tags.Events = events } } if len(tags.Events) > 3 { tags.Events = tags.Events[:3] } for i := range tags.Events { if len(tags.Events[i].Object) > 200 { tags.Events[i].Object = tags.Events[i].Object[:200] } } return tags } // IsZero returns true if an AWSLambdaS3SpanTags struct was populated with events data func (tags AWSLambdaS3SpanTags) IsZero() bool { return len(tags.Events) == 0 } // AWSSQSMessageTags represents span tags for an SQS message delivery type AWSSQSMessageTags struct { Queue string `json:"queue"` } // AWSLambdaSQSSpanTags contains fields within the `data.lambda.sqs` section of an OT span document type AWSLambdaSQSSpanTags struct { // Messages are message tags for an SQS event Messages []AWSSQSMessageTags `json:"messages"` } // NewAWSLambdaSQSSpanTags extracts SQS event tags for an AWS Lambda entry span. It truncates // the events list to the first 3 items to reduce the payload. 
func NewAWSLambdaSQSSpanTags(span *spanS) AWSLambdaSQSSpanTags { var tags AWSLambdaSQSSpanTags if msgs, ok := span.Tags["sqs.messages"]; ok { msgs, ok := msgs.([]AWSSQSMessageTags) if ok { tags.Messages = msgs } } if len(tags.Messages) > 3 { tags.Messages = tags.Messages[:3] } return tags } // IsZero returns true if an AWSLambdaSQSSpanTags struct was populated with messages data func (tags AWSLambdaSQSSpanTags) IsZero() bool { return len(tags.Messages) == 0 } // AWSLambdaSpanTags contains fields within the `data.lambda` section of an OT span document type AWSLambdaSpanTags struct { // ARN is the ARN of invoked AWS Lambda function with the version attached ARN string `json:"arn"` // Runtime is an Instana constant for this AWS lambda runtime (always "go") Runtime string `json:"runtime"` // Name is the name of invoked function Name string `json:"functionName,omitempty"` // Version is either the numeric version or $LATEST Version string `json:"functionVersion,omitempty"` // Trigger is the trigger event type (if any) Trigger string `json:"trigger,omitempty"` // CloudWatch holds the details of a CloudWatch event associated with this lambda CloudWatch *AWSLambdaCloudWatchSpanTags `json:"cw,omitempty"` // S3 holds the details of a S3 events associated with this lambda S3 *AWSLambdaS3SpanTags // SQS holds the details of a SQS events associated with this lambda SQS *AWSLambdaSQSSpanTags } // NewAWSLambdaSpanTags extracts AWS Lambda entry span tags from a tracer span func NewAWSLambdaSpanTags(span *spanS) AWSLambdaSpanTags { tags := AWSLambdaSpanTags{Runtime: "go"} if v, ok := span.Tags["lambda.arn"]; ok { readStringTag(&tags.ARN, v) } if v, ok := span.Tags["lambda.name"]; ok { readStringTag(&tags.Name, v) } if v, ok := span.Tags["lambda.version"]; ok { readStringTag(&tags.Version, v) } if v, ok := span.Tags["lambda.trigger"]; ok { readStringTag(&tags.Trigger, v) } if cw := NewAWSLambdaCloudWatchSpanTags(span); !cw.IsZero() { tags.CloudWatch = &cw } if st := 
NewAWSLambdaS3SpanTags(span); !st.IsZero() { tags.S3 = &st } if sqs := NewAWSLambdaSQSSpanTags(span); !sqs.IsZero() { tags.SQS = &sqs } return tags } // AWSLambdaSpanData is the base span data type for AWS Lambda entry spans type AWSLambdaSpanData struct { Snapshot AWSLambdaSpanTags `json:"lambda"` HTTP *HTTPSpanTags `json:"http,omitempty"` } // NewAWSLambdaSpanData initializes a new AWSLambdaSpanData from span func NewAWSLambdaSpanData(span *spanS) AWSLambdaSpanData { d := AWSLambdaSpanData{ Snapshot: NewAWSLambdaSpanTags(span), } switch span.Tags["lambda.trigger"] { case "aws:api.gateway", "aws:application.load.balancer": tags := NewHTTPSpanTags(span) d.HTTP = &tags } return d } // Type returns the span type for an AWS Lambda span func (d AWSLambdaSpanData) Type() RegisteredSpanType { return AWSLambdaEntrySpanType } // Kind returns the span kind for an AWS Lambda span func (d AWSLambdaSpanData) Kind() SpanKind { return EntrySpanKind } // readStringTag populates the &dst with the tag value if it's of either string or []byte type func readStringTag(dst *string, tag interface{}) { switch s := tag.(type) { case string: *dst = s case []byte: *dst = string(s) } } // readIntTag populates the &dst with the tag value if it's of any kind of integer type func readIntTag(dst *int, tag interface{}) { switch n := tag.(type) { case int: *dst = n case int8: *dst = int(n) case int16: *dst = int(n) case int32: *dst = int(n) case int64: *dst = int(n) case uint: *dst = int(n) case uint8: *dst = int(n) case uint16: *dst = int(n) case uint32: *dst = int(n) case uint64: *dst = int(n) } } func collectTracerSpanLogs(span *spanS) map[uint64]map[string]interface{} { logs := make(map[uint64]map[string]interface{}) for _, l := range span.Logs { if _, ok := logs[uint64(l.Timestamp.UnixNano())/uint64(time.Millisecond)]; !ok { logs[uint64(l.Timestamp.UnixNano())/uint64(time.Millisecond)] = make(map[string]interface{}) } for _, f := range l.Fields { 
logs[uint64(l.Timestamp.UnixNano())/uint64(time.Millisecond)][f.Key()] = f.Value() } } return logs } Send parent and span IDs as strings to the host agent package instana import ( "encoding/json" "strings" "time" "github.com/instana/go-sensor/w3ctrace" "github.com/opentracing/opentracing-go/ext" ) type typedSpanData interface { Type() RegisteredSpanType Kind() SpanKind } // Registered types supported by Instana. The span type is determined based on // the operation name passed to the `StartSpan()` call of a tracer. // // It is NOT RECOMMENDED to use operation names that match any of these constants in your // custom instrumentation code unless you explicitly wish to send data as a registered span. // The conversion will result in loss of custom tags that are not supported for this span type. // The list of supported tags can be found in the godoc of the respective span tags type below. const ( // SDK span, a generic span containing arbitrary data. Spans with operation name // not listed in the subsequent list will be sent as an SDK spans forwarding all // attached tags to the agent SDKSpanType = RegisteredSpanType("sdk") // HTTP server and client spans HTTPServerSpanType = RegisteredSpanType("g.http") HTTPClientSpanType = RegisteredSpanType("http") // RPC server and client spans RPCServerSpanType = RegisteredSpanType("rpc-server") RPCClientSpanType = RegisteredSpanType("rpc-client") // Kafka consumer/producer span KafkaSpanType = RegisteredSpanType("kafka") // Google Cloud Storage client span GCPStorageSpanType = RegisteredSpanType("gcs") // Google Cloud PubSub client span GCPPubSubSpanType = RegisteredSpanType("gcps") // AWS Lambda entry span AWSLambdaEntrySpanType = RegisteredSpanType("aws.lambda.entry") ) // RegisteredSpanType represents the span type supported by Instana type RegisteredSpanType string // ExtractData is a factory method to create the `data` section for a typed span func (st RegisteredSpanType) ExtractData(span *spanS) typedSpanData { switch st { 
case HTTPServerSpanType, HTTPClientSpanType: return NewHTTPSpanData(span) case RPCServerSpanType, RPCClientSpanType: return NewRPCSpanData(span) case KafkaSpanType: return NewKafkaSpanData(span) case GCPStorageSpanType: return NewGCPStorageSpanData(span) case GCPPubSubSpanType: return NewGCPPubSubSpanData(span) case AWSLambdaEntrySpanType: return NewAWSLambdaSpanData(span) default: return NewSDKSpanData(span) } } // SpanKind represents values of field `k` in OpenTracing span representation. It represents // the direction of the call associated with a span. type SpanKind uint8 // Valid span kinds const ( // The kind of a span associated with an inbound call, this must be the first span in the trace. EntrySpanKind SpanKind = iota + 1 // The kind of a span associated with an outbound call, e.g. an HTTP client request, posting to a message bus, etc. ExitSpanKind // The default kind for a span that is associated with a call within the same service. IntermediateSpanKind ) // String returns string representation of a span kind suitable for use as a value for `data.sdk.type` // tag of an SDK span. By default all spans are intermediate unless they are explicitly set to be "entry" or "exit" func (k SpanKind) String() string { switch k { case EntrySpanKind: return "entry" case ExitSpanKind: return "exit" default: return "intermediate" } } // ForeignParent represents a related 3rd-party trace context, e.g. 
a W3C Trace Context type ForeignParent struct { TraceID string `json:"t"` ParentID string `json:"p"` LatestTraceState string `json:"lts,omitempty"` } func newForeignParent(p interface{}) *ForeignParent { switch p := p.(type) { case w3ctrace.Context: return newW3CForeignParent(p) default: return nil } } func newW3CForeignParent(trCtx w3ctrace.Context) *ForeignParent { p, s := trCtx.Parent(), trCtx.State() var lastVendorData string if len(s) > 0 { lastVendorData = s[0] } return &ForeignParent{ TraceID: p.TraceID, ParentID: p.ParentID, LatestTraceState: lastVendorData, } } // Span represents the OpenTracing span document to be sent to the agent type Span struct { TraceID int64 TraceIDHi int64 ParentID int64 SpanID int64 Timestamp uint64 Duration uint64 Name string From *fromS Batch *batchInfo Kind int Ec int Data typedSpanData Synthetic bool ForeignParent *ForeignParent CorrelationType string CorrelationID string } func newSpan(span *spanS) Span { data := RegisteredSpanType(span.Operation).ExtractData(span) sp := Span{ TraceID: span.context.TraceID, TraceIDHi: span.context.TraceIDHi, ParentID: span.context.ParentID, SpanID: span.context.SpanID, Timestamp: uint64(span.Start.UnixNano()) / uint64(time.Millisecond), Duration: uint64(span.Duration) / uint64(time.Millisecond), Name: string(data.Type()), Ec: span.ErrorCount, ForeignParent: newForeignParent(span.context.ForeignParent), CorrelationType: span.Correlation.Type, CorrelationID: span.Correlation.ID, Kind: int(data.Kind()), Data: data, } if bs, ok := span.Tags[batchSizeTag].(int); ok { if bs > 1 { sp.Batch = &batchInfo{Size: bs} } delete(span.Tags, batchSizeTag) } if syn, ok := span.Tags[syntheticCallTag].(bool); ok { sp.Synthetic = syn delete(span.Tags, syntheticCallTag) } return sp } // MarshalJSON serializes span to JSON for sending it to Instana func (sp Span) MarshalJSON() ([]byte, error) { var parentID string if sp.ParentID != 0 { parentID = FormatID(sp.ParentID) } return json.Marshal(struct { TraceID string 
`json:"t"` ParentID string `json:"p,omitempty"` SpanID string `json:"s"` Timestamp uint64 `json:"ts"` Duration uint64 `json:"d"` Name string `json:"n"` From *fromS `json:"f"` Batch *batchInfo `json:"b,omitempty"` Kind int `json:"k"` Ec int `json:"ec,omitempty"` Data typedSpanData `json:"data"` Synthetic bool `json:"sy,omitempty"` ForeignParent *ForeignParent `json:"fp,omitempty"` CorrelationType string `json:"crtp,omitempty"` CorrelationID string `json:"crid,omitempty"` }{ FormatLongID(sp.TraceIDHi, sp.TraceID), parentID, FormatID(sp.SpanID), sp.Timestamp, sp.Duration, sp.Name, sp.From, sp.Batch, sp.Kind, sp.Ec, sp.Data, sp.Synthetic, sp.ForeignParent, sp.CorrelationType, sp.CorrelationID, }) } type batchInfo struct { Size int `json:"s"` } // SpanData contains fields to be sent in the `data` section of an OT span document. These fields are // common for all span types. type SpanData struct { Service string `json:"service,omitempty"` st RegisteredSpanType sk interface{} } // NewSpanData initializes a new span data from tracer span func NewSpanData(span *spanS, st RegisteredSpanType) SpanData { return SpanData{ Service: span.Service, st: st, sk: span.Tags[string(ext.SpanKind)], } } // Type returns the registered span type suitable for use as the value of `n` field. func (d SpanData) Type() RegisteredSpanType { return d.st } // Kind returns the kind of the span. 
It handles the github.com/opentracing/opentracing-go/ext.SpanKindEnum // values as well as generic "entry" and "exit" func (d SpanData) Kind() SpanKind { switch d.sk { case ext.SpanKindRPCServerEnum, string(ext.SpanKindRPCServerEnum), ext.SpanKindConsumerEnum, string(ext.SpanKindConsumerEnum), "entry": return EntrySpanKind case ext.SpanKindRPCClientEnum, string(ext.SpanKindRPCClientEnum), ext.SpanKindProducerEnum, string(ext.SpanKindProducerEnum), "exit": return ExitSpanKind default: return IntermediateSpanKind } } // SDKSpanData represents the `data` section of an SDK span sent within an OT span document type SDKSpanData struct { SpanData Tags SDKSpanTags `json:"sdk"` } // NewSDKSpanData initializes a new SDK span data from a tracer span func NewSDKSpanData(span *spanS) SDKSpanData { d := NewSpanData(span, SDKSpanType) return SDKSpanData{ SpanData: d, Tags: NewSDKSpanTags(span, d.Kind().String()), } } // SDKSpanTags contains fields within the `data.sdk` section of an OT span document type SDKSpanTags struct { Name string `json:"name"` Type string `json:"type,omitempty"` Arguments string `json:"arguments,omitempty"` Return string `json:"return,omitempty"` Custom map[string]interface{} `json:"custom,omitempty"` } // NewSDKSpanTags extracts SDK span tags from a tracer span func NewSDKSpanTags(span *spanS, spanType string) SDKSpanTags { tags := SDKSpanTags{ Name: span.Operation, Type: spanType, Custom: map[string]interface{}{}, } if len(span.Tags) != 0 { tags.Custom["tags"] = span.Tags } if logs := collectTracerSpanLogs(span); len(logs) > 0 { tags.Custom["logs"] = logs } if len(span.context.Baggage) != 0 { tags.Custom["baggage"] = span.context.Baggage } return tags } // HTTPSpanData represents the `data` section of an HTTP span sent within an OT span document type HTTPSpanData struct { SpanData Tags HTTPSpanTags `json:"http"` } // NewHTTPSpanData initializes a new HTTP span data from tracer span func NewHTTPSpanData(span *spanS) HTTPSpanData { data := HTTPSpanData{ 
SpanData: NewSpanData(span, RegisteredSpanType(span.Operation)), Tags: NewHTTPSpanTags(span), } return data } // HTTPSpanTags contains fields within the `data.http` section of an OT span document type HTTPSpanTags struct { // Full request/response URL URL string `json:"url,omitempty"` // The HTTP status code returned with client/server response Status int `json:"status,omitempty"` // The HTTP method of the request Method string `json:"method,omitempty"` // Path is the path part of the request URL Path string `json:"path,omitempty"` // Params are the request query string parameters Params string `json:"params,omitempty"` // Headers are the captured request/response headers Headers map[string]string `json:"header,omitempty"` // PathTemplate is the raw template string used to route the request PathTemplate string `json:"path_tpl,omitempty"` // The name:port of the host to which the request had been sent Host string `json:"host,omitempty"` // The name of the protocol used for request ("http" or "https") Protocol string `json:"protocol,omitempty"` // The message describing an error occurred during the request handling Error string `json:"error,omitempty"` } // NewHTTPSpanTags extracts HTTP-specific span tags from a tracer span func NewHTTPSpanTags(span *spanS) HTTPSpanTags { var tags HTTPSpanTags for k, v := range span.Tags { switch k { case "http.url", string(ext.HTTPUrl): readStringTag(&tags.URL, v) case "http.status", "http.status_code": readIntTag(&tags.Status, v) case "http.method", string(ext.HTTPMethod): readStringTag(&tags.Method, v) case "http.path": readStringTag(&tags.Path, v) case "http.params": readStringTag(&tags.Params, v) case "http.header": if m, ok := v.(map[string]string); ok { tags.Headers = m } case "http.path_tpl": readStringTag(&tags.PathTemplate, v) case "http.host": readStringTag(&tags.Host, v) case "http.protocol": readStringTag(&tags.Protocol, v) case "http.error": readStringTag(&tags.Error, v) } } return tags } // RPCSpanData represents the 
`data` section of an RPC span sent within an OT span document type RPCSpanData struct { SpanData Tags RPCSpanTags `json:"rpc"` } // NewRPCSpanData initializes a new RPC span data from tracer span func NewRPCSpanData(span *spanS) RPCSpanData { data := RPCSpanData{ SpanData: NewSpanData(span, RegisteredSpanType(span.Operation)), Tags: NewRPCSpanTags(span), } return data } // RPCSpanTags contains fields within the `data.rpc` section of an OT span document type RPCSpanTags struct { // The name of the remote host for an RPC call Host string `json:"host,omitempty"` // The port of the remote host for an RPC call Port string `json:"port,omitempty"` // The name of the remote method to invoke Call string `json:"call,omitempty"` // The type of an RPC call, e.g. either "unary" or "stream" for GRPC requests CallType string `json:"call_type,omitempty"` // The RPC flavor used for this call, e.g. "grpc" for GRPC requests Flavor string `json:"flavor,omitempty"` // The message describing an error occurred during the request handling Error string `json:"error,omitempty"` } // NewRPCSpanTags extracts RPC-specific span tags from a tracer span func NewRPCSpanTags(span *spanS) RPCSpanTags { var tags RPCSpanTags for k, v := range span.Tags { switch k { case "rpc.host": readStringTag(&tags.Host, v) case "rpc.port": readStringTag(&tags.Port, v) case "rpc.call": readStringTag(&tags.Call, v) case "rpc.call_type": readStringTag(&tags.CallType, v) case "rpc.flavor": readStringTag(&tags.Flavor, v) case "rpc.error": readStringTag(&tags.Error, v) } } return tags } // KafkaSpanData represents the `data` section of an Kafka span sent within an OT span document type KafkaSpanData struct { SpanData Tags KafkaSpanTags `json:"kafka"` } // NewKafkaSpanData initializes a new Kafka span data from tracer span func NewKafkaSpanData(span *spanS) KafkaSpanData { data := KafkaSpanData{ SpanData: NewSpanData(span, RegisteredSpanType(span.Operation)), Tags: NewKafkaSpanTags(span), } return data } // KafkaSpanTags 
contains fields within the `data.kafka` section of an OT span document type KafkaSpanTags struct { // Kafka topic Service string `json:"service"` // The access mode:, either "send" for publisher or "consume" for consumer Access string `json:"access"` } // NewKafkaSpanTags extracts Kafka-specific span tags from a tracer span func NewKafkaSpanTags(span *spanS) KafkaSpanTags { var tags KafkaSpanTags for k, v := range span.Tags { switch k { case "kafka.service": readStringTag(&tags.Service, v) case "kafka.access": readStringTag(&tags.Access, v) } } return tags } // GCPStorageSpanData represents the `data` section of a Google Cloud Storage span sent within an OT span document type GCPStorageSpanData struct { SpanData Tags GCPStorageSpanTags `json:"gcs"` } // NewGCPStorageSpanData initializes a new Google Cloud Storage span data from tracer span func NewGCPStorageSpanData(span *spanS) GCPStorageSpanData { data := GCPStorageSpanData{ SpanData: NewSpanData(span, GCPStorageSpanType), Tags: NewGCPStorageSpanTags(span), } return data } // Kind returns the span kind for a Google Cloud Storage span func (d GCPStorageSpanData) Kind() SpanKind { return ExitSpanKind } // GCPStorageSpanTags contains fields within the `data.gcs` section of an OT span document type GCPStorageSpanTags struct { Operation string `json:"op,omitempty"` Bucket string `json:"bucket,omitempty"` Object string `json:"object,omitempty"` Entity string `json:"entity,omitempty"` Range string `json:"range,omitempty"` SourceBucket string `json:"sourceBucket,omitempty"` SourceObject string `json:"sourceObject,omitempty"` DestinationBucket string `json:"destinationBucket,omitempty"` DestinationObject string `json:"destinationObject,omitempty"` NumberOfOperations string `json:"numberOfOperations,omitempty"` ProjectID string `json:"projectId,omitempty"` AccessID string `json:"accessId,omitempty"` } // NewGCPStorageSpanTags extracts Google Cloud Storage span tags from a tracer span func NewGCPStorageSpanTags(span *spanS) 
GCPStorageSpanTags { var tags GCPStorageSpanTags for k, v := range span.Tags { switch k { case "gcs.op": readStringTag(&tags.Operation, v) case "gcs.bucket": readStringTag(&tags.Bucket, v) case "gcs.object": readStringTag(&tags.Object, v) case "gcs.entity": readStringTag(&tags.Entity, v) case "gcs.range": readStringTag(&tags.Range, v) case "gcs.sourceBucket": readStringTag(&tags.SourceBucket, v) case "gcs.sourceObject": readStringTag(&tags.SourceObject, v) case "gcs.destinationBucket": readStringTag(&tags.DestinationBucket, v) case "gcs.destinationObject": readStringTag(&tags.DestinationObject, v) case "gcs.numberOfOperations": readStringTag(&tags.NumberOfOperations, v) case "gcs.projectId": readStringTag(&tags.ProjectID, v) case "gcs.accessId": readStringTag(&tags.AccessID, v) } } return tags } // GCPPubSubSpanData represents the `data` section of a Google Cloud Pub/Sub span sent within an OT span document type GCPPubSubSpanData struct { SpanData Tags GCPPubSubSpanTags `json:"gcps"` } // NewGCPPubSubSpanData initializes a new Google Cloud Pub/Span span data from tracer span func NewGCPPubSubSpanData(span *spanS) GCPPubSubSpanData { data := GCPPubSubSpanData{ SpanData: NewSpanData(span, GCPPubSubSpanType), Tags: NewGCPPubSubSpanTags(span), } return data } // Kind returns the span kind for a Google Cloud Pub/Sub span func (d GCPPubSubSpanData) Kind() SpanKind { switch strings.ToLower(d.Tags.Operation) { case "consume": return EntrySpanKind default: return ExitSpanKind } } // GCPPubSubSpanTags contains fields within the `data.gcps` section of an OT span document type GCPPubSubSpanTags struct { ProjectID string `json:"projid"` Operation string `json:"op"` Topic string `json:"top,omitempty"` Subscription string `json:"sub,omitempty"` MessageID string `json:"msgid,omitempty"` } // NewGCPPubSubSpanTags extracts Google Cloud Pub/Sub span tags from a tracer span func NewGCPPubSubSpanTags(span *spanS) GCPPubSubSpanTags { var tags GCPPubSubSpanTags for k, v := range 
span.Tags { switch k { case "gcps.projid": readStringTag(&tags.ProjectID, v) case "gcps.op": readStringTag(&tags.Operation, v) case "gcps.top": readStringTag(&tags.Topic, v) case "gcps.sub": readStringTag(&tags.Subscription, v) case "gcps.msgid": readStringTag(&tags.MessageID, v) } } return tags } // AWSLambdaCloudWatchSpanTags contains fields within the `data.lambda.cw` section of an OT span document type AWSLambdaCloudWatchSpanTags struct { Events *AWSLambdaCloudWatchEventTags `json:"events,omitempty"` Logs *AWSLambdaCloudWatchLogsTags `json:"logs,omitempty"` } // NewAWSLambdaCloudWatchSpanTags extracts CloudWatch tags for an AWS Lambda entry span func NewAWSLambdaCloudWatchSpanTags(span *spanS) AWSLambdaCloudWatchSpanTags { var tags AWSLambdaCloudWatchSpanTags if events := NewAWSLambdaCloudWatchEventTags(span); !events.IsZero() { tags.Events = &events } if logs := NewAWSLambdaCloudWatchLogsTags(span); !logs.IsZero() { tags.Logs = &logs } return tags } // IsZero returns true if an AWSLambdaCloudWatchSpanTags struct was populated with event data func (tags AWSLambdaCloudWatchSpanTags) IsZero() bool { return (tags.Events == nil || tags.Events.IsZero()) && (tags.Logs == nil || tags.Logs.IsZero()) } // AWSLambdaCloudWatchEventTags contains fields within the `data.lambda.cw.events` section of an OT span document type AWSLambdaCloudWatchEventTags struct { // ID is the ID of the event ID string `json:"id"` // Resources contains the event resources Resources []string `json:"resources"` // More is set to true if the event resources list was truncated More bool `json:"more,omitempty"` } // NewAWSLambdaCloudWatchEventTags extracts CloudWatch event tags for an AWS Lambda entry span. It truncates // the resources list to the first 3 items, populating the `data.lambda.cw.events.more` tag and limits each // resource string to the first 200 characters to reduce the payload. 
func NewAWSLambdaCloudWatchEventTags(span *spanS) AWSLambdaCloudWatchEventTags { var tags AWSLambdaCloudWatchEventTags if v, ok := span.Tags["cloudwatch.events.id"]; ok { readStringTag(&tags.ID, v) } if v, ok := span.Tags["cloudwatch.events.resources"]; ok { switch v := v.(type) { case []string: if len(v) > 3 { v = v[:3] tags.More = true } tags.Resources = v case string: tags.Resources = []string{v} case []byte: tags.Resources = []string{string(v)} } } // truncate resources if len(tags.Resources) > 3 { tags.Resources, tags.More = tags.Resources[:3], true } for i := range tags.Resources { if len(tags.Resources[i]) > 200 { tags.Resources[i] = tags.Resources[i][:200] } } return tags } // IsZero returns true if an AWSCloudWatchEventTags struct was populated with event data func (tags AWSLambdaCloudWatchEventTags) IsZero() bool { return tags.ID == "" } // AWSLambdaCloudWatchLogsTags contains fields within the `data.lambda.cw.logs` section of an OT span document type AWSLambdaCloudWatchLogsTags struct { Group string `json:"group"` Stream string `json:"stream"` Events []string `json:"events"` More bool `json:"more,omitempty"` DecodingError string `json:"decodingError,omitempty"` } // NewAWSLambdaCloudWatchLogsTags extracts CloudWatch Logs tags for an AWS Lambda entry span. It truncates // the log events list to the first 3 items, populating the `data.lambda.cw.logs.more` tag and limits each // log string to the first 200 characters to reduce the payload. 
func NewAWSLambdaCloudWatchLogsTags(span *spanS) AWSLambdaCloudWatchLogsTags { var tags AWSLambdaCloudWatchLogsTags if v, ok := span.Tags["cloudwatch.logs.group"]; ok { readStringTag(&tags.Group, v) } if v, ok := span.Tags["cloudwatch.logs.stream"]; ok { readStringTag(&tags.Stream, v) } if v, ok := span.Tags["cloudwatch.logs.decodingError"]; ok { switch v := v.(type) { case error: tags.DecodingError = v.Error() case string: tags.DecodingError = v } } if v, ok := span.Tags["cloudwatch.logs.events"]; ok { switch v := v.(type) { case []string: if len(v) > 3 { v = v[:3] tags.More = true } tags.Events = v case string: tags.Events = []string{v} case []byte: tags.Events = []string{string(v)} } } // truncate events if len(tags.Events) > 3 { tags.Events, tags.More = tags.Events[:3], true } for i := range tags.Events { if len(tags.Events[i]) > 200 { tags.Events[i] = tags.Events[i][:200] } } return tags } // IsZero returns true if an AWSLambdaCloudWatchLogsTags struct was populated with logs data func (tags AWSLambdaCloudWatchLogsTags) IsZero() bool { return tags.Group == "" && tags.Stream == "" && tags.DecodingError == "" } // AWSS3EventTags represens metadata for an S3 event type AWSS3EventTags struct { Name string `json:"event"` Bucket string `json:"bucket"` Object string `json:"object,omitempty"` } // AWSLambdaS3SpanTags contains fields within the `data.lambda.s3` section of an OT span document type AWSLambdaS3SpanTags struct { Events []AWSS3EventTags `json:"events,omitempty"` } // NewAWSLambdaS3SpanTags extracts S3 Event tags for an AWS Lambda entry span. It truncates // the events list to the first 3 items and limits each object names to the first 200 characters to reduce the payload. 
func NewAWSLambdaS3SpanTags(span *spanS) AWSLambdaS3SpanTags { var tags AWSLambdaS3SpanTags if events, ok := span.Tags["s3.events"]; ok { events, ok := events.([]AWSS3EventTags) if ok { tags.Events = events } } if len(tags.Events) > 3 { tags.Events = tags.Events[:3] } for i := range tags.Events { if len(tags.Events[i].Object) > 200 { tags.Events[i].Object = tags.Events[i].Object[:200] } } return tags } // IsZero returns true if an AWSLambdaS3SpanTags struct was populated with events data func (tags AWSLambdaS3SpanTags) IsZero() bool { return len(tags.Events) == 0 } // AWSSQSMessageTags represents span tags for an SQS message delivery type AWSSQSMessageTags struct { Queue string `json:"queue"` } // AWSLambdaSQSSpanTags contains fields within the `data.lambda.sqs` section of an OT span document type AWSLambdaSQSSpanTags struct { // Messages are message tags for an SQS event Messages []AWSSQSMessageTags `json:"messages"` } // NewAWSLambdaSQSSpanTags extracts SQS event tags for an AWS Lambda entry span. It truncates // the events list to the first 3 items to reduce the payload. 
func NewAWSLambdaSQSSpanTags(span *spanS) AWSLambdaSQSSpanTags { var tags AWSLambdaSQSSpanTags if msgs, ok := span.Tags["sqs.messages"]; ok { msgs, ok := msgs.([]AWSSQSMessageTags) if ok { tags.Messages = msgs } } if len(tags.Messages) > 3 { tags.Messages = tags.Messages[:3] } return tags } // IsZero returns true if an AWSLambdaSQSSpanTags struct was populated with messages data func (tags AWSLambdaSQSSpanTags) IsZero() bool { return len(tags.Messages) == 0 } // AWSLambdaSpanTags contains fields within the `data.lambda` section of an OT span document type AWSLambdaSpanTags struct { // ARN is the ARN of invoked AWS Lambda function with the version attached ARN string `json:"arn"` // Runtime is an Instana constant for this AWS lambda runtime (always "go") Runtime string `json:"runtime"` // Name is the name of invoked function Name string `json:"functionName,omitempty"` // Version is either the numeric version or $LATEST Version string `json:"functionVersion,omitempty"` // Trigger is the trigger event type (if any) Trigger string `json:"trigger,omitempty"` // CloudWatch holds the details of a CloudWatch event associated with this lambda CloudWatch *AWSLambdaCloudWatchSpanTags `json:"cw,omitempty"` // S3 holds the details of a S3 events associated with this lambda S3 *AWSLambdaS3SpanTags // SQS holds the details of a SQS events associated with this lambda SQS *AWSLambdaSQSSpanTags } // NewAWSLambdaSpanTags extracts AWS Lambda entry span tags from a tracer span func NewAWSLambdaSpanTags(span *spanS) AWSLambdaSpanTags { tags := AWSLambdaSpanTags{Runtime: "go"} if v, ok := span.Tags["lambda.arn"]; ok { readStringTag(&tags.ARN, v) } if v, ok := span.Tags["lambda.name"]; ok { readStringTag(&tags.Name, v) } if v, ok := span.Tags["lambda.version"]; ok { readStringTag(&tags.Version, v) } if v, ok := span.Tags["lambda.trigger"]; ok { readStringTag(&tags.Trigger, v) } if cw := NewAWSLambdaCloudWatchSpanTags(span); !cw.IsZero() { tags.CloudWatch = &cw } if st := 
NewAWSLambdaS3SpanTags(span); !st.IsZero() { tags.S3 = &st } if sqs := NewAWSLambdaSQSSpanTags(span); !sqs.IsZero() { tags.SQS = &sqs } return tags } // AWSLambdaSpanData is the base span data type for AWS Lambda entry spans type AWSLambdaSpanData struct { Snapshot AWSLambdaSpanTags `json:"lambda"` HTTP *HTTPSpanTags `json:"http,omitempty"` } // NewAWSLambdaSpanData initializes a new AWSLambdaSpanData from span func NewAWSLambdaSpanData(span *spanS) AWSLambdaSpanData { d := AWSLambdaSpanData{ Snapshot: NewAWSLambdaSpanTags(span), } switch span.Tags["lambda.trigger"] { case "aws:api.gateway", "aws:application.load.balancer": tags := NewHTTPSpanTags(span) d.HTTP = &tags } return d } // Type returns the span type for an AWS Lambda span func (d AWSLambdaSpanData) Type() RegisteredSpanType { return AWSLambdaEntrySpanType } // Kind returns the span kind for an AWS Lambda span func (d AWSLambdaSpanData) Kind() SpanKind { return EntrySpanKind } // readStringTag populates the &dst with the tag value if it's of either string or []byte type func readStringTag(dst *string, tag interface{}) { switch s := tag.(type) { case string: *dst = s case []byte: *dst = string(s) } } // readIntTag populates the &dst with the tag value if it's of any kind of integer type func readIntTag(dst *int, tag interface{}) { switch n := tag.(type) { case int: *dst = n case int8: *dst = int(n) case int16: *dst = int(n) case int32: *dst = int(n) case int64: *dst = int(n) case uint: *dst = int(n) case uint8: *dst = int(n) case uint16: *dst = int(n) case uint32: *dst = int(n) case uint64: *dst = int(n) } } func collectTracerSpanLogs(span *spanS) map[uint64]map[string]interface{} { logs := make(map[uint64]map[string]interface{}) for _, l := range span.Logs { if _, ok := logs[uint64(l.Timestamp.UnixNano())/uint64(time.Millisecond)]; !ok { logs[uint64(l.Timestamp.UnixNano())/uint64(time.Millisecond)] = make(map[string]interface{}) } for _, f := range l.Fields { 
logs[uint64(l.Timestamp.UnixNano())/uint64(time.Millisecond)][f.Key()] = f.Value() } } return logs }
package main import ( "encoding/json" "fmt" ) type person struct { First string Last string } func main() { p1 := person{ First: "a", Last: "z", } p2 := person{ First: "b", Last: "y", } xp := []person{p1, p2} fmt.Println("go data", xp) bs, err := json.Marshal(xp) if err != nil { fmt.Println(err) } else { fmt.Println("json", string(bs)) } j := `[{"First":"a","Last":"z"},{"First":"b","Last":"y"}]` fmt.Println("json", j) xp1 := []person{} errs := json.Unmarshal([]byte(j), &xp1) if errs != nil { fmt.Println(errs) } else { fmt.Printf("\n%+v\n", xp1) // + also print the field names of the struct } } JSON example package main import ( "encoding/json" "fmt" ) type person struct { First string `json:"first"` Last string } func main() { p1 := person{ First: "a", Last: "z", } p2 := person{ First: "b", Last: "y", } xp := []person{p1, p2} fmt.Println("go data", xp) bs, err := json.Marshal(xp) if err != nil { fmt.Println(err) } else { fmt.Println("json", string(bs)) } fmt.Printf("---------------\n"); j := `[{"first":"a","Last":"z"},{"First":"b","Last":"y"}]` fmt.Println("json", j) xp1 := []person{} errs := json.Unmarshal([]byte(j), &xp1) if errs != nil { fmt.Println(errs) } else { fmt.Printf("%+v\n", xp1) // + also print the field names of the struct } }
package amino_test import ( "bytes" "encoding/json" "os" "reflect" "strings" "testing" "time" "github.com/stretchr/testify/assert" "github.com/tendermint/go-amino" ) func TestMain(m *testing.M) { // Register the concrete types first. var cdc = amino.NewCodec() cdc.RegisterConcrete(&Transport{}, "our/transport", nil) cdc.RegisterInterface((*Vehicle)(nil), &amino.InterfaceOptions{AlwaysDisambiguate: true}) cdc.RegisterInterface((*Asset)(nil), &amino.InterfaceOptions{AlwaysDisambiguate: true}) cdc.RegisterConcrete(Car(""), "car", nil) cdc.RegisterConcrete(insurancePlan(0), "insuranceplan", nil) cdc.RegisterConcrete(Boat(""), "boat", nil) cdc.RegisterConcrete(Plane{}, "plane", nil) os.Exit(m.Run()) } func TestMarshalJSON(t *testing.T) { t.Parallel() var cdc = amino.NewCodec() cases := []struct { in interface{} want string wantErr string }{ {&noFields{}, "{}", ""}, {&noExportedFields{a: 10, b: "foo"}, "{}", ""}, {nil, "null", ""}, {&oneExportedField{}, `{"A":""}`, ""}, {Vehicle(Car("Tesla")), `{"type":"2B2961A431B238","value":"Tesla"}`, ""}, {Car("Tesla"), `{"type":"2B2961A431B238","value":"Tesla"}`, ""}, {&oneExportedField{A: "Z"}, `{"A":"Z"}`, ""}, {[]string{"a", "bc"}, `["a","bc"]`, ""}, {[]interface{}{"a", "bc", 10, 10.93, 1e3}, ``, "Unregistered"}, {aPointerField{Foo: new(int), Name: "name"}, `{"Foo":0,"nm":"name"}`, ""}, { aPointerFieldAndEmbeddedField{intPtr(11), "ap", nil, &oneExportedField{A: "foo"}}, `{"Foo":11,"nm":"ap","bz":{"A":"foo"}}`, "", }, { doublyEmbedded{ Inner: &aPointerFieldAndEmbeddedField{ intPtr(11), "ap", nil, &oneExportedField{A: "foo"}, }, }, `{"Inner":{"Foo":11,"nm":"ap","bz":{"A":"foo"}},"year":0}`, "", }, { struct{}{}, `{}`, "", }, { struct{ A int }{A: 10}, `{"A":10}`, "", }, { Transport{}, `{"type":"AEB127E121A6B0","value":{"Vehicle":null,"Capacity":0}}`, "", }, { Transport{Vehicle: Car("Bugatti")}, `{"type":"AEB127E121A6B0","value":{"Vehicle":{"type":"2B2961A431B238","value":"Bugatti"},"Capacity":0}}`, "", }, { BalanceSheet{Assets: 
[]Asset{Car("Corolla"), insurancePlan(1e7)}}, `{"assets":[{"type":"2B2961A431B238","value":"Corolla"},{"type":"7DF0BC76182A18","value":10000000}]}`, "", }, { Transport{Vehicle: Boat("Poseidon"), Capacity: 1789}, `{"type":"AEB127E121A6B0","value":{"Vehicle":{"type":"25CDB46D8D2110","value":"Poseidon"},"Capacity":1789}}`, "", }, { withCustomMarshaler{A: &aPointerField{Foo: intPtr(12)}, F: customJSONMarshaler(10)}, `{"fx":"Tendermint","A":{"Foo":12}}`, "", }, { func() json.Marshaler { v := customJSONMarshaler(10); return &v }(), `"Tendermint"`, "", }, // We don't yet support interface pointer registration i.e. `*interface{}` {interfacePtr("a"), "", "Unregistered interface interface {}"}, {&fp{"Foo", 10}, "<FP-MARSHALJSON>", ""}, {(*fp)(nil), "null", ""}, {struct { FP *fp Package string }{FP: &fp{"Foo", 10}, Package: "bytes"}, `{"FP":<FP-MARSHALJSON>,"Package":"bytes"}`, ""}, } for i, tt := range cases { t.Logf("Trying case #%v", i) blob, err := cdc.MarshalJSON(tt.in) if tt.wantErr != "" { if err == nil || !strings.Contains(err.Error(), tt.wantErr) { t.Errorf("#%d:\ngot:\n\t%q\nwant non-nil error containing\n\t%q", i, err, tt.wantErr) } continue } if err != nil { t.Errorf("#%d: unexpected error: %v\nblob: %v", i, err, tt.in) continue } if g, w := string(blob), tt.want; g != w { t.Errorf("#%d:\ngot:\n\t%s\nwant:\n\t%s", i, g, w) } } } func TestMarshalJSONWithMonotonicTime(t *testing.T) { var cdc = amino.NewCodec() type SimpleStruct struct { String string Bytes []byte Time time.Time } s := SimpleStruct{ String: "hello", Bytes: []byte("goodbye"), Time: time.Now().UTC().Truncate(time.Millisecond), // strip monotonic and timezone. 
} b, err := cdc.MarshalJSON(s) assert.Nil(t, err) var s2 SimpleStruct err = cdc.UnmarshalJSON(b, &s2) assert.Nil(t, err) assert.Equal(t, s, s2) } type fp struct { Name string Version int } func (f *fp) MarshalJSON() ([]byte, error) { return []byte("<FP-MARSHALJSON>"), nil } func (f *fp) UnmarshalJSON(blob []byte) error { f.Name = string(blob) return nil } var _ json.Marshaler = (*fp)(nil) var _ json.Unmarshaler = (*fp)(nil) type innerFP struct { PC uint64 FP *fp } func TestUnmarshalMap(t *testing.T) { binBytes := []byte(`dontcare`) jsonBytes := []byte(`{"2": 2}`) obj := new(map[string]int) cdc := amino.NewCodec() // Binary doesn't support decoding to a map... assert.Panics(t, func() { err := cdc.UnmarshalBinary(binBytes, &obj) assert.Fail(t, "should have paniced but got err: %v", err) }) assert.Panics(t, func() { err := cdc.UnmarshalBinary(binBytes, obj) assert.Fail(t, "should have paniced but got err: %v", err) }) // ... nor encoding it. assert.Panics(t, func() { bz, err := cdc.MarshalBinary(obj) assert.Fail(t, "should have paniced but got bz: %X err: %v", bz, err) }) // JSON doesn't support decoding to a map... assert.Panics(t, func() { err := cdc.UnmarshalJSON(jsonBytes, &obj) assert.Fail(t, "should have paniced but got err: %v", err) }) assert.Panics(t, func() { err := cdc.UnmarshalJSON(jsonBytes, obj) assert.Fail(t, "should have paniced but got err: %v", err) }) // ... nor encoding it. assert.Panics(t, func() { bz, err := cdc.MarshalJSON(obj) assert.Fail(t, "should have paniced but got bz: %X err: %v", bz, err) }) } func TestUnmarshalFunc(t *testing.T) { binBytes := []byte(`dontcare`) jsonBytes := []byte(`"dontcare"`) obj := func() {} cdc := amino.NewCodec() // Binary doesn't support decoding to a func... 
assert.Panics(t, func() { err := cdc.UnmarshalBinary(binBytes, &obj) assert.Fail(t, "should have paniced but got err: %v", err) }) assert.Panics(t, func() { err := cdc.UnmarshalBinary(binBytes, obj) assert.Fail(t, "should have paniced but got err: %v", err) }) // ... nor encoding it. assert.Panics(t, func() { bz, err := cdc.MarshalBinary(obj) assert.Fail(t, "should have paniced but got bz: %X err: %v", bz, err) }) // JSON doesn't support decoding to a func... assert.Panics(t, func() { err := cdc.UnmarshalJSON(jsonBytes, &obj) assert.Fail(t, "should have paniced but got err: %v", err) }) assert.Panics(t, func() { err := cdc.UnmarshalJSON(jsonBytes, obj) assert.Fail(t, "should have paniced but got err: %v", err) }) // ... nor encoding it. assert.Panics(t, func() { bz, err := cdc.MarshalJSON(obj) assert.Fail(t, "should have paniced but got bz: %X err: %v", bz, err) }) } func TestUnmarshalJSON(t *testing.T) { t.Parallel() var cdc = amino.NewCodec() cases := []struct { blob string in interface{} want interface{} wantErr string }{ { "null", 2, nil, "expects a pointer", }, { "null", new(int), new(int), "", }, { "2", new(int), intPtr(2), "", }, { `{"null"}`, new(int), nil, "invalid character", }, { `{"type":"AEB127E121A6B0","value":{"Vehicle":null,"Capacity":0}}`, new(Transport), new(Transport), "", }, { `{"type":"AEB127E121A6B0","value":{"Vehicle":{"type":"2B2961A431B238","value":"Bugatti"},"Capacity":10}}`, new(Transport), &Transport{ Vehicle: Car("Bugatti"), Capacity: 10, }, "", }, { `{"type":"2B2961A431B238","value":"Bugatti"}`, new(Car), func() *Car { c := Car("Bugatti"); return &c }(), "", }, { `[1, 2, 3]`, new([]int), func() interface{} { v := []int{1, 2, 3} return &v }(), "", }, { `["1", "2", "3"]`, new([]string), func() interface{} { v := []string{"1", "2", "3"} return &v }(), "", }, { `[1, "2", ["foo", "bar"]]`, new([]interface{}), nil, "Unregistered", }, { `2.34`, floatPtr(2.34), nil, "float* support requires", }, {"<FooBar>", new(fp), &fp{"<FooBar>", 0}, ""}, 
{"10", new(fp), &fp{Name: "10"}, ""}, {`{"PC":125,"FP":"10"}`, new(innerFP), &innerFP{PC: 125, FP: &fp{Name: `"10"`}}, ""}, {`{"PC":125,"FP":"<FP-FOO>"}`, new(innerFP), &innerFP{PC: 125, FP: &fp{Name: `"<FP-FOO>"`}}, ""}, } for i, tt := range cases { err := cdc.UnmarshalJSON([]byte(tt.blob), tt.in) if tt.wantErr != "" { if err == nil || !strings.Contains(err.Error(), tt.wantErr) { t.Errorf("#%d:\ngot:\n\t%q\nwant non-nil error containing\n\t%q", i, err, tt.wantErr) } continue } if err != nil { t.Errorf("#%d: unexpected error: %v\nblob: %s\nin: %+v\n", i, err, tt.blob, tt.in) continue } if g, w := tt.in, tt.want; !reflect.DeepEqual(g, w) { gb, _ := json.MarshalIndent(g, "", " ") wb, _ := json.MarshalIndent(w, "", " ") t.Errorf("#%d:\ngot:\n\t%#v\n(%s)\n\nwant:\n\t%#v\n(%s)", i, g, gb, w, wb) } } } func TestJSONCodecRoundTrip(t *testing.T) { var cdc = amino.NewCodec() type allInclusive struct { Tr Transport `json:"trx"` Vehicle Vehicle `json:"v,omitempty"` Comment string Data []byte } cases := []struct { in interface{} want interface{} out interface{} wantErr string }{ 0: { in: &allInclusive{ Tr: Transport{ Vehicle: Boat("Oracle"), }, Comment: "To the Cosmos! баллинг в космос", Data: []byte("祝你好运"), }, out: new(allInclusive), want: &allInclusive{ Tr: Transport{ Vehicle: Boat("Oracle"), }, Comment: "To the Cosmos! 
баллинг в космос", Data: []byte("祝你好运"), }, }, 1: { in: Transport{Vehicle: Plane{Name: "G6", MaxAltitude: 51e3}, Capacity: 18}, out: new(Transport), want: &Transport{Vehicle: Plane{Name: "G6", MaxAltitude: 51e3}, Capacity: 18}, }, } for i, tt := range cases { mBlob, err := cdc.MarshalJSON(tt.in) if tt.wantErr != "" { if err == nil || !strings.Contains(err.Error(), tt.wantErr) { t.Errorf("#%d:\ngot:\n\t%q\nwant non-nil error containing\n\t%q", i, err, tt.wantErr) } continue } if err != nil { t.Errorf("#%d: unexpected error after MarshalJSON: %v", i, err) continue } if err := cdc.UnmarshalJSON(mBlob, tt.out); err != nil { t.Errorf("#%d: unexpected error after UnmarshalJSON: %v\nmBlob: %s", i, err, mBlob) continue } // Now check that the input is exactly equal to the output uBlob, err := cdc.MarshalJSON(tt.out) if err := cdc.UnmarshalJSON(mBlob, tt.out); err != nil { t.Errorf("#%d: unexpected error after second MarshalJSON: %v", i, err) continue } if !reflect.DeepEqual(tt.want, tt.out) { t.Errorf("#%d: After roundtrip UnmarshalJSON\ngot: \t%v\nwant:\t%v", i, tt.out, tt.want) } if !bytes.Equal(mBlob, uBlob) { t.Errorf("#%d: After roundtrip MarshalJSON\ngot: \t%s\nwant:\t%s", i, uBlob, mBlob) } } } func intPtr(i int) *int { return &i } func floatPtr(f float64) *float64 { return &f } type noFields struct{} type noExportedFields struct { a int b string } type oneExportedField struct { _Foo int A string b string } type aPointerField struct { Foo *int Name string `json:"nm,omitempty"` } type doublyEmbedded struct { Inner *aPointerFieldAndEmbeddedField Year int64 `json:"year"` } type aPointerFieldAndEmbeddedField struct { Foo *int Name string `json:"nm,omitempty"` *oneExportedField B *oneExportedField `json:"bz,omitempty"` } type customJSONMarshaler int var _ json.Marshaler = (*customJSONMarshaler)(nil) func (cm customJSONMarshaler) MarshalJSON() ([]byte, error) { return []byte(`"Tendermint"`), nil } type withCustomMarshaler struct { F customJSONMarshaler `json:"fx"` A 
*aPointerField } type Transport struct { Vehicle Capacity int } type Vehicle interface { Move() error } type Asset interface { Value() float64 } func (c Car) Value() float64 { return 60000.0 } type BalanceSheet struct { Assets []Asset `json:"assets"` } type Car string type Boat string type Plane struct { Name string MaxAltitude int64 } type insurancePlan int func (ip insurancePlan) Value() float64 { return float64(ip) } func (c Car) Move() error { return nil } func (b Boat) Move() error { return nil } func (p Plane) Move() error { return nil } func interfacePtr(v interface{}) *interface{} { return &v } Fix tests package amino_test import ( "bytes" "encoding/json" "reflect" "strings" "testing" "time" "github.com/stretchr/testify/assert" "github.com/tendermint/go-amino" ) func registerTransports(cdc *amino.Codec) { cdc.RegisterConcrete(&Transport{}, "our/transport", nil) cdc.RegisterInterface((*Vehicle)(nil), &amino.InterfaceOptions{AlwaysDisambiguate: true}) cdc.RegisterInterface((*Asset)(nil), &amino.InterfaceOptions{AlwaysDisambiguate: true}) cdc.RegisterConcrete(Car(""), "car", nil) cdc.RegisterConcrete(insurancePlan(0), "insuranceplan", nil) cdc.RegisterConcrete(Boat(""), "boat", nil) cdc.RegisterConcrete(Plane{}, "plane", nil) } func TestMarshalJSON(t *testing.T) { t.Parallel() var cdc = amino.NewCodec() registerTransports(cdc) cases := []struct { in interface{} want string wantErr string }{ {&noFields{}, "{}", ""}, {&noExportedFields{a: 10, b: "foo"}, "{}", ""}, {nil, "null", ""}, {&oneExportedField{}, `{"A":""}`, ""}, {Vehicle(Car("Tesla")), `{"type":"2B2961A431B238","value":"Tesla"}`, ""}, {Car("Tesla"), `{"type":"2B2961A431B238","value":"Tesla"}`, ""}, {&oneExportedField{A: "Z"}, `{"A":"Z"}`, ""}, {[]string{"a", "bc"}, `["a","bc"]`, ""}, {[]interface{}{"a", "bc", 10, 10.93, 1e3}, ``, "Unregistered"}, {aPointerField{Foo: new(int), Name: "name"}, `{"Foo":0,"nm":"name"}`, ""}, { aPointerFieldAndEmbeddedField{intPtr(11), "ap", nil, &oneExportedField{A: "foo"}}, 
`{"Foo":11,"nm":"ap","bz":{"A":"foo"}}`, "", }, { doublyEmbedded{ Inner: &aPointerFieldAndEmbeddedField{ intPtr(11), "ap", nil, &oneExportedField{A: "foo"}, }, }, `{"Inner":{"Foo":11,"nm":"ap","bz":{"A":"foo"}},"year":0}`, "", }, { struct{}{}, `{}`, "", }, { struct{ A int }{A: 10}, `{"A":10}`, "", }, { Transport{}, `{"type":"AEB127E121A6B0","value":{"Vehicle":null,"Capacity":0}}`, "", }, { Transport{Vehicle: Car("Bugatti")}, `{"type":"AEB127E121A6B0","value":{"Vehicle":{"type":"2B2961A431B238","value":"Bugatti"},"Capacity":0}}`, "", }, { BalanceSheet{Assets: []Asset{Car("Corolla"), insurancePlan(1e7)}}, `{"assets":[{"type":"2B2961A431B238","value":"Corolla"},{"type":"7DF0BC76182A18","value":10000000}]}`, "", }, { Transport{Vehicle: Boat("Poseidon"), Capacity: 1789}, `{"type":"AEB127E121A6B0","value":{"Vehicle":{"type":"25CDB46D8D2110","value":"Poseidon"},"Capacity":1789}}`, "", }, { withCustomMarshaler{A: &aPointerField{Foo: intPtr(12)}, F: customJSONMarshaler(10)}, `{"fx":"Tendermint","A":{"Foo":12}}`, "", }, { func() json.Marshaler { v := customJSONMarshaler(10); return &v }(), `"Tendermint"`, "", }, // We don't yet support interface pointer registration i.e. 
`*interface{}` {interfacePtr("a"), "", "Unregistered interface interface {}"}, {&fp{"Foo", 10}, "<FP-MARSHALJSON>", ""}, {(*fp)(nil), "null", ""}, {struct { FP *fp Package string }{FP: &fp{"Foo", 10}, Package: "bytes"}, `{"FP":<FP-MARSHALJSON>,"Package":"bytes"}`, ""}, } for i, tt := range cases { t.Logf("Trying case #%v", i) blob, err := cdc.MarshalJSON(tt.in) if tt.wantErr != "" { if err == nil || !strings.Contains(err.Error(), tt.wantErr) { t.Errorf("#%d:\ngot:\n\t%q\nwant non-nil error containing\n\t%q", i, err, tt.wantErr) } continue } if err != nil { t.Errorf("#%d: unexpected error: %v\nblob: %v", i, err, tt.in) continue } if g, w := string(blob), tt.want; g != w { t.Errorf("#%d:\ngot:\n\t%s\nwant:\n\t%s", i, g, w) } } } func TestMarshalJSONWithMonotonicTime(t *testing.T) { var cdc = amino.NewCodec() registerTransports(cdc) type SimpleStruct struct { String string Bytes []byte Time time.Time } s := SimpleStruct{ String: "hello", Bytes: []byte("goodbye"), Time: time.Now().UTC().Truncate(time.Millisecond), // strip monotonic and timezone. } b, err := cdc.MarshalJSON(s) assert.Nil(t, err) var s2 SimpleStruct err = cdc.UnmarshalJSON(b, &s2) assert.Nil(t, err) assert.Equal(t, s, s2) } type fp struct { Name string Version int } func (f *fp) MarshalJSON() ([]byte, error) { return []byte("<FP-MARSHALJSON>"), nil } func (f *fp) UnmarshalJSON(blob []byte) error { f.Name = string(blob) return nil } var _ json.Marshaler = (*fp)(nil) var _ json.Unmarshaler = (*fp)(nil) type innerFP struct { PC uint64 FP *fp } func TestUnmarshalMap(t *testing.T) { binBytes := []byte(`dontcare`) jsonBytes := []byte(`{"2": 2}`) obj := new(map[string]int) cdc := amino.NewCodec() // Binary doesn't support decoding to a map... assert.Panics(t, func() { err := cdc.UnmarshalBinary(binBytes, &obj) assert.Fail(t, "should have paniced but got err: %v", err) }) assert.Panics(t, func() { err := cdc.UnmarshalBinary(binBytes, obj) assert.Fail(t, "should have paniced but got err: %v", err) }) // ... 
nor encoding it. assert.Panics(t, func() { bz, err := cdc.MarshalBinary(obj) assert.Fail(t, "should have paniced but got bz: %X err: %v", bz, err) }) // JSON doesn't support decoding to a map... assert.Panics(t, func() { err := cdc.UnmarshalJSON(jsonBytes, &obj) assert.Fail(t, "should have paniced but got err: %v", err) }) assert.Panics(t, func() { err := cdc.UnmarshalJSON(jsonBytes, obj) assert.Fail(t, "should have paniced but got err: %v", err) }) // ... nor encoding it. assert.Panics(t, func() { bz, err := cdc.MarshalJSON(obj) assert.Fail(t, "should have paniced but got bz: %X err: %v", bz, err) }) } func TestUnmarshalFunc(t *testing.T) { binBytes := []byte(`dontcare`) jsonBytes := []byte(`"dontcare"`) obj := func() {} cdc := amino.NewCodec() // Binary doesn't support decoding to a func... assert.Panics(t, func() { err := cdc.UnmarshalBinary(binBytes, &obj) assert.Fail(t, "should have paniced but got err: %v", err) }) assert.Panics(t, func() { err := cdc.UnmarshalBinary(binBytes, obj) assert.Fail(t, "should have paniced but got err: %v", err) }) // ... nor encoding it. assert.Panics(t, func() { bz, err := cdc.MarshalBinary(obj) assert.Fail(t, "should have paniced but got bz: %X err: %v", bz, err) }) // JSON doesn't support decoding to a func... assert.Panics(t, func() { err := cdc.UnmarshalJSON(jsonBytes, &obj) assert.Fail(t, "should have paniced but got err: %v", err) }) assert.Panics(t, func() { err := cdc.UnmarshalJSON(jsonBytes, obj) assert.Fail(t, "should have paniced but got err: %v", err) }) // ... nor encoding it. 
assert.Panics(t, func() { bz, err := cdc.MarshalJSON(obj) assert.Fail(t, "should have paniced but got bz: %X err: %v", bz, err) }) } func TestUnmarshalJSON(t *testing.T) { t.Parallel() var cdc = amino.NewCodec() registerTransports(cdc) cases := []struct { blob string in interface{} want interface{} wantErr string }{ { "null", 2, nil, "expects a pointer", }, { "null", new(int), new(int), "", }, { "2", new(int), intPtr(2), "", }, { `{"null"}`, new(int), nil, "invalid character", }, { `{"type":"AEB127E121A6B0","value":{"Vehicle":null,"Capacity":0}}`, new(Transport), new(Transport), "", }, { `{"type":"AEB127E121A6B0","value":{"Vehicle":{"type":"2B2961A431B238","value":"Bugatti"},"Capacity":10}}`, new(Transport), &Transport{ Vehicle: Car("Bugatti"), Capacity: 10, }, "", }, { `{"type":"2B2961A431B238","value":"Bugatti"}`, new(Car), func() *Car { c := Car("Bugatti"); return &c }(), "", }, { `[1, 2, 3]`, new([]int), func() interface{} { v := []int{1, 2, 3} return &v }(), "", }, { `["1", "2", "3"]`, new([]string), func() interface{} { v := []string{"1", "2", "3"} return &v }(), "", }, { `[1, "2", ["foo", "bar"]]`, new([]interface{}), nil, "Unregistered", }, { `2.34`, floatPtr(2.34), nil, "float* support requires", }, {"<FooBar>", new(fp), &fp{"<FooBar>", 0}, ""}, {"10", new(fp), &fp{Name: "10"}, ""}, {`{"PC":125,"FP":"10"}`, new(innerFP), &innerFP{PC: 125, FP: &fp{Name: `"10"`}}, ""}, {`{"PC":125,"FP":"<FP-FOO>"}`, new(innerFP), &innerFP{PC: 125, FP: &fp{Name: `"<FP-FOO>"`}}, ""}, } for i, tt := range cases { err := cdc.UnmarshalJSON([]byte(tt.blob), tt.in) if tt.wantErr != "" { if err == nil || !strings.Contains(err.Error(), tt.wantErr) { t.Errorf("#%d:\ngot:\n\t%q\nwant non-nil error containing\n\t%q", i, err, tt.wantErr) } continue } if err != nil { t.Errorf("#%d: unexpected error: %v\nblob: %s\nin: %+v\n", i, err, tt.blob, tt.in) continue } if g, w := tt.in, tt.want; !reflect.DeepEqual(g, w) { gb, _ := json.MarshalIndent(g, "", " ") wb, _ := json.MarshalIndent(w, "", " 
") t.Errorf("#%d:\ngot:\n\t%#v\n(%s)\n\nwant:\n\t%#v\n(%s)", i, g, gb, w, wb) } } } func TestJSONCodecRoundTrip(t *testing.T) { var cdc = amino.NewCodec() registerTransports(cdc) type allInclusive struct { Tr Transport `json:"trx"` Vehicle Vehicle `json:"v,omitempty"` Comment string Data []byte } cases := []struct { in interface{} want interface{} out interface{} wantErr string }{ 0: { in: &allInclusive{ Tr: Transport{ Vehicle: Boat("Oracle"), }, Comment: "To the Cosmos! баллинг в космос", Data: []byte("祝你好运"), }, out: new(allInclusive), want: &allInclusive{ Tr: Transport{ Vehicle: Boat("Oracle"), }, Comment: "To the Cosmos! баллинг в космос", Data: []byte("祝你好运"), }, }, 1: { in: Transport{Vehicle: Plane{Name: "G6", MaxAltitude: 51e3}, Capacity: 18}, out: new(Transport), want: &Transport{Vehicle: Plane{Name: "G6", MaxAltitude: 51e3}, Capacity: 18}, }, } for i, tt := range cases { mBlob, err := cdc.MarshalJSON(tt.in) if tt.wantErr != "" { if err == nil || !strings.Contains(err.Error(), tt.wantErr) { t.Errorf("#%d:\ngot:\n\t%q\nwant non-nil error containing\n\t%q", i, err, tt.wantErr) } continue } if err != nil { t.Errorf("#%d: unexpected error after MarshalJSON: %v", i, err) continue } if err := cdc.UnmarshalJSON(mBlob, tt.out); err != nil { t.Errorf("#%d: unexpected error after UnmarshalJSON: %v\nmBlob: %s", i, err, mBlob) continue } // Now check that the input is exactly equal to the output uBlob, err := cdc.MarshalJSON(tt.out) if err := cdc.UnmarshalJSON(mBlob, tt.out); err != nil { t.Errorf("#%d: unexpected error after second MarshalJSON: %v", i, err) continue } if !reflect.DeepEqual(tt.want, tt.out) { t.Errorf("#%d: After roundtrip UnmarshalJSON\ngot: \t%v\nwant:\t%v", i, tt.out, tt.want) } if !bytes.Equal(mBlob, uBlob) { t.Errorf("#%d: After roundtrip MarshalJSON\ngot: \t%s\nwant:\t%s", i, uBlob, mBlob) } } } func intPtr(i int) *int { return &i } func floatPtr(f float64) *float64 { return &f } type noFields struct{} type noExportedFields struct { a int b 
string } type oneExportedField struct { _Foo int A string b string } type aPointerField struct { Foo *int Name string `json:"nm,omitempty"` } type doublyEmbedded struct { Inner *aPointerFieldAndEmbeddedField Year int64 `json:"year"` } type aPointerFieldAndEmbeddedField struct { Foo *int Name string `json:"nm,omitempty"` *oneExportedField B *oneExportedField `json:"bz,omitempty"` } type customJSONMarshaler int var _ json.Marshaler = (*customJSONMarshaler)(nil) func (cm customJSONMarshaler) MarshalJSON() ([]byte, error) { return []byte(`"Tendermint"`), nil } type withCustomMarshaler struct { F customJSONMarshaler `json:"fx"` A *aPointerField } type Transport struct { Vehicle Capacity int } type Vehicle interface { Move() error } type Asset interface { Value() float64 } func (c Car) Value() float64 { return 60000.0 } type BalanceSheet struct { Assets []Asset `json:"assets"` } type Car string type Boat string type Plane struct { Name string MaxAltitude int64 } type insurancePlan int func (ip insurancePlan) Value() float64 { return float64(ip) } func (c Car) Move() error { return nil } func (b Boat) Move() error { return nil } func (p Plane) Move() error { return nil } func interfacePtr(v interface{}) *interface{} { return &v }
package ovn

import (
	"fmt"
	"strings"
	"time"

	util "github.com/openvswitch/ovn-kubernetes/go-controller/pkg/util"
	"github.com/sirupsen/logrus"
	kapi "k8s.io/api/core/v1"
)

// syncPods deletes OVN logical switch ports that no longer correspond to a
// known pod (stale ports left behind by missed delete events).
func (oc *Controller) syncPods(pods []interface{}) {
	// get the list of logical switch ports (equivalent to pods)
	expectedLogicalPorts := make(map[string]bool)
	for _, podInterface := range pods {
		pod, ok := podInterface.(*kapi.Pod)
		if !ok {
			logrus.Errorf("Spurious object in syncPods: %v", podInterface)
			continue
		}
		logicalPort := fmt.Sprintf("%s_%s", pod.Namespace, pod.Name)
		expectedLogicalPorts[logicalPort] = true
	}

	// get the list of logical ports from OVN
	output, stderr, err := util.RunOVNNbctl("--data=bare", "--no-heading",
		"--columns=name", "find", "logical_switch_port", "external_ids:pod=true")
	if err != nil {
		logrus.Errorf("Error in obtaining list of logical ports, "+
			"stderr: %q, err: %v", stderr, err)
		return
	}
	existingLogicalPorts := strings.Fields(output)
	for _, existingPort := range existingLogicalPorts {
		if _, ok := expectedLogicalPorts[existingPort]; !ok {
			// not found, delete this logical port
			logrus.Infof("Stale logical port found: %s. This logical port will be deleted.", existingPort)
			out, stderr, err := util.RunOVNNbctl("--if-exists", "lsp-del",
				existingPort)
			if err != nil {
				logrus.Errorf("Error in deleting pod's logical port "+
					"stdout: %q, stderr: %q err: %v", out, stderr, err)
			}
			if !oc.portGroupSupport {
				oc.deletePodAcls(existingPort)
			}
		}
	}
}

// deletePodAcls removes every ACL referencing logicalPort from its owning
// logical switch (pre-port-group cleanup path).
func (oc *Controller) deletePodAcls(logicalPort string) {
	// delete the ACL rules on OVN that corresponding pod has been deleted
	uuids, stderr, err := util.RunOVNNbctl("--data=bare", "--no-heading",
		"--columns=_uuid", "find", "ACL",
		fmt.Sprintf("external_ids:logical_port=%s", logicalPort))
	if err != nil {
		logrus.Errorf("Error in getting list of acls "+
			"stdout: %q, stderr: %q, error: %v", uuids, stderr, err)
		return
	}

	if uuids == "" {
		logrus.Debugf("deletePodAcls: returning because find " +
			"returned no ACLs")
		return
	}

	uuidSlice := strings.Fields(uuids)
	for _, uuid := range uuidSlice {
		// Get logical switch
		out, stderr, err := util.RunOVNNbctl("--data=bare", "--no-heading",
			"--columns=_uuid", "find", "logical_switch",
			fmt.Sprintf("acls{>=}%s", uuid))
		if err != nil {
			logrus.Errorf("find failed to get the logical_switch of acl "+
				"uuid=%s, stderr: %q, (%v)", uuid, stderr, err)
			continue
		}

		if out == "" {
			continue
		}
		logicalSwitch := out

		_, stderr, err = util.RunOVNNbctl("--if-exists", "remove",
			"logical_switch", logicalSwitch, "acls", uuid)
		if err != nil {
			logrus.Errorf("failed to delete the allow-from rule %s for"+
				" logical_switch=%s, logical_port=%s, stderr: %q, (%v)",
				uuid, logicalSwitch, logicalPort, stderr, err)
			continue
		}
	}
}

// getLogicalPortUUID returns (and caches) the OVN _uuid of logicalPort,
// or "" if it cannot be fetched.
func (oc *Controller) getLogicalPortUUID(logicalPort string) string {
	if oc.logicalPortUUIDCache[logicalPort] != "" {
		return oc.logicalPortUUIDCache[logicalPort]
	}

	out, stderr, err := util.RunOVNNbctl("--if-exists", "get",
		"logical_switch_port", logicalPort, "_uuid")
	if err != nil {
		logrus.Errorf("Error while getting uuid for logical_switch_port "+
			"%s, stderr: %q, err: %v", logicalPort, stderr, err)
		return ""
	}

	if out == "" {
		return out
	}

	oc.logicalPortUUIDCache[logicalPort] = out
	return oc.logicalPortUUIDCache[logicalPort]
}

// getGatewayFromSwitch returns the gateway IP and mask of logicalSwitch,
// consulting a cache guarded by lsMutex before querying OVN.
func (oc *Controller) getGatewayFromSwitch(logicalSwitch string) (string, string, error) {
	var gatewayIPMaskStr, stderr string
	var ok bool
	var err error

	oc.lsMutex.Lock()
	defer oc.lsMutex.Unlock()
	if gatewayIPMaskStr, ok = oc.gatewayCache[logicalSwitch]; !ok {
		gatewayIPMaskStr, stderr, err = util.RunOVNNbctl("--if-exists",
			"get", "logical_switch", logicalSwitch,
			"external_ids:gateway_ip")
		if err != nil {
			logrus.Errorf("Failed to get gateway IP:  %s, stderr: %q, %v",
				gatewayIPMaskStr, stderr, err)
			return "", "", err
		}
		if gatewayIPMaskStr == "" {
			return "", "", fmt.Errorf("Empty gateway IP in logical switch %s",
				logicalSwitch)
		}
		oc.gatewayCache[logicalSwitch] = gatewayIPMaskStr
	}
	gatewayIPMask := strings.Split(gatewayIPMaskStr, "/")
	gatewayIP := gatewayIPMask[0]
	mask := gatewayIPMask[1]
	logrus.Debugf("Gateway IP: %s, Mask: %s", gatewayIP, mask)
	return gatewayIP, mask, nil
}

// deleteLogicalPort removes the pod's logical switch port, evicts the pod
// from all local caches and (pre-port-group) tears down its deny ACLs.
func (oc *Controller) deleteLogicalPort(pod *kapi.Pod) {
	if pod.Spec.HostNetwork {
		return
	}

	logrus.Infof("Deleting pod: %s", pod.Name)
	logicalPort := fmt.Sprintf("%s_%s", pod.Namespace, pod.Name)
	out, stderr, err := util.RunOVNNbctl("--if-exists", "lsp-del",
		logicalPort)
	if err != nil {
		logrus.Errorf("Error in deleting pod logical port "+
			"stdout: %q, stderr: %q, (%v)", out, stderr, err)
	}

	ipAddress := oc.getIPFromOvnAnnotation(pod.Annotations["ovn"])

	delete(oc.logicalPortCache, logicalPort)

	oc.lspMutex.Lock()
	delete(oc.lspIngressDenyCache, logicalPort)
	delete(oc.lspEgressDenyCache, logicalPort)
	delete(oc.logicalPortUUIDCache, logicalPort)
	oc.lspMutex.Unlock()

	if !oc.portGroupSupport {
		oc.deleteACLDenyOld(pod.Namespace, pod.Spec.NodeName, logicalPort,
			"Ingress")
		oc.deleteACLDenyOld(pod.Namespace, pod.Spec.NodeName, logicalPort,
			"Egress")
	}
	oc.deletePodFromNamespaceAddressSet(pod.Namespace, ipAddress)
	return
}

// addLogicalPort creates the pod's logical switch port (static or dynamic
// addressing), waits for addresses, then records them in the pod annotation.
func (oc *Controller) addLogicalPort(pod *kapi.Pod) {
	var out, stderr string
	var err error

	if pod.Spec.HostNetwork {
		return
	}

	logicalSwitch := pod.Spec.NodeName
	if logicalSwitch == "" {
		logrus.Errorf("Failed to find the logical switch for pod %s/%s",
			pod.Namespace, pod.Name)
		return
	}

	oc.lsMutex.Lock()
	if !oc.logicalSwitchCache[logicalSwitch] {
		oc.logicalSwitchCache[logicalSwitch] = true
		oc.addAllowACLFromNode(logicalSwitch)
	}
	oc.lsMutex.Unlock()

	portName := fmt.Sprintf("%s_%s", pod.Namespace, pod.Name)
	logrus.Debugf("Creating logical port for %s on switch %s", portName,
		logicalSwitch)

	annotation, isStaticIP := pod.Annotations["ovn"]

	// If pod already has annotations, just add the lsp with static ip/mac.
	// Else, create the lsp with dynamic addresses.
	if isStaticIP {
		ipAddress := oc.getIPFromOvnAnnotation(annotation)
		macAddress := oc.getMacFromOvnAnnotation(annotation)

		out, stderr, err = util.RunOVNNbctl("--may-exist", "lsp-add",
			logicalSwitch, portName, "--", "lsp-set-addresses", portName,
			fmt.Sprintf("%s %s", macAddress, ipAddress), "--", "--if-exists",
			"clear", "logical_switch_port", portName, "dynamic_addresses")
		if err != nil {
			logrus.Errorf("Failed to add logical port to switch "+
				"stdout: %q, stderr: %q (%v)", out, stderr, err)
			return
		}
	} else {
		out, stderr, err = util.RunOVNNbctl("--wait=sb", "--",
			"--may-exist", "lsp-add", logicalSwitch, portName,
			"--", "lsp-set-addresses", portName, "dynamic", "--", "set",
			"logical_switch_port", portName,
			"external-ids:namespace="+pod.Namespace,
			"external-ids:logical_switch="+logicalSwitch,
			"external-ids:pod=true")
		if err != nil {
			logrus.Errorf("Error while creating logical port %s "+
				"stdout: %q, stderr: %q (%v)",
				portName, out, stderr, err)
			return
		}
	}

	oc.logicalPortCache[portName] = logicalSwitch

	gatewayIP, mask, err := oc.getGatewayFromSwitch(logicalSwitch)
	if err != nil {
		logrus.Errorf("Error obtaining gateway address for switch %s",
			logicalSwitch)
		return
	}

	// Poll up to 30s for OVN to assign/report the port's addresses.
	count := 30
	for count > 0 {
		if isStaticIP {
			out, stderr, err = util.RunOVNNbctl("get",
				"logical_switch_port", portName, "addresses")
		} else {
			out, stderr, err = util.RunOVNNbctl("get",
				"logical_switch_port", portName, "dynamic_addresses")
		}
		if err == nil && out != "[]" {
			break
		}
		if err != nil {
			logrus.Errorf("Error while obtaining addresses for %s - %v", portName,
				err)
			return
		}
		time.Sleep(time.Second)
		count--
	}
	if count == 0 {
		logrus.Errorf("Error while obtaining addresses for %s "+
			"stdout: %q, stderr: %q, (%v)", portName, out, stderr, err)
		return
	}

	// static addresses have format ["0a:00:00:00:00:01 192.168.1.3"], while
	// dynamic addresses have format "0a:00:00:00:00:01 192.168.1.3".
	outStr := strings.TrimLeft(out, `[`)
	outStr = strings.TrimRight(outStr, `]`)
	outStr = strings.Trim(outStr, `"`)
	addresses := strings.Split(outStr, " ")
	if len(addresses) != 2 {
		logrus.Errorf("Error while obtaining addresses for %s", portName)
		return
	}

	if !isStaticIP {
		annotation = fmt.Sprintf(`{\"ip_address\":\"%s/%s\", \"mac_address\":\"%s\", \"gateway_ip\": \"%s\"}`,
			addresses[1], mask, addresses[0], gatewayIP)
		logrus.Debugf("Annotation values: ip=%s/%s ; mac=%s ; gw=%s\nAnnotation=%s",
			addresses[1], mask, addresses[0], gatewayIP, annotation)
		err = oc.kube.SetAnnotationOnPod(pod, "ovn", annotation)
		if err != nil {
			logrus.Errorf("Failed to set annotation on pod %s - %v", pod.Name, err)
		}
	}
	oc.addPodToNamespaceAddressSet(pod.Namespace, addresses[1])

	return
}

// AddLogicalPortWithIP add logical port with static ip address
// and mac adddress for the pod
func (oc *Controller) AddLogicalPortWithIP(pod *kapi.Pod) {
	if pod.Spec.HostNetwork {
		return
	}

	portName := fmt.Sprintf("%s_%s", pod.Namespace, pod.Name)
	logicalSwitch := pod.Spec.NodeName
	logrus.Debugf("Creating logical port for %s on switch %s", portName,
		logicalSwitch)

	annotation, ok := pod.Annotations["ovn"]
	if !ok {
		logrus.Errorf("Failed to get ovn annotation from pod!")
		return
	}
	ipAddress := oc.getIPFromOvnAnnotation(annotation)
	macAddress := oc.getMacFromOvnAnnotation(annotation)

	stdout, stderr, err := util.RunOVNNbctl("--", "--may-exist", "lsp-add",
		logicalSwitch, portName, "--", "lsp-set-addresses", portName,
		fmt.Sprintf("%s %s", macAddress, ipAddress))
	if err != nil {
		logrus.Errorf("Failed to add logical port to switch, stdout: %q, "+
			"stderr: %q, error: %v", stdout, stderr, err)
		return
	}
}
pods: use GetPortAddress()

Signed-off-by: Dan Williams <aeade43d0f8ae14e7c44fa81fe17c1635ae376fe@redhat.com>
package ovn

import (
	"fmt"
	"strings"
	"time"

	util "github.com/openvswitch/ovn-kubernetes/go-controller/pkg/util"
	"github.com/sirupsen/logrus"
	kapi "k8s.io/api/core/v1"
)

// syncPods deletes OVN logical switch ports that no longer correspond to a
// known pod (stale ports left behind by missed delete events).
func (oc *Controller) syncPods(pods []interface{}) {
	// get the list of logical switch ports (equivalent to pods)
	expectedLogicalPorts := make(map[string]bool)
	for _, podInterface := range pods {
		pod, ok := podInterface.(*kapi.Pod)
		if !ok {
			logrus.Errorf("Spurious object in syncPods: %v", podInterface)
			continue
		}
		logicalPort := fmt.Sprintf("%s_%s", pod.Namespace, pod.Name)
		expectedLogicalPorts[logicalPort] = true
	}

	// get the list of logical ports from OVN
	output, stderr, err := util.RunOVNNbctl("--data=bare", "--no-heading",
		"--columns=name", "find", "logical_switch_port", "external_ids:pod=true")
	if err != nil {
		logrus.Errorf("Error in obtaining list of logical ports, "+
			"stderr: %q, err: %v", stderr, err)
		return
	}
	existingLogicalPorts := strings.Fields(output)
	for _, existingPort := range existingLogicalPorts {
		if _, ok := expectedLogicalPorts[existingPort]; !ok {
			// not found, delete this logical port
			logrus.Infof("Stale logical port found: %s. This logical port will be deleted.", existingPort)
			out, stderr, err := util.RunOVNNbctl("--if-exists", "lsp-del",
				existingPort)
			if err != nil {
				logrus.Errorf("Error in deleting pod's logical port "+
					"stdout: %q, stderr: %q err: %v", out, stderr, err)
			}
			if !oc.portGroupSupport {
				oc.deletePodAcls(existingPort)
			}
		}
	}
}

// deletePodAcls removes every ACL referencing logicalPort from its owning
// logical switch (pre-port-group cleanup path).
func (oc *Controller) deletePodAcls(logicalPort string) {
	// delete the ACL rules on OVN that corresponding pod has been deleted
	uuids, stderr, err := util.RunOVNNbctl("--data=bare", "--no-heading",
		"--columns=_uuid", "find", "ACL",
		fmt.Sprintf("external_ids:logical_port=%s", logicalPort))
	if err != nil {
		logrus.Errorf("Error in getting list of acls "+
			"stdout: %q, stderr: %q, error: %v", uuids, stderr, err)
		return
	}

	if uuids == "" {
		logrus.Debugf("deletePodAcls: returning because find " +
			"returned no ACLs")
		return
	}

	uuidSlice := strings.Fields(uuids)
	for _, uuid := range uuidSlice {
		// Get logical switch
		out, stderr, err := util.RunOVNNbctl("--data=bare", "--no-heading",
			"--columns=_uuid", "find", "logical_switch",
			fmt.Sprintf("acls{>=}%s", uuid))
		if err != nil {
			logrus.Errorf("find failed to get the logical_switch of acl "+
				"uuid=%s, stderr: %q, (%v)", uuid, stderr, err)
			continue
		}

		if out == "" {
			continue
		}
		logicalSwitch := out

		_, stderr, err = util.RunOVNNbctl("--if-exists", "remove",
			"logical_switch", logicalSwitch, "acls", uuid)
		if err != nil {
			logrus.Errorf("failed to delete the allow-from rule %s for"+
				" logical_switch=%s, logical_port=%s, stderr: %q, (%v)",
				uuid, logicalSwitch, logicalPort, stderr, err)
			continue
		}
	}
}

// getLogicalPortUUID returns (and caches) the OVN _uuid of logicalPort,
// or "" if it cannot be fetched.
func (oc *Controller) getLogicalPortUUID(logicalPort string) string {
	if oc.logicalPortUUIDCache[logicalPort] != "" {
		return oc.logicalPortUUIDCache[logicalPort]
	}

	out, stderr, err := util.RunOVNNbctl("--if-exists", "get",
		"logical_switch_port", logicalPort, "_uuid")
	if err != nil {
		logrus.Errorf("Error while getting uuid for logical_switch_port "+
			"%s, stderr: %q, err: %v", logicalPort, stderr, err)
		return ""
	}

	if out == "" {
		return out
	}

	oc.logicalPortUUIDCache[logicalPort] = out
	return oc.logicalPortUUIDCache[logicalPort]
}

// getGatewayFromSwitch returns the gateway IP and mask of logicalSwitch,
// consulting a cache guarded by lsMutex before querying OVN.
func (oc *Controller) getGatewayFromSwitch(logicalSwitch string) (string, string, error) {
	var gatewayIPMaskStr, stderr string
	var ok bool
	var err error

	oc.lsMutex.Lock()
	defer oc.lsMutex.Unlock()
	if gatewayIPMaskStr, ok = oc.gatewayCache[logicalSwitch]; !ok {
		gatewayIPMaskStr, stderr, err = util.RunOVNNbctl("--if-exists",
			"get", "logical_switch", logicalSwitch,
			"external_ids:gateway_ip")
		if err != nil {
			logrus.Errorf("Failed to get gateway IP:  %s, stderr: %q, %v",
				gatewayIPMaskStr, stderr, err)
			return "", "", err
		}
		if gatewayIPMaskStr == "" {
			return "", "", fmt.Errorf("Empty gateway IP in logical switch %s",
				logicalSwitch)
		}
		oc.gatewayCache[logicalSwitch] = gatewayIPMaskStr
	}
	gatewayIPMask := strings.Split(gatewayIPMaskStr, "/")
	gatewayIP := gatewayIPMask[0]
	mask := gatewayIPMask[1]
	logrus.Debugf("Gateway IP: %s, Mask: %s", gatewayIP, mask)
	return gatewayIP, mask, nil
}

// deleteLogicalPort removes the pod's logical switch port, evicts the pod
// from all local caches and (pre-port-group) tears down its deny ACLs.
func (oc *Controller) deleteLogicalPort(pod *kapi.Pod) {
	if pod.Spec.HostNetwork {
		return
	}

	logrus.Infof("Deleting pod: %s", pod.Name)
	logicalPort := fmt.Sprintf("%s_%s", pod.Namespace, pod.Name)
	out, stderr, err := util.RunOVNNbctl("--if-exists", "lsp-del",
		logicalPort)
	if err != nil {
		logrus.Errorf("Error in deleting pod logical port "+
			"stdout: %q, stderr: %q, (%v)", out, stderr, err)
	}

	ipAddress := oc.getIPFromOvnAnnotation(pod.Annotations["ovn"])

	delete(oc.logicalPortCache, logicalPort)

	oc.lspMutex.Lock()
	delete(oc.lspIngressDenyCache, logicalPort)
	delete(oc.lspEgressDenyCache, logicalPort)
	delete(oc.logicalPortUUIDCache, logicalPort)
	oc.lspMutex.Unlock()

	if !oc.portGroupSupport {
		oc.deleteACLDenyOld(pod.Namespace, pod.Spec.NodeName, logicalPort,
			"Ingress")
		oc.deleteACLDenyOld(pod.Namespace, pod.Spec.NodeName, logicalPort,
			"Egress")
	}
	oc.deletePodFromNamespaceAddressSet(pod.Namespace, ipAddress)
	return
}

// addLogicalPort creates the pod's logical switch port (static or dynamic
// addressing), waits for the port's MAC/IP via util.GetPortAddresses, then
// records them in the pod annotation.
func (oc *Controller) addLogicalPort(pod *kapi.Pod) {
	var out, stderr string
	var err error

	if pod.Spec.HostNetwork {
		return
	}

	logicalSwitch := pod.Spec.NodeName
	if logicalSwitch == "" {
		logrus.Errorf("Failed to find the logical switch for pod %s/%s",
			pod.Namespace, pod.Name)
		return
	}

	oc.lsMutex.Lock()
	if !oc.logicalSwitchCache[logicalSwitch] {
		oc.logicalSwitchCache[logicalSwitch] = true
		oc.addAllowACLFromNode(logicalSwitch)
	}
	oc.lsMutex.Unlock()

	portName := fmt.Sprintf("%s_%s", pod.Namespace, pod.Name)
	logrus.Debugf("Creating logical port for %s on switch %s", portName,
		logicalSwitch)

	annotation, isStaticIP := pod.Annotations["ovn"]

	// If pod already has annotations, just add the lsp with static ip/mac.
	// Else, create the lsp with dynamic addresses.
	if isStaticIP {
		ipAddress := oc.getIPFromOvnAnnotation(annotation)
		macAddress := oc.getMacFromOvnAnnotation(annotation)

		out, stderr, err = util.RunOVNNbctl("--may-exist", "lsp-add",
			logicalSwitch, portName, "--", "lsp-set-addresses", portName,
			fmt.Sprintf("%s %s", macAddress, ipAddress), "--", "--if-exists",
			"clear", "logical_switch_port", portName, "dynamic_addresses")
		if err != nil {
			logrus.Errorf("Failed to add logical port to switch "+
				"stdout: %q, stderr: %q (%v)", out, stderr, err)
			return
		}
	} else {
		out, stderr, err = util.RunOVNNbctl("--wait=sb", "--",
			"--may-exist", "lsp-add", logicalSwitch, portName,
			"--", "lsp-set-addresses", portName, "dynamic", "--", "set",
			"logical_switch_port", portName,
			"external-ids:namespace="+pod.Namespace,
			"external-ids:logical_switch="+logicalSwitch,
			"external-ids:pod=true")
		if err != nil {
			logrus.Errorf("Error while creating logical port %s "+
				"stdout: %q, stderr: %q (%v)",
				portName, out, stderr, err)
			return
		}
	}

	oc.logicalPortCache[portName] = logicalSwitch

	gatewayIP, mask, err := oc.getGatewayFromSwitch(logicalSwitch)
	if err != nil {
		logrus.Errorf("Error obtaining gateway address for switch %s",
			logicalSwitch)
		return
	}

	// Poll up to 30s for OVN to assign/report the port's addresses.
	var podMac, podIP string
	count := 30
	for count > 0 {
		podMac, podIP, err = util.GetPortAddresses(portName, isStaticIP)
		if err == nil && podMac != "" && podIP != "" {
			break
		}
		if err != nil {
			logrus.Errorf("Error while obtaining addresses for %s - %v", portName,
				err)
			return
		}
		time.Sleep(time.Second)
		count--
	}
	if count == 0 {
		logrus.Errorf("Error while obtaining addresses for %s "+
			"stdout: %q, stderr: %q, (%v)", portName, out, stderr, err)
		return
	}

	if !isStaticIP {
		annotation = fmt.Sprintf(`{\"ip_address\":\"%s/%s\", \"mac_address\":\"%s\", \"gateway_ip\": \"%s\"}`,
			podIP, mask, podMac, gatewayIP)
		// BUGFIX: the ip/mac values were previously swapped in this debug log
		// (podMac printed in the ip slot and podIP in the mac slot).
		logrus.Debugf("Annotation values: ip=%s/%s ; mac=%s ; gw=%s\nAnnotation=%s",
			podIP, mask, podMac, gatewayIP, annotation)
		err = oc.kube.SetAnnotationOnPod(pod, "ovn", annotation)
		if err != nil {
			logrus.Errorf("Failed to set annotation on pod %s - %v", pod.Name, err)
		}
	}
	oc.addPodToNamespaceAddressSet(pod.Namespace, podIP)

	return
}

// AddLogicalPortWithIP add logical port with static ip address
// and mac adddress for the pod
func (oc *Controller) AddLogicalPortWithIP(pod *kapi.Pod) {
	if pod.Spec.HostNetwork {
		return
	}

	portName := fmt.Sprintf("%s_%s", pod.Namespace, pod.Name)
	logicalSwitch := pod.Spec.NodeName
	logrus.Debugf("Creating logical port for %s on switch %s", portName,
		logicalSwitch)

	annotation, ok := pod.Annotations["ovn"]
	if !ok {
		logrus.Errorf("Failed to get ovn annotation from pod!")
		return
	}
	ipAddress := oc.getIPFromOvnAnnotation(annotation)
	macAddress := oc.getMacFromOvnAnnotation(annotation)

	stdout, stderr, err := util.RunOVNNbctl("--", "--may-exist", "lsp-add",
		logicalSwitch, portName, "--", "lsp-set-addresses", portName,
		fmt.Sprintf("%s %s", macAddress, ipAddress))
	if err != nil {
		logrus.Errorf("Failed to add logical port to switch, stdout: %q, "+
			"stderr: %q, error: %v", stdout, stderr, err)
		return
	}
}
// Copyright (c) 2015, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License0. package mixnet import ( "bytes" "crypto/rand" "crypto/x509/pkix" "encoding/base64" "encoding/binary" "errors" "fmt" "io" "io/ioutil" "net" "os" "path" "testing" "time" "github.com/golang/protobuf/proto" "github.com/jlmucb/cloudproxy/go/tao" ) var password = make([]byte, 32) var network = "tcp" var localAddr = "127.0.0.1:0" var timeout, _ = time.ParseDuration("1s") var configDirName = "mixnet_test_domain" var id = pkix.Name{ Organization: []string{"Mixnet tester"}, } // genHostname() generates a random hostname. func genHostname() string { rb := make([]byte, 16) rand.Read(rb) return base64.URLEncoding.EncodeToString(rb) } func makeTrivialDomain(configDir string) (*tao.Domain, error) { var policyDomainConfig tao.DomainConfig policyDomainConfig.SetDefaults() policyDomainConfig.DomainInfo.GuardType = proto.String("AllowAll") configPath := path.Join(configDir, "tao.config") return tao.CreateDomain(policyDomainConfig, configPath, password) } func makeContext(batchSize int) (*RouterContext, *ProxyContext, *tao.Domain, error) { tempDir, err := ioutil.TempDir("", configDirName) if err != nil { return nil, nil, nil, err } // Create a domain with a LiberalGuard. d, err := makeTrivialDomain(tempDir) if err != nil { return nil, nil, nil, err } // Create a SoftTao from the domain. 
st, err := tao.NewSoftTao(tempDir, password) if err != nil { return nil, nil, nil, err } // Create router context. This loads the domain and binds a // socket and an anddress. router, err := NewRouterContext(d.ConfigPath, network, localAddr, localAddr, batchSize, timeout, &id, st) if err != nil { return nil, nil, nil, err } // Create a proxy context. This just loads the domain. proxy, err := NewProxyContext(d.ConfigPath, network, localAddr, timeout) if err != nil { router.Close() return nil, nil, nil, err } return router, proxy, d, nil } func makeProxyContext(proxyAddr string, domain *tao.Domain) (*ProxyContext, error) { // Create a proxy context. This just loads the domain. proxy, err := NewProxyContext(domain.ConfigPath, network, proxyAddr, timeout) if err != nil { return nil, err } return proxy, nil } type testResult struct { err error msg []byte } // Router accepts a connection from a proxy and reads a cell. func runRouterReadCell(router *RouterContext, ch chan<- testResult) { c, err := router.AcceptProxy() if err != nil { ch <- testResult{err, []byte{}} return } cell := make([]byte, CellBytes) if _, err := c.Read(cell); err != nil { ch <- testResult{err, cell} } else { ch <- testResult{nil, cell} } } // Proxy dials a router and sends a cell. func runProxyWriteCell(proxy *ProxyContext, addr string, msg []byte) error { c, err := proxy.DialRouter(network, addr) if err != nil { return err } if _, err := c.Write(msg); err != nil { return err } return nil } // Router accepts a connection from a proxy and handles a number of // requests. func runRouterHandleOneProxy(router *RouterContext, requestCount int, ch chan<- testResult) { _, err := router.AcceptProxy() if err != nil { ch <- testResult{err, []byte{}} return } for i := 0; i < requestCount; i++ { if err = <-router.errs; err != nil { ch <- testResult{err, nil} } } ch <- testResult{nil, nil} } // Router accepts a connection from a proxy and handles a number of // requests. 
func runRouterHandleOneRouter(router *RouterContext, requestCount int, ch chan<- testResult) { _, err := router.AcceptRouter() if err != nil { ch <- testResult{err, []byte{}} return } for i := 0; i < requestCount; i++ { if err = <-router.errs; err != nil { ch <- testResult{err, nil} } } ch <- testResult{nil, nil} } // Router accepts a connection from a proxy with multiple circuits func runRouterHandleOneProxyMultCircuits(router *RouterContext, numCircuits int, requestCounts []int, ch chan<- testResult) { _, err := router.AcceptProxy() if err != nil { ch <- testResult{err, []byte{}} return } for circ := 0; circ < numCircuits; circ++ { for i := 0; i < requestCounts[circ]; i++ { if err = <-router.errs; err != nil { ch <- testResult{err, nil} } } } ch <- testResult{nil, nil} } func runRouterHandleProxy(router *RouterContext, clientCt, requestCt int, ch chan<- testResult) { for i := 0; i < clientCt; i++ { _, err := router.AcceptProxy() if err != nil { ch <- testResult{err, []byte{}} return } } for i := 0; i < clientCt*requestCt; i++ { if err := <-router.errs; err != nil { ch <- testResult{err, nil} } } ch <- testResult{nil, nil} } // Proxy dials a router, creates a circuit, and sends a message over // the circuit. func runProxySendMessage(proxy *ProxyContext, rAddr, dAddr string, msg []byte) ([]byte, error) { id, err := proxy.CreateCircuit(rAddr, dAddr) if err != nil { return nil, err } c := proxy.circuits[id] if err = c.SendMessage(id, msg); err != nil { return nil, err } // dummyServer receives one message and replies. Without this line, // the router will report a broken pipe. msg, err = c.ReceiveMessage(id) return msg, err } // Test connection set up. func TestProxyRouterConnect(t *testing.T) { router, proxy, domain, err := makeContext(1) if err != nil { t.Fatal(err) } defer router.Close() defer proxy.Close() defer os.RemoveAll(path.Base(domain.ConfigPath)) routerAddr := router.proxyListener.Addr().String() // Wait for a connection from the proxy. 
ch := make(chan bool) go func(ch chan<- bool) { c, _ := router.AcceptProxy() defer c.Close() ch <- true }(ch) c, err := proxy.DialRouter(network, routerAddr) if err != nil { router.Close() t.Fatal(err) } defer c.Close() <-ch } // Test CREATE and DESTROY. func TestCreateDestroy(t *testing.T) { router, proxy, domain, err := makeContext(1) if err != nil { t.Fatal(err) } defer router.Close() defer proxy.Close() defer os.RemoveAll(path.Base(domain.ConfigPath)) rAddr := router.proxyListener.Addr().String() // The address doesn't matter here because no packets will be sent on // the established circuit. fakeAddr := "127.0.0.1:0" ch := make(chan testResult) go runRouterHandleOneProxy(router, 3, ch) id, err := proxy.CreateCircuit(rAddr, fakeAddr) if err != nil { t.Error("Error creating circuit:", err) } c := proxy.circuits[id] if err = c.SendMessage(id, []byte("hola!")); err != nil { t.Error("Error sending message:", err) } // (kwonalbert) Actually receive the error message first; // This gets rid of a race condition between error sending and destroying connection if _, err = c.ReceiveMessage(id); err == nil { t.Error("Expecting cannot establish connection error") } if err = proxy.DestroyCircuit(id); err != nil { t.Error("Error destroying circuit:", err) } res := <-ch if res.err != nil { t.Error("Unexpected router error:", res.err) } if len(router.conns) != 0 { t.Error("Expecting 0 connections, but have", len(router.conns)) } } // Test multiplexing for proxy func TestMultiplexProxyCircuit(t *testing.T) { router, proxy, domain, err := makeContext(1) if err != nil { t.Fatal(err) } defer router.Close() defer proxy.Close() defer os.RemoveAll(path.Base(domain.ConfigPath)) rAddr := router.proxyListener.Addr().String() // The address doesn't matter here because no packets will be sent on // the established circuit. 
ch := make(chan testResult) clientCt := 2 numReqs := make([]int, clientCt) fakeAddrs := make([]string, clientCt) ids := make([]uint64, clientCt) for i := range numReqs { numReqs[i] = 2 fakeAddrs[i] = fmt.Sprintf("127.0.0.1:%d", -i) } go runRouterHandleOneProxyMultCircuits(router, clientCt, numReqs, ch) for i := range numReqs { ids[i], err = proxy.CreateCircuit(rAddr, fakeAddrs[i]) if err != nil { t.Error("Couldn't create circuit:", err) } } unique := make(map[*Conn]bool) for _, conn := range proxy.circuits { unique[conn] = true } if len(unique) != 1 { t.Error(errors.New("Should only have one connection")) } for i := range numReqs { err = proxy.DestroyCircuit(ids[i]) if err != nil { t.Error("Couldn't destroy circuit:", err) } } res := <-ch if res.err != nil { t.Error("Unexpected router error:", res.err) } } // Test sending a cell. func TestProxyRouterCell(t *testing.T) { router, proxy, domain, err := makeContext(1) if err != nil { t.Fatal(err) } defer router.Close() defer proxy.Close() defer os.RemoveAll(path.Base(domain.ConfigPath)) ch := make(chan testResult) cell := make([]byte, CellBytes+1) for i := 0; i < len(cell); i++ { cell[i] = byte(i) } // This cell is just right. go runRouterReadCell(router, ch) if err = runProxyWriteCell(proxy, router.proxyListener.Addr().String(), cell[:CellBytes]); err != nil { t.Error(err) } res := <-ch if res.err != nil && res.err != io.EOF { t.Error(res.err) } else if bytes.Compare(res.msg, cell[:CellBytes]) != 0 { t.Errorf("Server got: %s", res.msg) } // This cell is too big. go runRouterReadCell(router, ch) if err = runProxyWriteCell(proxy, router.proxyListener.Addr().String(), cell); err != errCellLength { t.Error("runProxyWriteCell(): should have returned errCellLength") } res = <-ch if err := res.err.(net.Error); !err.Timeout() { t.Error("runRouterReadCell(): should have timed out") } } // Test setting up a circuit and relay a message to destination. Try // messages of various lengths. 
func TestProxyRouterRelay(t *testing.T) { router, proxy, domain, err := makeContext(1) if err != nil { t.Fatal(err) } defer router.Close() defer proxy.Close() defer os.RemoveAll(path.Base(domain.ConfigPath)) routerCh := make(chan testResult) dstCh := make(chan testResult) dstAddrCh := make(chan string) // Create a long message. msg := make([]byte, (CellBytes*5)+237) for i := 0; i < len(msg); i++ { msg[i] = byte(i) } var res testResult trials := []int{ 37, // A short message CellBytes - (BODY + 8), // A cell len(msg), // A long message } go runDummyServer(len(trials), 1, dstCh, dstAddrCh) dstAddr := <-dstAddrCh rAddr := router.proxyListener.Addr().String() // First two messages fits in one cell, the last one is over multiple cells // Funky counting, since the cells contain some meta data.. longReqCt := 1 + ((len(msg)-(CellBytes-(BODY+8)))/(CellBytes-BODY) + 1) reqCts := []int{2, 2, longReqCt + 1} go runRouterHandleOneProxyMultCircuits(router, len(trials), reqCts, routerCh) for _, l := range trials { reply, err := runProxySendMessage(proxy, rAddr, dstAddr, msg[:l]) if err != nil { t.Errorf("relay (length=%d): %s", l, err) } res = <-dstCh if res.err != nil { t.Error(res.err) } else if bytes.Compare(reply, msg[:l]) != 0 { t.Errorf("relay (length=%d): received: %v", l, reply) t.Errorf("relay (length=%d): sent: %x", l, msg[:l]) } } res = <-routerCh if res.err != nil { t.Errorf("relay error", res.err) } } // Test sending malformed messages from the proxy to the router. 
func TestMaliciousProxyRouterRelay(t *testing.T) { router, proxy, domain, err := makeContext(1) if err != nil { t.Fatal(err) } defer router.Close() defer proxy.Close() defer os.RemoveAll(path.Base(domain.ConfigPath)) routerAddr := router.proxyListener.Addr().String() ch := make(chan testResult) go runRouterHandleOneProxyMultCircuits(router, 2, []int{3, 2}, ch) fakeAddr := "127.0.0.1:0" id, err := proxy.CreateCircuit(routerAddr, fakeAddr) if err != nil { t.Error(err) } cell := make([]byte, CellBytes) binary.PutUvarint(cell[ID:], id) c := proxy.circuits[id] // Unrecognized cell type. cell[TYPE] = 0xff if _, err = c.Write(cell); err != nil { t.Error(err) } _, err = c.ReceiveMessage(id) if err == nil { t.Error("ReceiveMessage incorrectly succeeded") } // Message too long. cell[TYPE] = msgCell binary.PutUvarint(cell[BODY:], uint64(MaxMsgBytes+1)) if _, err := c.Write(cell); err != nil { t.Error(err) } _, err = c.ReceiveMessage(id) if err == nil { t.Error("ReceiveMessage incorrectly succeeded") } // Bogus destination. id, err = proxy.CreateCircuit(routerAddr, "127.0.0.1:9999") if err != nil { t.Error(err) } c = proxy.circuits[id] if err = c.SendMessage(id, []byte("Are you there?")); err != nil { t.Error(err) } _, err = c.ReceiveMessage(id) if err == nil { t.Error("Receive message incorrectly succeeded") } res := <-ch if res.err != nil { t.Error("Not expecting any router errors, but got", res.err) } // Multihop circuits not supported yet. // go runRouterHandleOneProxy(router, 1, ch) // c, err = proxy.CreateCircuit(routerAddr, "one:234", "two:34", "three:4") // if err == nil { // t.Error("should have gotten \"multi-hop circuits not implemented\" from router") // } // <-ch // c.Close() } // Test timeout on CreateMessage(). 
func TestCreateTimeout(t *testing.T) { router, proxy, domain, err := makeContext(2) if err != nil { t.Fatal(err) } defer router.Close() defer proxy.Close() defer os.RemoveAll(path.Base(domain.ConfigPath)) routerAddr := router.proxyListener.Addr().String() ch := make(chan testResult) // The proxy should get a timeout if it's the only connecting client. go runRouterHandleProxy(router, 1, 1, ch) hostAddr := genHostname() + ":80" _, err = proxy.CreateCircuit(routerAddr, hostAddr) if err == nil { t.Errorf("proxy.CreateCircuit(%s, %s) incorrectly succeeded when it should have timed out", routerAddr, hostAddr) } res := <-ch if res.err != nil { t.Error(res.err) } } // Test timeout on ReceiveMessage(). func TestSendMessageTimeout(t *testing.T) { router, proxy, domain, err := makeContext(2) if err != nil { t.Fatal(err) } defer router.Close() defer proxy.Close() defer os.RemoveAll(path.Base(domain.ConfigPath)) routerAddr := router.proxyListener.Addr().String() ch := make(chan testResult) done := make(chan bool) go runRouterHandleProxy(router, 2, 2, ch) // Proxy 1 creates a circuit, sends a message and awaits a reply. go func() { id, err := proxy.CreateCircuit(routerAddr, genHostname()+":80") if err != nil { t.Error(err) } c := proxy.circuits[id] if err = c.SendMessage(id, []byte("hello")); err != nil { t.Error(err) } if _, err = c.ReceiveMessage(id); err == nil { t.Error("receiveMessage incorrectly succeeded") } done <- true }() // Proxy 2 just creates a circuit. go func() { _, err = proxy.CreateCircuit(routerAddr, genHostname()+":80") if err != nil { t.Error(err) } done <- true }() <-done <-done } // Test mixnet end-to-end with many clients. Proxy a protocol through mixnet. // The client sends the server a message and the server echoes it back. 
func TestMixnetSingleHop(t *testing.T) { clientCt := 10 router, proxy, domain, err := makeContext(clientCt) if err != nil { t.Fatal(err) } proxy.Close() defer router.Close() defer os.RemoveAll(path.Base(domain.ConfigPath)) routerAddr := router.proxyListener.Addr().String() var res testResult clientCh := make(chan testResult, clientCt) proxyCh := make(chan testResult, clientCt) routerCh := make(chan testResult) dstCh := make(chan testResult, clientCt) dstAddrCh := make(chan string) go runRouterHandleProxy(router, clientCt, 3, routerCh) go runDummyServer(clientCt, 1, dstCh, dstAddrCh) dstAddr := <-dstAddrCh for i := 0; i < clientCt; i++ { go func(pid int, ch chan<- testResult) { pa := "127.0.0.1:0" proxy, err := makeProxyContext(pa, domain) if err != nil { ch <- testResult{err, nil} return } defer proxy.Close() proxyAddr := proxy.listener.Addr().String() go runSocksServerOne(proxy, routerAddr, proxyCh) msg := []byte(fmt.Sprintf("Hello, my name is %d", pid)) ch <- runSocksClient(proxyAddr, dstAddr, msg) }(i, clientCh) } // Wait for clients to finish. for i := 0; i < clientCt; i++ { res = <-clientCh if res.err != nil { t.Error(res.err) } else { t.Log("client got:", string(res.msg)) } } // Wait for proxies to finish. for i := 0; i < clientCt; i++ { res = <-proxyCh if res.err != nil { t.Error(res.err) } } // Wait for server to finish. for i := 0; i < clientCt; i++ { res = <-dstCh if res.err != nil { t.Error(res.err) } } // Wait for router to finish. res = <-routerCh if res.err != nil { t.Error("Unexpected router error:", res.err) } } adding test for multihop circuit creation // Copyright (c) 2015, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mixnet

import (
	"bytes"
	"crypto/rand"
	"crypto/x509/pkix"
	"encoding/base64"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"os"
	"path"
	"testing"
	"time"

	"github.com/golang/protobuf/proto"
	"github.com/jlmucb/cloudproxy/go/tao"
)

// Shared fixtures for the tests in this file.
var password = make([]byte, 32)
var network = "tcp"
var localAddr = "127.0.0.1:0"
var timeout, _ = time.ParseDuration("1s")
var configDirName = "mixnet_test_domain"

var id = pkix.Name{
	Organization: []string{"Mixnet tester"},
}

// genHostname() generates a random hostname.
func genHostname() string {
	rb := make([]byte, 16)
	rand.Read(rb)
	return base64.URLEncoding.EncodeToString(rb)
}

// makeTrivialDomain creates a Tao domain under configDir whose guard
// allows everything ("AllowAll").
func makeTrivialDomain(configDir string) (*tao.Domain, error) {
	var policyDomainConfig tao.DomainConfig
	policyDomainConfig.SetDefaults()
	policyDomainConfig.DomainInfo.GuardType = proto.String("AllowAll")
	configPath := path.Join(configDir, "tao.config")
	return tao.CreateDomain(policyDomainConfig, configPath, password)
}

// makeContext builds a router (with the given batch size) and a proxy in a
// fresh temporary Tao domain. The caller is responsible for closing both and
// removing the domain's config directory.
func makeContext(batchSize int) (*RouterContext, *ProxyContext, *tao.Domain, error) {
	tempDir, err := ioutil.TempDir("", configDirName)
	if err != nil {
		return nil, nil, nil, err
	}

	// Create a domain with a LiberalGuard.
	d, err := makeTrivialDomain(tempDir)
	if err != nil {
		return nil, nil, nil, err
	}

	// Create a SoftTao from the domain.
	st, err := tao.NewSoftTao(tempDir, password)
	if err != nil {
		return nil, nil, nil, err
	}

	// Create router context. This loads the domain and binds a
	// socket and an address.
	router, err := NewRouterContext(d.ConfigPath, network, localAddr, localAddr, batchSize, timeout, &id, st)
	if err != nil {
		return nil, nil, nil, err
	}

	// Create a proxy context. This just loads the domain.
	proxy, err := NewProxyContext(d.ConfigPath, network, localAddr, timeout)
	if err != nil {
		router.Close()
		return nil, nil, nil, err
	}

	return router, proxy, d, nil
}

// makeProxyContext creates an extra proxy bound to proxyAddr in an already
// existing domain.
func makeProxyContext(proxyAddr string, domain *tao.Domain) (*ProxyContext, error) {
	// Create a proxy context. This just loads the domain.
	proxy, err := NewProxyContext(domain.ConfigPath, network, proxyAddr, timeout)
	if err != nil {
		return nil, err
	}
	return proxy, nil
}

// testResult carries a goroutine's outcome back to the test body: an error
// (nil on success) and an optional payload.
type testResult struct {
	err error
	msg []byte
}

// Router accepts a connection from a proxy and reads a cell.
func runRouterReadCell(router *RouterContext, ch chan<- testResult) {
	c, err := router.AcceptProxy()
	if err != nil {
		ch <- testResult{err, []byte{}}
		return
	}

	cell := make([]byte, CellBytes)
	if _, err := c.Read(cell); err != nil {
		ch <- testResult{err, cell}
	} else {
		ch <- testResult{nil, cell}
	}
}

// Proxy dials a router and sends a cell.
func runProxyWriteCell(proxy *ProxyContext, addr string, msg []byte) error {
	c, err := proxy.DialRouter(network, addr)
	if err != nil {
		return err
	}

	if _, err := c.Write(msg); err != nil {
		return err
	}

	return nil
}

// Router accepts a connection from a proxy and handles a number of
// requests. Each request outcome is read from router.errs; any error is
// forwarded on ch, and a final nil result signals completion.
func runRouterHandleOneProxy(router *RouterContext, requestCount int, ch chan<- testResult) {
	_, err := router.AcceptProxy()
	if err != nil {
		ch <- testResult{err, []byte{}}
		return
	}

	for i := 0; i < requestCount; i++ {
		if err = <-router.errs; err != nil {
			ch <- testResult{err, nil}
		}
	}

	ch <- testResult{nil, nil}
}

// Router accepts a connection from a proxy and handles a number of
// requests.
func runRouterHandleOneRouter(router *RouterContext, requestCount int, ch chan<- testResult) { _, err := router.AcceptRouter() if err != nil { ch <- testResult{err, []byte{}} return } for i := 0; i < requestCount; i++ { if err = <-router.errs; err != nil { ch <- testResult{err, nil} } } ch <- testResult{nil, nil} } // Router accepts a connection from a proxy with multiple circuits func runRouterHandleOneProxyMultCircuits(router *RouterContext, numCircuits int, requestCounts []int, ch chan<- testResult) { _, err := router.AcceptProxy() if err != nil { ch <- testResult{err, []byte{}} return } for circ := 0; circ < numCircuits; circ++ { for i := 0; i < requestCounts[circ]; i++ { if err = <-router.errs; err != nil { ch <- testResult{err, nil} } } } ch <- testResult{nil, nil} } func runRouterHandleProxy(router *RouterContext, clientCt, requestCt int, ch chan<- testResult) { for i := 0; i < clientCt; i++ { _, err := router.AcceptProxy() if err != nil { ch <- testResult{err, []byte{}} return } } for i := 0; i < clientCt*requestCt; i++ { if err := <-router.errs; err != nil { ch <- testResult{err, nil} } } ch <- testResult{nil, nil} } // Proxy dials a router, creates a circuit, and sends a message over // the circuit. func runProxySendMessage(proxy *ProxyContext, rAddr, dAddr string, msg []byte) ([]byte, error) { id, err := proxy.CreateCircuit(rAddr, dAddr) if err != nil { return nil, err } c := proxy.circuits[id] if err = c.SendMessage(id, msg); err != nil { return nil, err } // dummyServer receives one message and replies. Without this line, // the router will report a broken pipe. msg, err = c.ReceiveMessage(id) return msg, err } // Test connection set up. func TestProxyRouterConnect(t *testing.T) { router, proxy, domain, err := makeContext(1) if err != nil { t.Fatal(err) } defer router.Close() defer proxy.Close() defer os.RemoveAll(path.Base(domain.ConfigPath)) routerAddr := router.proxyListener.Addr().String() // Wait for a connection from the proxy. 
ch := make(chan bool) go func(ch chan<- bool) { c, _ := router.AcceptProxy() defer c.Close() ch <- true }(ch) c, err := proxy.DialRouter(network, routerAddr) if err != nil { router.Close() t.Fatal(err) } defer c.Close() <-ch } // Test CREATE and DESTROY. func TestCreateDestroy(t *testing.T) { router, proxy, domain, err := makeContext(1) if err != nil { t.Fatal(err) } defer router.Close() defer proxy.Close() defer os.RemoveAll(path.Base(domain.ConfigPath)) rAddr := router.proxyListener.Addr().String() // The address doesn't matter here because no packets will be sent on // the established circuit. fakeAddr := "127.0.0.1:0" ch := make(chan testResult) go runRouterHandleOneProxy(router, 3, ch) id, err := proxy.CreateCircuit(rAddr, fakeAddr) if err != nil { t.Error("Error creating circuit:", err) } c := proxy.circuits[id] if err = c.SendMessage(id, []byte("hola!")); err != nil { t.Error("Error sending message:", err) } // (kwonalbert) Actually receive the error message first; // This gets rid of a race condition between error sending and destroying connection if _, err = c.ReceiveMessage(id); err == nil { t.Error("Expecting cannot establish connection error") } if err = proxy.DestroyCircuit(id); err != nil { t.Error("Error destroying circuit:", err) } res := <-ch if res.err != nil { t.Error("Unexpected router error:", res.err) } if len(router.conns) != 0 { t.Error("Expecting 0 connections, but have", len(router.conns)) } } func TestCreateDestroyMultiHop(t *testing.T) { router1, proxy, domain, err := makeContext(1) if err != nil { t.Fatal(err) } router2, proxy2, _, err := makeContext(1) if err != nil { t.Fatal(err) } proxy2.Close() defer router1.Close() defer router2.Close() defer proxy.Close() defer os.RemoveAll(path.Base(domain.ConfigPath)) rAddr1 := router1.proxyListener.Addr().String() rAddr2 := router2.routerListener.Addr().String() // The address doesn't matter here because no packets will be sent on // the established circuit. 
fakeAddr := "127.0.0.1:0" ch1 := make(chan testResult) ch2 := make(chan testResult) go runRouterHandleOneProxy(router1, 6, ch1) go runRouterHandleOneRouter(router2, 3, ch2) id, err := proxy.CreateCircuit(rAddr1, rAddr2, fakeAddr) if err != nil { t.Error(err) } c := proxy.circuits[id] if err = c.SendMessage(id, []byte("hola!")); err != nil { t.Error(err) } // (kwonalbert) Actually receive the error message first; // This gets rid of a race condition between error sending and destroying connection if _, err = c.ReceiveMessage(id); err == nil { t.Error(errors.New("Expecting cannot establish connection error")) } if err = proxy.DestroyCircuit(id); err != nil { t.Error(err) } res := <-ch1 if res.err != nil { t.Error("Unexpected router error:", res.err) } res = <-ch2 if res.err != nil { t.Error("Unexpected router error:", res.err) } if len(router1.conns) != 0 { t.Error("Expecting 0 connections, but have", len(router1.conns)) } else if len(router2.conns) != 0 { t.Error("Expecting 0 connections, but have", len(router2.conns)) } } // Test multiplexing for proxy func TestMultiplexProxyCircuit(t *testing.T) { router, proxy, domain, err := makeContext(1) if err != nil { t.Fatal(err) } defer router.Close() defer proxy.Close() defer os.RemoveAll(path.Base(domain.ConfigPath)) rAddr := router.proxyListener.Addr().String() // The address doesn't matter here because no packets will be sent on // the established circuit. 
ch := make(chan testResult) clientCt := 2 numReqs := make([]int, clientCt) fakeAddrs := make([]string, clientCt) ids := make([]uint64, clientCt) for i := range numReqs { numReqs[i] = 2 fakeAddrs[i] = fmt.Sprintf("127.0.0.1:%d", -i) } go runRouterHandleOneProxyMultCircuits(router, clientCt, numReqs, ch) for i := range numReqs { ids[i], err = proxy.CreateCircuit(rAddr, fakeAddrs[i]) if err != nil { t.Error("Couldn't create circuit:", err) } } unique := make(map[*Conn]bool) for _, conn := range proxy.circuits { unique[conn] = true } if len(unique) != 1 { t.Error(errors.New("Should only have one connection")) } for i := range numReqs { err = proxy.DestroyCircuit(ids[i]) if err != nil { t.Error("Couldn't destroy circuit:", err) } } res := <-ch if res.err != nil { t.Error("Unexpected router error:", res.err) } } // Test sending a cell. func TestProxyRouterCell(t *testing.T) { router, proxy, domain, err := makeContext(1) if err != nil { t.Fatal(err) } defer router.Close() defer proxy.Close() defer os.RemoveAll(path.Base(domain.ConfigPath)) ch := make(chan testResult) cell := make([]byte, CellBytes+1) for i := 0; i < len(cell); i++ { cell[i] = byte(i) } // This cell is just right. go runRouterReadCell(router, ch) if err = runProxyWriteCell(proxy, router.proxyListener.Addr().String(), cell[:CellBytes]); err != nil { t.Error(err) } res := <-ch if res.err != nil && res.err != io.EOF { t.Error(res.err) } else if bytes.Compare(res.msg, cell[:CellBytes]) != 0 { t.Errorf("Server got: %s", res.msg) } // This cell is too big. go runRouterReadCell(router, ch) if err = runProxyWriteCell(proxy, router.proxyListener.Addr().String(), cell); err != errCellLength { t.Error("runProxyWriteCell(): should have returned errCellLength") } res = <-ch if err := res.err.(net.Error); !err.Timeout() { t.Error("runRouterReadCell(): should have timed out") } } // Test setting up a circuit and relay a message to destination. Try // messages of various lengths. 
func TestProxyRouterRelay(t *testing.T) { router, proxy, domain, err := makeContext(1) if err != nil { t.Fatal(err) } defer router.Close() defer proxy.Close() defer os.RemoveAll(path.Base(domain.ConfigPath)) routerCh := make(chan testResult) dstCh := make(chan testResult) dstAddrCh := make(chan string) // Create a long message. msg := make([]byte, (CellBytes*5)+237) for i := 0; i < len(msg); i++ { msg[i] = byte(i) } var res testResult trials := []int{ 37, // A short message CellBytes - (BODY + 8), // A cell len(msg), // A long message } go runDummyServer(len(trials), 1, dstCh, dstAddrCh) dstAddr := <-dstAddrCh rAddr := router.proxyListener.Addr().String() // First two messages fits in one cell, the last one is over multiple cells // Funky counting, since the cells contain some meta data.. longReqCt := 1 + ((len(msg)-(CellBytes-(BODY+8)))/(CellBytes-BODY) + 1) reqCts := []int{2, 2, longReqCt + 1} go runRouterHandleOneProxyMultCircuits(router, len(trials), reqCts, routerCh) for _, l := range trials { reply, err := runProxySendMessage(proxy, rAddr, dstAddr, msg[:l]) if err != nil { t.Errorf("relay (length=%d): %s", l, err) } res = <-dstCh if res.err != nil { t.Error(res.err) } else if bytes.Compare(reply, msg[:l]) != 0 { t.Errorf("relay (length=%d): received: %v", l, reply) t.Errorf("relay (length=%d): sent: %x", l, msg[:l]) } } res = <-routerCh if res.err != nil { t.Errorf("relay error", res.err) } } // Test sending malformed messages from the proxy to the router. 
func TestMaliciousProxyRouterRelay(t *testing.T) { router, proxy, domain, err := makeContext(1) if err != nil { t.Fatal(err) } defer router.Close() defer proxy.Close() defer os.RemoveAll(path.Base(domain.ConfigPath)) routerAddr := router.proxyListener.Addr().String() ch := make(chan testResult) go runRouterHandleOneProxyMultCircuits(router, 2, []int{3, 2}, ch) fakeAddr := "127.0.0.1:0" id, err := proxy.CreateCircuit(routerAddr, fakeAddr) if err != nil { t.Error(err) } cell := make([]byte, CellBytes) binary.LittleEndian.PutUint64(cell[ID:], id) c := proxy.circuits[id] // Unrecognized cell type. cell[TYPE] = 0xff if _, err = c.Write(cell); err != nil { t.Error(err) } _, err = c.ReceiveMessage(id) if err == nil { t.Error("ReceiveMessage incorrectly succeeded") } // Message too long. cell[TYPE] = msgCell binary.LittleEndian.PutUint64(cell[BODY:], uint64(MaxMsgBytes+1)) if _, err := c.Write(cell); err != nil { t.Error(err) } _, err = c.ReceiveMessage(id) if err == nil { t.Error("ReceiveMessage incorrectly succeeded") } // Bogus destination. id, err = proxy.CreateCircuit(routerAddr, "127.0.0.1:9999") if err != nil { t.Error(err) } c = proxy.circuits[id] if err = c.SendMessage(id, []byte("Are you there?")); err != nil { t.Error(err) } _, err = c.ReceiveMessage(id) if err == nil { t.Error("Receive message incorrectly succeeded") } res := <-ch if res.err != nil { t.Error("Not expecting any router errors, but got", res.err) } } // Test timeout on CreateMessage(). func TestCreateTimeout(t *testing.T) { router, proxy, domain, err := makeContext(2) if err != nil { t.Fatal(err) } defer router.Close() defer proxy.Close() defer os.RemoveAll(path.Base(domain.ConfigPath)) routerAddr := router.proxyListener.Addr().String() ch := make(chan testResult) // The proxy should get a timeout if it's the only connecting client. 
go runRouterHandleProxy(router, 1, 1, ch) hostAddr := genHostname() + ":80" _, err = proxy.CreateCircuit(routerAddr, hostAddr) if err == nil { t.Errorf("proxy.CreateCircuit(%s, %s) incorrectly succeeded when it should have timed out", routerAddr, hostAddr) } res := <-ch if res.err != nil { t.Error(res.err) } } // Test timeout on ReceiveMessage(). func TestSendMessageTimeout(t *testing.T) { router, proxy, domain, err := makeContext(2) if err != nil { t.Fatal(err) } defer router.Close() defer proxy.Close() defer os.RemoveAll(path.Base(domain.ConfigPath)) routerAddr := router.proxyListener.Addr().String() ch := make(chan testResult) done := make(chan bool) go runRouterHandleProxy(router, 2, 2, ch) // Proxy 1 creates a circuit, sends a message and awaits a reply. go func() { id, err := proxy.CreateCircuit(routerAddr, genHostname()+":80") if err != nil { t.Error(err) } c := proxy.circuits[id] if err = c.SendMessage(id, []byte("hello")); err != nil { t.Error(err) } if _, err = c.ReceiveMessage(id); err == nil { t.Error("receiveMessage incorrectly succeeded") } done <- true }() // Proxy 2 just creates a circuit. go func() { _, err = proxy.CreateCircuit(routerAddr, genHostname()+":80") if err != nil { t.Error(err) } done <- true }() <-done <-done } // Test mixnet end-to-end with many clients. Proxy a protocol through mixnet. // The client sends the server a message and the server echoes it back. 
func TestMixnetSingleHop(t *testing.T) { clientCt := 10 router, proxy, domain, err := makeContext(clientCt) if err != nil { t.Fatal(err) } proxy.Close() defer router.Close() defer os.RemoveAll(path.Base(domain.ConfigPath)) routerAddr := router.proxyListener.Addr().String() var res testResult clientCh := make(chan testResult, clientCt) proxyCh := make(chan testResult, clientCt) routerCh := make(chan testResult) dstCh := make(chan testResult, clientCt) dstAddrCh := make(chan string) go runRouterHandleProxy(router, clientCt, 3, routerCh) go runDummyServer(clientCt, 1, dstCh, dstAddrCh) dstAddr := <-dstAddrCh for i := 0; i < clientCt; i++ { go func(pid int, ch chan<- testResult) { pa := "127.0.0.1:0" proxy, err := makeProxyContext(pa, domain) if err != nil { ch <- testResult{err, nil} return } defer proxy.Close() proxyAddr := proxy.listener.Addr().String() go runSocksServerOne(proxy, routerAddr, proxyCh) msg := []byte(fmt.Sprintf("Hello, my name is %d", pid)) ch <- runSocksClient(proxyAddr, dstAddr, msg) }(i, clientCh) } // Wait for clients to finish. for i := 0; i < clientCt; i++ { res = <-clientCh if res.err != nil { t.Error(res.err) } else { t.Log("client got:", string(res.msg)) } } // Wait for proxies to finish. for i := 0; i < clientCt; i++ { res = <-proxyCh if res.err != nil { t.Error(res.err) } } // Wait for server to finish. for i := 0; i < clientCt; i++ { res = <-dstCh if res.err != nil { t.Error(res.err) } } // Wait for router to finish. res = <-routerCh if res.err != nil { t.Error("Unexpected router error:", res.err) } }
package chat import ( "bytes" "errors" "image" "image/color/palette" "image/draw" "image/gif" "image/jpeg" "io" "golang.org/x/net/context" "github.com/keybase/client/go/protocol/keybase1" "github.com/nfnt/resize" "camlistore.org/pkg/images" ) const ( previewImageWidth = 320 previewImageHeight = 320 ) type BufferSource struct { buf *bytes.Buffer basename string } func newBufferSource(buf *bytes.Buffer, basename string) *BufferSource { return &BufferSource{ buf: buf, basename: basename, } } func (b *BufferSource) Basename() string { return b.basename } func (b *BufferSource) FileSize() int { return b.buf.Len() } func (b *BufferSource) Open(sessionID int, cli *keybase1.StreamUiClient) (ReadResetter, error) { if b.buf == nil { return nil, errors.New("nil buf in BufferSource") } return newBufReadResetter(b.buf.Bytes()), nil } func (b *BufferSource) Close() error { b.buf.Reset() return nil } type bufReadResetter struct { buf []byte r *bytes.Reader } func newBufReadResetter(buf []byte) *bufReadResetter { return &bufReadResetter{ buf: buf, r: bytes.NewReader(buf), } } func (b *bufReadResetter) Read(p []byte) (int, error) { return b.r.Read(p) } func (b *bufReadResetter) Reset() error { b.r.Reset(b.buf) return nil } type PreviewRes struct { Source *BufferSource ContentType string BaseWidth int BaseHeight int PreviewWidth int PreviewHeight int } // Preview creates preview assets from src. It returns an in-memory BufferSource // and the content type of the preview asset. func Preview(ctx context.Context, src io.Reader, contentType, basename string) (*PreviewRes, error) { switch contentType { case "image/jpeg", "image/png": return previewImage(ctx, src, basename) case "image/gif": return previewGIF(ctx, src, basename) } return nil, nil } // previewImage will resize a single-frame image into a jpeg. func previewImage(ctx context.Context, src io.Reader, basename string) (*PreviewRes, error) { // images.Decode in camlistore correctly handles exif orientation information. 
img, _, err := images.Decode(src, nil) if err != nil { return nil, err } width, height := previewDimensions(img.Bounds()) // nfnt/resize with NearestNeighbor is the fastest I've found. preview := resize.Resize(width, height, img, resize.NearestNeighbor) var buf bytes.Buffer if err := jpeg.Encode(&buf, preview, nil); err != nil { return nil, err } return &PreviewRes{ Source: newBufferSource(&buf, basename), ContentType: "image/jpeg", BaseWidth: img.Bounds().Dx(), BaseHeight: img.Bounds().Dy(), PreviewWidth: int(width), PreviewHeight: int(height), }, nil } // previewGIF handles resizing multiple frames in an animated gif. // Based on code in https://github.com/dpup/go-scratch/blob/master/gif-resize/gif-resize.go func previewGIF(ctx context.Context, src io.Reader, basename string) (*PreviewRes, error) { g, err := gif.DecodeAll(src) if err != nil { return nil, err } if len(g.Image) == 0 { return nil, errors.New("no image frames in GIF") } // create a new image based on the first frame to draw // the incremental frames origBounds := g.Image[0].Bounds() img := image.NewRGBA(origBounds) // draw each frame, then resize it, replacing the existing frames. 
width, height := previewDimensions(origBounds) for index, frame := range g.Image { bounds := frame.Bounds() draw.Draw(img, bounds, frame, bounds.Min, draw.Over) g.Image[index] = imageToPaletted(resize.Resize(width, height, img, resize.NearestNeighbor)) } // change the image Config to the new size g.Config.Width = int(width) g.Config.Height = int(height) // encode all the frames into buf var buf bytes.Buffer if err := gif.EncodeAll(&buf, g); err != nil { return nil, err } return &PreviewRes{ Source: newBufferSource(&buf, basename), ContentType: "image/gif", BaseWidth: origBounds.Dx(), BaseHeight: origBounds.Dy(), PreviewWidth: int(width), PreviewHeight: int(height), }, nil } func previewDimensions(origBounds image.Rectangle) (uint, uint) { origWidth := uint(origBounds.Dx()) origHeight := uint(origBounds.Dy()) if previewImageWidth >= origWidth && previewImageHeight >= origHeight { return origWidth, origHeight } newWidth, newHeight := origWidth, origHeight // Preserve aspect ratio if origWidth > previewImageWidth { newHeight = uint(origHeight * previewImageWidth / origWidth) if newHeight < 1 { newHeight = 1 } newWidth = previewImageWidth } if newHeight > previewImageHeight { newWidth = uint(newWidth * previewImageHeight / newHeight) if newWidth < 1 { newWidth = 1 } newHeight = previewImageHeight } return newWidth, newHeight } // imageToPaletted converts image.Image to *image.Paletted. 
// From https://github.com/dpup/go-scratch/blob/master/gif-resize/gif-resize.go func imageToPaletted(img image.Image) *image.Paletted { b := img.Bounds() pm := image.NewPaletted(b, palette.Plan9) draw.FloydSteinberg.Draw(pm, b, img, image.ZP) return pm } Resize tweaks, 640 package chat import ( "bytes" "errors" "image" "image/color/palette" "image/draw" "image/gif" "image/jpeg" "image/png" "io" "golang.org/x/net/context" "github.com/keybase/client/go/protocol/keybase1" "github.com/nfnt/resize" "camlistore.org/pkg/images" ) const ( previewImageWidth = 640 previewImageHeight = 640 ) type BufferSource struct { buf *bytes.Buffer basename string } func newBufferSource(buf *bytes.Buffer, basename string) *BufferSource { return &BufferSource{ buf: buf, basename: basename, } } func (b *BufferSource) Basename() string { return b.basename } func (b *BufferSource) FileSize() int { return b.buf.Len() } func (b *BufferSource) Open(sessionID int, cli *keybase1.StreamUiClient) (ReadResetter, error) { if b.buf == nil { return nil, errors.New("nil buf in BufferSource") } return newBufReadResetter(b.buf.Bytes()), nil } func (b *BufferSource) Close() error { b.buf.Reset() return nil } type bufReadResetter struct { buf []byte r *bytes.Reader } func newBufReadResetter(buf []byte) *bufReadResetter { return &bufReadResetter{ buf: buf, r: bytes.NewReader(buf), } } func (b *bufReadResetter) Read(p []byte) (int, error) { return b.r.Read(p) } func (b *bufReadResetter) Reset() error { b.r.Reset(b.buf) return nil } type PreviewRes struct { Source *BufferSource ContentType string BaseWidth int BaseHeight int PreviewWidth int PreviewHeight int } // Preview creates preview assets from src. It returns an in-memory BufferSource // and the content type of the preview asset. 
// Preview creates preview assets from src. It returns an in-memory
// BufferSource and the content type of the preview asset. Unsupported
// content types return (nil, nil).
func Preview(ctx context.Context, src io.Reader, contentType, basename string) (*PreviewRes, error) {
	switch contentType {
	case "image/jpeg", "image/png":
		return previewImage(ctx, src, basename, contentType)
	case "image/gif":
		return previewGIF(ctx, src, basename)
	}

	return nil, nil
}

// previewImage will resize a single-frame image. PNG input is re-encoded as
// PNG (preserving transparency); everything else is encoded as JPEG.
func previewImage(ctx context.Context, src io.Reader, basename, contentType string) (*PreviewRes, error) {
	// images.Decode in camlistore correctly handles exif orientation information.
	img, _, err := images.Decode(src, nil)
	if err != nil {
		return nil, err
	}

	width, height := previewDimensions(img.Bounds())

	// Bicubic interpolation trades some speed for better-looking previews.
	// (An earlier revision used NearestNeighbor for speed.)
	preview := resize.Resize(width, height, img, resize.Bicubic)

	var buf bytes.Buffer
	var encodeContentType string
	if contentType == "image/png" {
		encodeContentType = "image/png"
		if err := png.Encode(&buf, preview); err != nil {
			return nil, err
		}
	} else {
		encodeContentType = "image/jpeg"
		if err := jpeg.Encode(&buf, preview, &jpeg.Options{Quality: 90}); err != nil {
			return nil, err
		}
	}

	return &PreviewRes{
		Source:        newBufferSource(&buf, basename),
		ContentType:   encodeContentType,
		BaseWidth:     img.Bounds().Dx(),
		BaseHeight:    img.Bounds().Dy(),
		PreviewWidth:  int(width),
		PreviewHeight: int(height),
	}, nil
}

// previewGIF handles resizing multiple frames in an animated gif.
// Based on code in https://github.com/dpup/go-scratch/blob/master/gif-resize/gif-resize.go
func previewGIF(ctx context.Context, src io.Reader, basename string) (*PreviewRes, error) {
	g, err := gif.DecodeAll(src)
	if err != nil {
		return nil, err
	}

	if len(g.Image) == 0 {
		return nil, errors.New("no image frames in GIF")
	}

	// create a new image based on the first frame to draw
	// the incremental frames
	origBounds := g.Image[0].Bounds()
	img := image.NewRGBA(origBounds)

	// draw each frame, then resize it, replacing the existing frames.
	width, height := previewDimensions(origBounds)
	for index, frame := range g.Image {
		bounds := frame.Bounds()
		// Composite the (possibly partial) frame onto the accumulated
		// canvas before resizing, since GIF frames can be incremental.
		draw.Draw(img, bounds, frame, bounds.Min, draw.Over)
		g.Image[index] = imageToPaletted(resize.Resize(width, height, img, resize.Bicubic))
	}

	// change the image Config to the new size
	g.Config.Width = int(width)
	g.Config.Height = int(height)

	// encode all the frames into buf
	var buf bytes.Buffer
	if err := gif.EncodeAll(&buf, g); err != nil {
		return nil, err
	}

	return &PreviewRes{
		Source:        newBufferSource(&buf, basename),
		ContentType:   "image/gif",
		BaseWidth:     origBounds.Dx(),
		BaseHeight:    origBounds.Dy(),
		PreviewWidth:  int(width),
		PreviewHeight: int(height),
	}, nil
}

// previewDimensions returns the (width, height) a preview should be scaled
// to: the original size if it already fits within previewImageWidth x
// previewImageHeight, otherwise the largest size that fits while preserving
// the aspect ratio (each dimension clamped to at least 1).
func previewDimensions(origBounds image.Rectangle) (uint, uint) {
	origWidth := uint(origBounds.Dx())
	origHeight := uint(origBounds.Dy())

	if previewImageWidth >= origWidth && previewImageHeight >= origHeight {
		return origWidth, origHeight
	}

	newWidth, newHeight := origWidth, origHeight
	// Preserve aspect ratio
	if origWidth > previewImageWidth {
		newHeight = uint(origHeight * previewImageWidth / origWidth)
		if newHeight < 1 {
			newHeight = 1
		}
		newWidth = previewImageWidth
	}

	if newHeight > previewImageHeight {
		newWidth = uint(newWidth * previewImageHeight / newHeight)
		if newWidth < 1 {
			newWidth = 1
		}
		newHeight = previewImageHeight
	}

	return newWidth, newHeight
}

// imageToPaletted converts image.Image to *image.Paletted.
// From https://github.com/dpup/go-scratch/blob/master/gif-resize/gif-resize.go
func imageToPaletted(img image.Image) *image.Paletted {
	b := img.Bounds()
	pm := image.NewPaletted(b, palette.Plan9)
	draw.FloydSteinberg.Draw(pm, b, img, image.ZP)
	return pm
}
package oskite import ( "encoding/json" "errors" "fmt" "io/ioutil" "koding/db/models" "koding/tools/dnode" "koding/tools/kite" "koding/virt" "net/http" "labix.org/v2/mgo" "labix.org/v2/mgo/bson" ) const ( ErrTokenRequired = "TOKEN_REQUIRED" ErrUsernameRequired = "USERNAME_REQUIRED" ErrGroupRequired = "GROUPNAME_REQUIRED" ErrKiteNotFound = "KITE_NOT_FOUND" ErrUserNotFound = "USER_NOT_FOUND" ErrGroupNotFound = "GROUP_NOT_FOUND" ErrInvalid = "NOT_A_MEMBER_OF_GROUP" ErrKiteNoPlan = "KITE_HAS_NO_PLAN" ErrNoSubscription = "NO_SUBSCRIPTION" ) type Plan struct { CPU int `json:"cpu"` RAM int `json:"ram"` // Memory usage in MB Disk int `json:"disk"` // Disk in MB TotalVMs int `json:"totalVMs"` AlwaysOnVMs int `json:"alwaysOnVMs"` } type PlanResponse struct { CPU string `json:"cpu"` RAM string `json:"ram"` // Memory usage in MB Disk string `json:"disk"` // Disk in MB TotalVMs string `json:"totalVMs"` AlwaysOnVMs string `json:"alwaysOnVMs"` } type subscriptionResp struct { Plan string `json:"plan"` PlanId string `json:"planId"` Err string `json:"err"` } var ( plans = map[string]Plan{ "free": {CPU: 1, RAM: 1000, Disk: 3000, TotalVMs: 1, AlwaysOnVMs: 0}, "1x": {CPU: 2, RAM: 2000, Disk: 10000, TotalVMs: 2, AlwaysOnVMs: 1}, "2x": {CPU: 4, RAM: 4000, Disk: 20000, TotalVMs: 4, AlwaysOnVMs: 2}, "3x": {CPU: 6, RAM: 6000, Disk: 40000, TotalVMs: 6, AlwaysOnVMs: 3}, "4x": {CPU: 8, RAM: 8000, Disk: 80000, TotalVMs: 8, AlwaysOnVMs: 4}, "5x": {CPU: 10, RAM: 10000, Disk: 100000, TotalVMs: 10, AlwaysOnVMs: 5}, } okString = "ok" quotaExceeded = "quota exceeded." 
) func NewPlanResponse() *PlanResponse { return &PlanResponse{ CPU: okString, RAM: okString, Disk: okString, TotalVMs: okString, AlwaysOnVMs: okString, } } func NewUsage(vos *virt.VOS) (*Plan, error) { vms := make([]*models.VM, 0) query := func(c *mgo.Collection) error { return c.Find(bson.M{"webHome": vos.VM.WebHome}).Iter().All(&vms) } err := mongodbConn.Run("jVMs", query) if err != nil { return nil, fmt.Errorf("vm fetching err for user %s. err: %s", vos.VM.WebHome, err) } usage := new(Plan) usage.TotalVMs = len(vms) for _, vm := range vms { if vm.AlwaysOn { usage.AlwaysOnVMs++ } usage.CPU += vm.NumCPUs usage.RAM += vm.MaxMemoryInMB usage.Disk += vm.DiskSizeInMB } return usage, nil } func (p *Plan) checkLimits(username, groupname string) (*PlanResponse, error) { planID, err := getSubscription(username, groupname) if err != nil { log.Critical("oskite checkLimits err: %v", err) return nil, errors.New("couldn't fetch subscription") } plan, ok := plans[planID] if !ok { return nil, errors.New("plan doesn't exist") } resp := NewPlanResponse() if p.AlwaysOnVMs >= plan.AlwaysOnVMs { resp.AlwaysOnVMs = quotaExceeded } if p.TotalVMs >= plan.TotalVMs { resp.TotalVMs = quotaExceeded } return resp, nil } func vmUsage(args *dnode.Partial, vos *virt.VOS, username string) (interface{}, error) { var params struct { GroupName string } if args == nil { return nil, &kite.ArgumentError{Expected: "empy argument passed"} } if args.Unmarshal(&params) != nil || params.GroupName == "" { return nil, &kite.ArgumentError{Expected: "{ groupName: [string] }"} } usage, err := NewUsage(vos) if err != nil { log.Info("vm.usage [%s] err: %v", vos.VM.HostnameAlias, err) return nil, errors.New("vm.usage couldn't be retrieved. 
please consult to support.") } return usage.checkLimits(username, params.GroupName) } type KiteStore struct { Id bson.ObjectId `bson:"_id"` Name string `bson:"name"` Description string `bson:"description"` KiteCode string `bson:"kiteCode"` } func getKiteCode() (string, error) { kiteStore := new(KiteStore) query := func(c *mgo.Collection) error { return c.Find(bson.M{"name": OSKITE_NAME}).One(kiteStore) } err := mongodbConn.Run("jKites", query) if err != nil { return "", err } return kiteStore.KiteCode, nil } func getSubscription(username, groupname string) (string, error) { // TODO: get it once code, err := getKiteCode() if err != nil { return "", err } if code == "" { return "", errors.New("kite code is empty") } resp, err := http.Get(conf.SubscriptionEndpoint + code + "/" + username + "/" + groupname) if err != nil { return "", err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return "", err } var s = new(subscriptionResp) if err := json.Unmarshal(body, s); err != nil { return "", errors.New("Subscription data is malformed") } if resp.StatusCode != 200 { if s.Err != "" { return "", errors.New(s.Err) } return "", errors.New("api not allowed") } return s.Plan, nil } oskite/usage: cache kitecode package oskite import ( "encoding/json" "errors" "fmt" "io/ioutil" "koding/db/models" "koding/tools/dnode" "koding/tools/kite" "koding/virt" "net/http" "labix.org/v2/mgo" "labix.org/v2/mgo/bson" ) const ( ErrTokenRequired = "TOKEN_REQUIRED" ErrUsernameRequired = "USERNAME_REQUIRED" ErrGroupRequired = "GROUPNAME_REQUIRED" ErrKiteNotFound = "KITE_NOT_FOUND" ErrUserNotFound = "USER_NOT_FOUND" ErrGroupNotFound = "GROUP_NOT_FOUND" ErrInvalid = "NOT_A_MEMBER_OF_GROUP" ErrKiteNoPlan = "KITE_HAS_NO_PLAN" ErrNoSubscription = "NO_SUBSCRIPTION" ) type Plan struct { CPU int `json:"cpu"` RAM int `json:"ram"` // Memory usage in MB Disk int `json:"disk"` // Disk in MB TotalVMs int `json:"totalVMs"` AlwaysOnVMs int `json:"alwaysOnVMs"` } type PlanResponse 
struct { CPU string `json:"cpu"` RAM string `json:"ram"` // Memory usage in MB Disk string `json:"disk"` // Disk in MB TotalVMs string `json:"totalVMs"` AlwaysOnVMs string `json:"alwaysOnVMs"` } type subscriptionResp struct { Plan string `json:"plan"` PlanId string `json:"planId"` Err string `json:"err"` } var ( plans = map[string]Plan{ "free": {CPU: 1, RAM: 1000, Disk: 3000, TotalVMs: 1, AlwaysOnVMs: 0}, "1x": {CPU: 2, RAM: 2000, Disk: 10000, TotalVMs: 2, AlwaysOnVMs: 1}, "2x": {CPU: 4, RAM: 4000, Disk: 20000, TotalVMs: 4, AlwaysOnVMs: 2}, "3x": {CPU: 6, RAM: 6000, Disk: 40000, TotalVMs: 6, AlwaysOnVMs: 3}, "4x": {CPU: 8, RAM: 8000, Disk: 80000, TotalVMs: 8, AlwaysOnVMs: 4}, "5x": {CPU: 10, RAM: 10000, Disk: 100000, TotalVMs: 10, AlwaysOnVMs: 5}, } okString = "ok" quotaExceeded = "quota exceeded." kiteCode string ) func NewPlanResponse() *PlanResponse { return &PlanResponse{ CPU: okString, RAM: okString, Disk: okString, TotalVMs: okString, AlwaysOnVMs: okString, } } func NewUsage(vos *virt.VOS) (*Plan, error) { vms := make([]*models.VM, 0) query := func(c *mgo.Collection) error { return c.Find(bson.M{"webHome": vos.VM.WebHome}).Iter().All(&vms) } err := mongodbConn.Run("jVMs", query) if err != nil { return nil, fmt.Errorf("vm fetching err for user %s. 
err: %s", vos.VM.WebHome, err) } usage := new(Plan) usage.TotalVMs = len(vms) for _, vm := range vms { if vm.AlwaysOn { usage.AlwaysOnVMs++ } usage.CPU += vm.NumCPUs usage.RAM += vm.MaxMemoryInMB usage.Disk += vm.DiskSizeInMB } return usage, nil } func (p *Plan) checkLimits(username, groupname string) (*PlanResponse, error) { planID, err := getSubscription(username, groupname) if err != nil { log.Critical("oskite checkLimits err: %v", err) return nil, errors.New("couldn't fetch subscription") } plan, ok := plans[planID] if !ok { return nil, errors.New("plan doesn't exist") } resp := NewPlanResponse() if p.AlwaysOnVMs >= plan.AlwaysOnVMs { resp.AlwaysOnVMs = quotaExceeded } if p.TotalVMs >= plan.TotalVMs { resp.TotalVMs = quotaExceeded } return resp, nil } func vmUsage(args *dnode.Partial, vos *virt.VOS, username string) (interface{}, error) { var params struct { GroupName string } if args == nil { return nil, &kite.ArgumentError{Expected: "empy argument passed"} } if args.Unmarshal(&params) != nil || params.GroupName == "" { return nil, &kite.ArgumentError{Expected: "{ groupName: [string] }"} } usage, err := NewUsage(vos) if err != nil { log.Info("vm.usage [%s] err: %v", vos.VM.HostnameAlias, err) return nil, errors.New("vm.usage couldn't be retrieved. please consult to support.") } return usage.checkLimits(username, params.GroupName) } type KiteStore struct { Id bson.ObjectId `bson:"_id"` Name string `bson:"name"` Description string `bson:"description"` KiteCode string `bson:"kiteCode"` } // getKiteCode returns the API token to be used with Koding's subscription // endpoint. 
func getKiteCode() (string, error) { if kiteCode != "" { return kiteCode, nil } kiteStore := new(KiteStore) query := func(c *mgo.Collection) error { return c.Find(bson.M{"name": OSKITE_NAME}).One(kiteStore) } err := mongodbConn.Run("jKites", query) if err != nil { return "", err } kiteCode = kiteStore.KiteCode return kiteStore.KiteCode, nil } func getSubscription(username, groupname string) (string, error) { // TODO: get it once code, err := getKiteCode() if err != nil { return "", err } if code == "" { return "", errors.New("kite code is empty") } endpointURL := conf.SubscriptionEndpoint + code + "/" + username + "/" + groupname resp, err := http.Get(endpointURL) if err != nil { return "", err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return "", err } var s = new(subscriptionResp) if err := json.Unmarshal(body, s); err != nil { return "", errors.New("Subscription data is malformed") } if resp.StatusCode != 200 { if s.Err != "" { return "", errors.New(s.Err) } return "", errors.New("api not allowed") } return s.Plan, nil }
package systests

import (
	"fmt"
	"testing"

	keybase1 "github.com/keybase/client/go/protocol/keybase1"
	"github.com/keybase/client/go/teams"
	"github.com/stretchr/testify/require"
	"golang.org/x/net/context"
)

// findMember returns a pointer to the entry in members whose Username matches
// user's username, or nil when the user is not in the list.
func findMember(user *smuUser, members []keybase1.TeamMemberDetails) *keybase1.TeamMemberDetails {
	for _, member := range members {
		if member.Username == user.username {
			return &member
		}
	}
	return nil
}

// TestTeamList builds a team containing a mix of member states (active,
// reset, PUK-less, reset-to-PUK-less, social invite) and checks that
// TeamGet reports the right Active/NeedsPUK flags per member and that both
// TeamListVerified and TeamListUnverified report the expected MemberCount.
func TestTeamList(t *testing.T) {
	ctx := newSMUContext(t)
	defer ctx.cleanup()

	// Step 1 - create the initial team with mix of normal members,
	// reset members, pukless users, social invites etc.
	ann := ctx.installKeybaseForUser("ann", 10)
	ann.signup()
	t.Logf("Signed up ann (%s)", ann.username)

	bob := ctx.installKeybaseForUser("bob", 10)
	bob.signup()
	t.Logf("Signed up bob (%s)", bob.username)

	pam := ctx.installKeybaseForUser("pam", 10)
	pam.signup()
	t.Logf("Signed up pam (%s)", pam.username)

	john := ctx.installKeybaseForUser("john", 10)
	john.signupNoPUK()
	t.Logf("Signed up PUK-less user john (%s)", john.username)

	ed := ctx.installKeybaseForUser("ed", 10)
	ed.signup()
	ed.reset()
	ed.loginAfterResetNoPUK(10)
	t.Logf("Signed up ed (%s), reset, and reprovisioned without PUK", ed.username)

	team := ann.createTeam([]*smuUser{bob, pam})
	t.Logf("Team created (%s)", team.name)

	pam.reset()
	t.Logf("Pam resets (%s)", pam.username)

	ann.addWriter(team, john)
	t.Logf("Adding john (%s)", john.username)
	ann.addWriter(team, ed)
	t.Logf("Adding ed (%s)", ed.username)

	teamCli := ann.getTeamsClient()

	// A social ("@rooter") invite for a user that does not exist yet.
	rootername := randomUser("arbitrary").username
	_, err := teamCli.TeamAddMember(context.TODO(), keybase1.TeamAddMemberArg{
		Name:     team.name,
		Username: rootername + "@rooter",
		Role:     keybase1.TeamRole_WRITER,
	})
	require.NoError(t, err)
	t.Logf("Added rooter (%s@rooter)", rootername)

	// Examine results from TeamGet
	details, err := teamCli.TeamGet(context.TODO(), keybase1.TeamGetArg{
		Name:        team.name,
		ForceRepoll: true,
	})
	require.NoError(t, err)

	require.Equal(t, 1, len(details.Members.Owners))
	require.Equal(t, 0, len(details.Members.Admins))
	require.Equal(t, 4, len(details.Members.Writers))
	require.Equal(t, 0, len(details.Members.Readers))

	annMember := findMember(ann, details.Members.Owners)
	require.NotNil(t, annMember)
	require.True(t, annMember.Active)
	require.False(t, annMember.NeedsPUK)

	bobMember := findMember(bob, details.Members.Writers)
	require.NotNil(t, bobMember)
	require.True(t, bobMember.Active)
	require.False(t, bobMember.NeedsPUK)

	// pam reset after joining, so she shows up as inactive.
	pamMember := findMember(pam, details.Members.Writers)
	require.NotNil(t, pamMember)
	require.False(t, pamMember.Active)
	require.False(t, pamMember.NeedsPUK)

	johnMember := findMember(john, details.Members.Writers)
	require.NotNil(t, johnMember)
	require.True(t, johnMember.Active)
	require.True(t, johnMember.NeedsPUK)

	edMember := findMember(ed, details.Members.Writers)
	require.NotNil(t, edMember)
	require.True(t, edMember.Active)
	require.True(t, edMember.NeedsPUK)

	require.Equal(t, 1, len(details.AnnotatedActiveInvites))
	for _, invite := range details.AnnotatedActiveInvites {
		// There should be only one invite
		require.EqualValues(t, rootername, invite.Name)
	}

	// Examine results from TeamList (mostly MemberCount)
	check := func(list *keybase1.AnnotatedTeamList) {
		require.Equal(t, 1, len(list.Teams))
		require.Equal(t, 0, len(list.AnnotatedActiveInvites))

		teamInfo := list.Teams[0]
		require.Equal(t, team.name, teamInfo.FqName)
		require.Equal(t, 5, teamInfo.MemberCount)
	}

	list, err := teamCli.TeamListVerified(context.TODO(), keybase1.TeamListVerifiedArg{})
	require.NoError(t, err)
	check(&list)

	list, err = teamCli.TeamListUnverified(context.TODO(), keybase1.TeamListUnverifiedArg{})
	require.NoError(t, err)
	check(&list)
}

// TestTeamListOpenTeams verifies that the IsOpenTeam flag reported by both
// team-list RPCs reflects each team's open/closed setting.
func TestTeamListOpenTeams(t *testing.T) {
	tt := newTeamTester(t)
	defer tt.cleanup()

	ann := tt.addUser("ann")
	t.Logf("Signed up ann (%s)", ann.username)

	team1 := ann.createTeam()
	t.Logf("Team 1 created (%s)", team1)
	team2 := ann.createTeam()
	t.Logf("Team 2 created (%s)", team2)

	// Only team2 is made open.
	ann.teamSetSettings(team2, keybase1.TeamSettings{
		Open:   true,
		JoinAs: keybase1.TeamRole_WRITER,
	})

	check := func(list *keybase1.AnnotatedTeamList) {
		require.Equal(t, 2, len(list.Teams))
		require.Equal(t, 0, len(list.AnnotatedActiveInvites))
		for _, teamInfo := range list.Teams {
			if teamInfo.FqName == team1 {
				require.False(t, teamInfo.IsOpenTeam)
			} else if teamInfo.FqName == team2 {
				require.True(t, teamInfo.IsOpenTeam)
			} else {
				t.Fatalf("Unexpected team name %v", teamInfo)
			}
			require.Equal(t, 1, teamInfo.MemberCount)
		}
	}

	teamCli := ann.teamsClient
	list, err := teamCli.TeamListVerified(context.Background(), keybase1.TeamListVerifiedArg{})
	require.NoError(t, err)
	check(&list)

	list, err = teamCli.TeamListUnverified(context.Background(), keybase1.TeamListUnverifiedArg{})
	require.NoError(t, err)
	check(&list)
}

// TestTeamDuplicateUIDList re-adds a member after a reset (same UID appears
// twice in team state) and checks that member listings deduplicate the UID:
// TeamGet shows one active writer and both list RPCs count unique UIDs only.
func TestTeamDuplicateUIDList(t *testing.T) {
	tt := newTeamTester(t)
	defer tt.cleanup()

	ann := tt.addUser("ann")
	t.Logf("Signed up ann (%s)", ann.username)

	// We have to disable caching in UIDMapper because after bob
	// resets and provisions, we have no way to be aware of that, and
	// we might see cached bob in subsequent teamList calls.
	ann.tc.G.UIDMapper.SetTestingNoCachingMode(true)

	bob := tt.addPuklessUser("bob")
	t.Logf("Signed up PUK-less user bob (%s)", bob.username)

	team := ann.createTeam()
	t.Logf("Team created (%s)", team)

	fmt.Printf("-- TestTeamDuplicateUIDList A\n")

	ann.addTeamMember(team, bob.username, keybase1.TeamRole_WRITER)

	bob.reset()
	bob.loginAfterReset()
	t.Logf("Bob (%s) resets and reprovisions", bob.username)

	ann.addTeamMember(team, bob.username, keybase1.TeamRole_WRITER)

	teamCli := ann.teamsClient
	details, err := teamCli.TeamGet(context.TODO(), keybase1.TeamGetArg{
		Name:        team,
		ForceRepoll: true,
	})
	require.NoError(t, err)

	fmt.Printf("-- TestTeamDuplicateUIDList B\n")

	// Expecting just the active writer here, and not inactive
	// (because of reset) invite.
	require.Equal(t, 1, len(details.Members.Writers))
	member := details.Members.Writers[0]
	require.True(t, member.Active)
	require.False(t, member.NeedsPUK)

	// Check both functions: slow TeamListVerified, and fast (server
	// trust) TeamList.

	// TeamList reports memberCount of two: ann and bob. Second bob is
	// ignored, because memberCount is set to number of unique UIDs.
	check := func(list *keybase1.AnnotatedTeamList) {
		require.Equal(t, 1, len(list.Teams))
		require.Equal(t, 0, len(list.AnnotatedActiveInvites))

		teamInfo := list.Teams[0]
		require.Equal(t, team, teamInfo.FqName)
		require.Equal(t, 2, teamInfo.MemberCount)
	}

	t.Logf("Calling TeamListVerified")
	list, err := teamCli.TeamListVerified(context.TODO(), keybase1.TeamListVerifiedArg{})
	require.NoError(t, err)
	check(&list)

	fmt.Printf("-- TestTeamDuplicateUIDList C\n")

	t.Logf("Calling TeamList")
	list, err = teamCli.TeamListUnverified(context.TODO(), keybase1.TeamListUnverifiedArg{})
	require.NoError(t, err)
	check(&list)

	fmt.Printf("-- TestTeamDuplicateUIDList D\n")
}

// TestTeamTree builds a two-level subteam hierarchy and checks that
// teams.TeamTree, rooted at each node, returns that node's descendants
// (plus the node itself) with no unexpected or duplicated entries.
func TestTeamTree(t *testing.T) {
	tt := newTeamTester(t)
	defer tt.cleanup()

	ann := tt.addUser("ann")
	t.Logf("Signed up ann (%s)", ann.username)

	team := ann.createTeam()
	t.Logf("Team created (%s)", team)

	TeamNameFromString := func(str string) keybase1.TeamName {
		ret, err := keybase1.TeamNameFromString(str)
		require.NoError(t, err)
		return ret
	}

	// createSubteam makes subteamName under parentName and returns the new
	// subteam's fully-qualified name.
	createSubteam := func(parentName, subteamName string) string {
		subteam, err := teams.CreateSubteam(context.Background(), ann.tc.G, subteamName,
			TeamNameFromString(parentName), keybase1.TeamRole_NONE /* addSelfAs */)
		require.NoError(t, err)
		subteamObj, err := teams.Load(context.Background(), ann.tc.G, keybase1.LoadTeamArg{ID: *subteam})
		require.NoError(t, err)
		return subteamObj.Name().String()
	}

	subTeam1 := createSubteam(team, "staff")
	fmt.Printf("-- TestTeamTree A\n")
	sub1SubTeam1 := createSubteam(subTeam1, "legal")
	sub1SubTeam2 := createSubteam(subTeam1, "hr")
	subTeam2 := createSubteam(team, "offtopic")
	fmt.Printf("-- TestTeamTree B\n")
	sub2SubTeam1 := createSubteam(subTeam2, "games")
	sub2SubTeam2 := createSubteam(subTeam2, "crypto")
	sub2SubTeam3 := createSubteam(subTeam2, "cryptocurrency")
	fmt.Printf("-- TestTeamTree C\n")

	// checkTeamTree asserts that the tree rooted at teamName contains no
	// entry outside expectedTree+teamName and no duplicates.
	// NOTE(review): entries left false in `set` (expected but never seen)
	// are not asserted on — missing subteams would go undetected; confirm
	// whether that is intentional.
	checkTeamTree := func(teamName string, expectedTree ...string) {
		set := make(map[string]bool)
		for _, v := range expectedTree {
			set[v] = false
		}
		set[teamName] = false

		tree, err := teams.TeamTree(context.Background(), ann.tc.G, keybase1.TeamTreeArg{Name: TeamNameFromString(teamName)})
		require.NoError(t, err)

		for _, entry := range tree.Entries {
			name := entry.Name.String()
			alreadyFound, exists := set[name]
			if !exists {
				t.Fatalf("Found unexpected team %s in tree of %s", name, teamName)
			} else if alreadyFound {
				t.Fatalf("Duplicate team %s in tree of %s", name, teamName)
			}
			set[name] = true
		}
	}

	checkTeamTree(team, subTeam1, subTeam2, sub1SubTeam1, sub1SubTeam2, sub2SubTeam1, sub2SubTeam2, sub2SubTeam3)
	checkTeamTree(subTeam1, sub1SubTeam1, sub1SubTeam2)
	checkTeamTree(subTeam2, sub2SubTeam1, sub2SubTeam2, sub2SubTeam3)
	fmt.Printf("-- TestTeamTree D\n")
	checkTeamTree(sub2SubTeam1)
	checkTeamTree(sub2SubTeam2)
	checkTeamTree(sub2SubTeam3)
	fmt.Printf("-- TestTeamTree E\n")
}
/* Copyright 2019 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package sqlparser import ( "bufio" "bytes" "fmt" "math/rand" "os" "strings" "sync" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) var ( validSQL = []struct { input string output string partialDDL bool }{{ input: "select 1", output: "select 1 from dual", }, { input: "select 1 from t", }, { input: "select * from information_schema.columns", output: "select * from information_schema.`columns`", }, { input: "select * from information_schema.processlist", output: "select * from information_schema.`processlist`", }, { input: "select .1 from t", }, { input: "select 1.2e1 from t", }, { input: "select 1.2e+1 from t", }, { input: "select 1.2e-1 from t", }, { input: "select 08.3 from t", }, { input: "select -1 from t where b = -2", }, { input: "select - -1 from t", output: "select 1 from t", }, { input: "select a from t", }, { input: "select $ from t", }, { input: "select a.b as a$b from $test$", }, { input: "select 1 from t // aa\n", output: "select 1 from t", }, { input: "select 1 from t -- aa\n", output: "select 1 from t", }, { input: "select 1 from t # aa\n", output: "select 1 from t", }, { input: "select 1 --aa\nfrom t", output: "select 1 from t", }, { input: "select 1 #aa\nfrom t", output: "select 1 from t", }, { input: "select /* simplest */ 1 from t", }, { input: "select /* double star **/ 1 from t", }, { input: "select /* double */ /* comment */ 1 from t", }, { input: "select /* 
back-quote keyword */ `By` from t", }, { input: "select /* back-quote num */ `2a` from t", }, { input: "select /* back-quote . */ `a.b` from t", }, { input: "select /* back-quote back-quote */ `a``b` from t", }, { input: "select /* back-quote unnecessary */ 1 from `t`", output: "select /* back-quote unnecessary */ 1 from t", }, { input: "select /* back-quote idnum */ 1 from `a1`", output: "select /* back-quote idnum */ 1 from a1", }, { input: "select /* @ */ @@a from b", }, { input: "select /* \\0 */ '\\0' from a", }, { input: "select 1 /* drop this comment */ from t", output: "select 1 from t", }, { input: "select /* union */ 1 from t union select 1 from t", }, { input: "select /* double union */ 1 from t union select 1 from t union select 1 from t", }, { input: "select /* union all */ 1 from t union all select 1 from t", }, { input: "select /* union distinct */ 1 from t union distinct select 1 from t", output: "select /* union distinct */ 1 from t union select 1 from t", }, { input: "(select /* union parenthesized select */ 1 from t order by a) union select 1 from t", output: "(select /* union parenthesized select */ 1 from t order by a asc) union select 1 from t", }, { input: "select /* union parenthesized select 2 */ 1 from t union (select 1 from t)", }, { input: "select /* union order by */ 1 from t union select 1 from t order by a", output: "select /* union order by */ 1 from t union select 1 from t order by a asc", }, { input: "select /* union order by limit lock */ 1 from t union select 1 from t order by a limit 1 for update", output: "select /* union order by limit lock */ 1 from t union select 1 from t order by a asc limit 1 for update", }, { input: "select /* union with limit on lhs */ 1 from t limit 1 union select 1 from t", }, { input: "(select id, a from t order by id limit 1) union (select id, b as a from s order by id limit 1) order by a limit 1", output: "(select id, a from t order by id asc limit 1) union (select id, b as a from s order by id asc 
limit 1) order by a asc limit 1", }, { input: "select a from (select 1 as a from tbl1 union select 2 from tbl2) as t", }, { input: "select * from t1 join (select * from t2 union select * from t3) as t", }, { // Ensure this doesn't generate: ""select * from t1 join t2 on a = b join t3 on a = b". input: "select * from t1 join t2 on a = b join t3", }, { input: "select * from t1 where col in (select 1 from dual union select 2 from dual)", }, { input: "select * from t1 where exists (select a from t2 union select b from t3)", }, { input: "select 1 from dual union select 2 from dual union all select 3 from dual union select 4 from dual union all select 5 from dual", }, { input: "(select 1 from dual) order by 1 asc limit 2", }, { input: "(select 1 from dual order by 1 desc) order by 1 asc limit 2", }, { input: "(select 1 from dual)", }, { input: "((select 1 from dual))", }, { input: "select 1 from (select 1 from dual) as t", }, { input: "select 1 from (select 1 from dual union select 2 from dual) as t", }, { input: "select 1 from ((select 1 from dual) union select 2 from dual) as t", }, { input: "select /* distinct */ distinct 1 from t", }, { input: "select /* straight_join */ straight_join 1 from t", }, { input: "select /* for update */ 1 from t for update", }, { input: "select /* lock in share mode */ 1 from t lock in share mode", }, { input: "select /* select list */ 1, 2 from t", }, { input: "select /* * */ * from t", }, { input: "select /* a.* */ a.* from t", }, { input: "select /* a.b.* */ a.b.* from t", }, { input: "select /* column alias */ a b from t", output: "select /* column alias */ a as b from t", }, { input: "select /* column alias with as */ a as b from t", }, { input: "select /* keyword column alias */ a as `By` from t", }, { input: "select /* column alias as string */ a as \"b\" from t", output: "select /* column alias as string */ a as b from t", }, { input: "select /* column alias as string without as */ a \"b\" from t", output: "select /* column alias 
as string without as */ a as b from t", }, { input: "select /* a.* */ a.* from t", }, { input: "select next value for t", output: "select next 1 values from t", }, { input: "select next value from t", output: "select next 1 values from t", }, { input: "select next 10 values from t", }, { input: "select next :a values from t", }, { input: "select /* `By`.* */ `By`.* from t", }, { input: "select /* select with bool expr */ a = b from t", }, { input: "select /* case_when */ case when a = b then c end from t", }, { input: "select /* case_when_else */ case when a = b then c else d end from t", }, { input: "select /* case_when_when_else */ case when a = b then c when b = d then d else d end from t", }, { input: "select /* case */ case aa when a = b then c end from t", }, { input: "select /* parenthesis */ 1 from (t)", }, { input: "select /* parenthesis multi-table */ 1 from (t1, t2)", }, { input: "select /* table list */ 1 from t1, t2", }, { input: "select /* parenthessis in table list 1 */ 1 from (t1), t2", }, { input: "select /* parenthessis in table list 2 */ 1 from t1, (t2)", }, { input: "select /* use */ 1 from t1 use index (a) where b = 1", }, { input: "select /* use */ 1 from t1 use index () where b = 1", }, { input: "select /* keyword index */ 1 from t1 use index (`By`) where b = 1", }, { input: "select /* ignore */ 1 from t1 as t2 ignore index (a), t3 use index (b) where b = 1", }, { input: "select /* use */ 1 from t1 as t2 use index (a), t3 use index (b) where b = 1", }, { input: "select /* force */ 1 from t1 as t2 force index (a), t3 force index (b) where b = 1", }, { input: "select /* table alias */ 1 from t t1", output: "select /* table alias */ 1 from t as t1", }, { input: "select /* table alias with as */ 1 from t as t1", }, { input: "select /* string table alias */ 1 from t as 't1'", output: "select /* string table alias */ 1 from t as t1", }, { input: "select /* string table alias without as */ 1 from t 't1'", output: "select /* string table alias 
without as */ 1 from t as t1", }, { input: "select /* keyword table alias */ 1 from t as `By`", }, { input: "select /* join */ 1 from t1 join t2", }, { input: "select /* join on */ 1 from t1 join t2 on a = b", }, { input: "select /* join on */ 1 from t1 join t2 using (a)", }, { input: "select /* inner join */ 1 from t1 inner join t2", output: "select /* inner join */ 1 from t1 join t2", }, { input: "select /* cross join */ 1 from t1 cross join t2", output: "select /* cross join */ 1 from t1 join t2", }, { input: "select /* straight_join */ 1 from t1 straight_join t2", }, { input: "select /* straight_join on */ 1 from t1 straight_join t2 on a = b", }, { input: "select /* left join */ 1 from t1 left join t2 on a = b", }, { input: "select /* left join */ 1 from t1 left join t2 using (a)", }, { input: "select /* left outer join */ 1 from t1 left outer join t2 on a = b", output: "select /* left outer join */ 1 from t1 left join t2 on a = b", }, { input: "select /* left outer join */ 1 from t1 left outer join t2 using (a)", output: "select /* left outer join */ 1 from t1 left join t2 using (a)", }, { input: "select /* right join */ 1 from t1 right join t2 on a = b", }, { input: "select /* right join */ 1 from t1 right join t2 using (a)", }, { input: "select /* right outer join */ 1 from t1 right outer join t2 on a = b", output: "select /* right outer join */ 1 from t1 right join t2 on a = b", }, { input: "select /* right outer join */ 1 from t1 right outer join t2 using (a)", output: "select /* right outer join */ 1 from t1 right join t2 using (a)", }, { input: "select /* natural join */ 1 from t1 natural join t2", }, { input: "select /* natural left join */ 1 from t1 natural left join t2", }, { input: "select /* natural left outer join */ 1 from t1 natural left join t2", output: "select /* natural left outer join */ 1 from t1 natural left join t2", }, { input: "select /* natural right join */ 1 from t1 natural right join t2", }, { input: "select /* natural right outer 
join */ 1 from t1 natural right join t2", output: "select /* natural right outer join */ 1 from t1 natural right join t2", }, { input: "select /* join on */ 1 from t1 join t2 on a = b", }, { input: "select /* join using */ 1 from t1 join t2 using (a)", }, { input: "select /* join using (a, b, c) */ 1 from t1 join t2 using (a, b, c)", }, { input: "select /* s.t */ 1 from s.t", }, { input: "select /* keyword schema & table name */ 1 from `By`.`bY`", }, { input: "select /* select in from */ 1 from (select 1 from t) as a", }, { input: "select /* select in from with no as */ 1 from (select 1 from t) a", output: "select /* select in from with no as */ 1 from (select 1 from t) as a", }, { input: "select /* where */ 1 from t where a = b", }, { input: "select /* and */ 1 from t where a = b and a = c", }, { input: "select /* && */ 1 from t where a = b && a = c", output: "select /* && */ 1 from t where a = b and a = c", }, { input: "select /* or */ 1 from t where a = b or a = c", }, { input: "select /* || */ 1 from t where a = b || a = c", output: "select /* || */ 1 from t where a = b or a = c", }, { input: "select /* not */ 1 from t where not a = b", }, { input: "select /* ! 
*/ 1 from t where a = !1", }, { input: "select /* bool is */ 1 from t where a = b is null", }, { input: "select /* bool is not */ 1 from t where a = b is not false", }, { input: "select /* true */ 1 from t where true", }, { input: "select /* false */ 1 from t where false", }, { input: "select /* false on left */ 1 from t where false = 0", }, { input: "select /* exists */ 1 from t where exists (select 1 from t)", }, { input: "select /* (boolean) */ 1 from t where not (a = b)", output: "select /* (boolean) */ 1 from t where not a = b", }, { input: "select /* in value list */ 1 from t where a in (b, c)", }, { input: "select /* in select */ 1 from t where a in (select 1 from t)", }, { input: "select /* not in */ 1 from t where a not in (b, c)", }, { input: "select /* like */ 1 from t where a like b", }, { input: "select /* like escape */ 1 from t where a like b escape '!'", }, { input: "select /* not like */ 1 from t where a not like b", }, { input: "select /* not like escape */ 1 from t where a not like b escape '$'", }, { input: "select /* regexp */ 1 from t where a regexp b", }, { input: "select /* not regexp */ 1 from t where a not regexp b", }, { input: "select /* rlike */ 1 from t where a rlike b", output: "select /* rlike */ 1 from t where a regexp b", }, { input: "select /* not rlike */ 1 from t where a not rlike b", output: "select /* not rlike */ 1 from t where a not regexp b", }, { input: "select /* between */ 1 from t where a between b and c", }, { input: "select /* not between */ 1 from t where a not between b and c", }, { input: "select /* is null */ 1 from t where a is null", }, { input: "select /* is not null */ 1 from t where a is not null", }, { input: "select /* is true */ 1 from t where a is true", }, { input: "select /* is not true */ 1 from t where a is not true", }, { input: "select /* is false */ 1 from t where a is false", }, { input: "select /* is not false */ 1 from t where a is not false", }, { input: "select /* < */ 1 from t where a < b", 
}, { input: "select /* <= */ 1 from t where a <= b", }, { input: "select /* >= */ 1 from t where a >= b", }, { input: "select /* > */ 1 from t where a > b", }, { input: "select /* != */ 1 from t where a != b", }, { input: "select /* <> */ 1 from t where a <> b", output: "select /* <> */ 1 from t where a != b", }, { input: "select /* <=> */ 1 from t where a <=> b", }, { input: "select /* != */ 1 from t where a != b", }, { input: "select /* single value expre list */ 1 from t where a in (b)", }, { input: "select /* select as a value expression */ 1 from t where a = (select a from t)", }, { input: "select /* parenthesised value */ 1 from t where a = (b)", output: "select /* parenthesised value */ 1 from t where a = b", }, { input: "select /* over-parenthesize */ ((1)) from t where ((a)) in (((1))) and ((a, b)) in ((((1, 1))), ((2, 2)))", output: "select /* over-parenthesize */ 1 from t where a in (1) and (a, b) in ((1, 1), (2, 2))", }, { input: "select /* dot-parenthesize */ (a.b) from t where (b.c) = 2", output: "select /* dot-parenthesize */ a.b from t where b.c = 2", }, { input: "select /* & */ 1 from t where a = b & c", }, { input: "select /* & */ 1 from t where a = b & c", }, { input: "select /* | */ 1 from t where a = b | c", }, { input: "select /* ^ */ 1 from t where a = b ^ c", }, { input: "select /* + */ 1 from t where a = b + c", }, { input: "select /* - */ 1 from t where a = b - c", }, { input: "select /* * */ 1 from t where a = b * c", }, { input: "select /* / */ 1 from t where a = b / c", }, { input: "select /* % */ 1 from t where a = b % c", }, { input: "select /* div */ 1 from t where a = b div c", }, { input: "select /* MOD */ 1 from t where a = b MOD c", output: "select /* MOD */ 1 from t where a = b % c", }, { input: "select /* << */ 1 from t where a = b << c", }, { input: "select /* >> */ 1 from t where a = b >> c", }, { input: "select /* % no space */ 1 from t where a = b%c", output: "select /* % no space */ 1 from t where a = b % c", }, { input: 
"select /* u+ */ 1 from t where a = +b", }, { input: "select /* u- */ 1 from t where a = -b", }, { input: "select /* u~ */ 1 from t where a = ~b", }, { input: "select /* -> */ a.b -> 'ab' from t", }, { input: "select /* -> */ a.b ->> 'ab' from t", }, { input: "select /* empty function */ 1 from t where a = b()", }, { input: "select /* function with 1 param */ 1 from t where a = b(c)", }, { input: "select /* function with many params */ 1 from t where a = b(c, d)", }, { input: "select /* function with distinct */ count(distinct a) from t", }, { input: "select count(distinctrow(1)) from (select (1) from dual union all select 1 from dual) a", output: "select count(distinct 1) from (select 1 from dual union all select 1 from dual) as a", }, { input: "select /* if as func */ 1 from t where a = if(b)", }, { input: "select /* current_timestamp */ current_timestamp() from t", }, { input: "select /* current_timestamp as func */ current_timestamp() from t", }, { input: "select /* current_timestamp with fsp */ current_timestamp(3) from t", }, { input: "select /* current_date */ current_date() from t", }, { input: "select /* current_date as func */ current_date() from t", }, { input: "select /* current_time */ current_time() from t", }, { input: "select /* current_time as func */ current_time() from t", }, { input: "select /* current_time with fsp */ current_time(1) from t", }, { input: "select /* utc_timestamp */ utc_timestamp() from t", }, { input: "select /* utc_timestamp as func */ utc_timestamp() from t", }, { input: "select /* utc_timestamp with fsp */ utc_timestamp(0) from t", }, { input: "select /* utc_time */ utc_time() from t", }, { input: "select /* utc_time as func */ utc_time() from t", }, { input: "select /* utc_time with fsp */ utc_time(4) from t", }, { input: "select /* utc_date */ utc_date() from t", }, { input: "select /* utc_date as func */ utc_date() from t", }, { input: "select /* localtime */ localtime() from t", }, { input: "select /* localtime as func 
*/ localtime() from t", }, { input: "select /* localtime with fsp */ localtime(5) from t", }, { input: "select /* localtimestamp */ localtimestamp() from t", }, { input: "select /* localtimestamp as func */ localtimestamp() from t", }, { input: "select /* localtimestamp with fsp */ localtimestamp(7) from t", }, { input: "select /* mod as func */ a from tab where mod(b, 2) = 0", }, { input: "select /* database as func no param */ database() from t", }, { input: "select /* database as func 1 param */ database(1) from t", }, { input: "select /* a */ a from t", }, { input: "select /* a.b */ a.b from t", }, { input: "select /* a.b.c */ a.b.c from t", }, { input: "select /* keyword a.b */ `By`.`bY` from t", }, { input: "select /* string */ 'a' from t", }, { input: "select /* double quoted string */ \"a\" from t", output: "select /* double quoted string */ 'a' from t", }, { input: "select /* quote quote in string */ 'a''a' from t", output: "select /* quote quote in string */ 'a\\'a' from t", }, { input: "select /* double quote quote in string */ \"a\"\"a\" from t", output: "select /* double quote quote in string */ 'a\\\"a' from t", }, { input: "select /* quote in double quoted string */ \"a'a\" from t", output: "select /* quote in double quoted string */ 'a\\'a' from t", }, { input: "select /* backslash quote in string */ 'a\\'a' from t", }, { input: "select /* literal backslash in string */ 'a\\\\na' from t", }, { input: "select /* all escapes */ '\\0\\'\\\"\\b\\n\\r\\t\\Z\\\\' from t", }, { input: "select /* non-escape */ '\\x' from t", output: "select /* non-escape */ 'x' from t", }, { input: "select /* unescaped backslash */ '\\n' from t", }, { input: "select /* value argument */ :a from t", }, { input: "select /* value argument with digit */ :a1 from t", }, { input: "select /* value argument with dot */ :a.b from t", }, { input: "select /* positional argument */ ? 
from t", output: "select /* positional argument */ :v1 from t", }, { input: "select /* multiple positional arguments */ ?, ? from t", output: "select /* multiple positional arguments */ :v1, :v2 from t", }, { input: "select /* list arg */ * from t where a in ::list", }, { input: "select /* list arg not in */ * from t where a not in ::list", }, { input: "select /* null */ null from t", }, { input: "select /* octal */ 010 from t", }, { input: "select /* hex */ x'f0A1' from t", output: "select /* hex */ X'f0A1' from t", }, { input: "select /* hex caps */ X'F0a1' from t", }, { input: "select /* bit literal */ b'0101' from t", output: "select /* bit literal */ B'0101' from t", }, { input: "select /* bit literal caps */ B'010011011010' from t", }, { input: "select /* 0x */ 0xf0 from t", }, { input: "select /* float */ 0.1 from t", }, { input: "select /* group by */ 1 from t group by a", }, { input: "select /* having */ 1 from t having a = b", }, { input: "select /* simple order by */ 1 from t order by a", output: "select /* simple order by */ 1 from t order by a asc", }, { input: "select /* order by asc */ 1 from t order by a asc", }, { input: "select /* order by desc */ 1 from t order by a desc", }, { input: "select /* order by null */ 1 from t order by null", }, { input: "select /* limit a */ 1 from t limit a", }, { input: "select /* limit a,b */ 1 from t limit a, b", }, { input: "select /* binary unary */ a- -b from t", output: "select /* binary unary */ a - -b from t", }, { input: "select /* - - */ - -b from t", }, { input: "select /* binary binary */ binary binary b from t", }, { input: "select /* binary ~ */ binary ~b from t", }, { input: "select /* ~ binary */ ~ binary b from t", }, { input: "select /* interval */ adddate('2008-01-02', interval 31 day) from t", }, { input: "select /* interval keyword */ adddate('2008-01-02', interval 1 year) from t", }, { input: "select /* TIMESTAMPADD */ TIMESTAMPADD(MINUTE, 1, '2008-01-04') from t", output: "select /* 
TIMESTAMPADD */ timestampadd(MINUTE, 1, '2008-01-04') from t", }, { input: "select /* TIMESTAMPDIFF */ TIMESTAMPDIFF(MINUTE, '2008-01-02', '2008-01-04') from t", output: "select /* TIMESTAMPDIFF */ timestampdiff(MINUTE, '2008-01-02', '2008-01-04') from t", }, { input: "select /* dual */ 1 from dual", }, { input: "select /* Dual */ 1 from Dual", output: "select /* Dual */ 1 from dual", }, { input: "select /* DUAL */ 1 from Dual", output: "select /* DUAL */ 1 from dual", }, { input: "select /* column as bool in where */ a from t where b", }, { input: "select /* OR of columns in where */ * from t where a or b", }, { input: "select /* OR of mixed columns in where */ * from t where a = 5 or b and c is not null", }, { input: "select /* OR in select columns */ (a or b) from t where c = 5", output: "select /* OR in select columns */ a or b from t where c = 5", }, { input: "select /* XOR of columns in where */ * from t where a xor b", }, { input: "select /* XOR of mixed columns in where */ * from t where a = 5 xor b and c is not null", }, { input: "select /* XOR in select columns */ (a xor b) from t where c = 5", output: "select /* XOR in select columns */ a xor b from t where c = 5", }, { input: "select /* XOR in select columns */ * from t where (1 xor c1 > 0)", output: "select /* XOR in select columns */ * from t where 1 xor c1 > 0", }, { input: "select /* bool as select value */ a, true from t", }, { input: "select /* bool column in ON clause */ * from t join s on t.id = s.id and s.foo where t.bar", }, { input: "select /* bool in order by */ * from t order by a is null or b asc", }, { input: "select /* string in case statement */ if(max(case a when 'foo' then 1 else 0 end) = 1, 'foo', 'bar') as foobar from t", }, { input: "/*!show databases*/", output: "show databases", }, { input: "select /*!40101 * from*/ t", output: "select * from t", }, { input: "select /*! 
* from*/ t", output: "select * from t", }, { input: "select /*!* from*/ t", output: "select * from t", }, { input: "select /*!401011 from*/ t", output: "select 1 from t", }, { input: "select /* dual */ 1 from dual", }, { input: "select * from (select 'tables') tables", output: "select * from (select 'tables' from dual) as `tables`", }, { input: "insert /* simple */ into a values (1)", }, { input: "insert /* a.b */ into a.b values (1)", }, { input: "insert /* multi-value */ into a values (1, 2)", }, { input: "insert /* multi-value list */ into a values (1, 2), (3, 4)", }, { input: "insert /* no values */ into a values ()", }, { input: "insert /* set */ into a set a = 1, b = 2", output: "insert /* set */ into a(a, b) values (1, 2)", }, { input: "insert /* set default */ into a set a = default, b = 2", output: "insert /* set default */ into a(a, b) values (default, 2)", }, { input: "insert /* value expression list */ into a values (a + 1, 2 * 3)", }, { input: "insert /* default */ into a values (default, 2 * 3)", }, { input: "insert /* column list */ into a(a, b) values (1, 2)", }, { input: "insert into a(a, b) values (1, ifnull(null, default(b)))", }, { input: "insert /* qualified column list */ into a(a, b) values (1, 2)", }, { input: "insert /* qualified columns */ into t (t.a, t.b) values (1, 2)", output: "insert /* qualified columns */ into t(a, b) values (1, 2)", }, { input: "insert /* select */ into a select b, c from d", }, { input: "insert /* it accepts columns with keyword action */ into a(action, b) values (1, 2)", output: "insert /* it accepts columns with keyword action */ into a(`action`, b) values (1, 2)", }, { input: "insert /* no cols & paren select */ into a (select * from t)", }, { input: "insert /* cols & paren select */ into a(a, b, c) (select * from t)", }, { input: "insert /* cols & union with paren select */ into a(b, c) (select d, e from f) union (select g from h)", }, { input: "insert /* on duplicate */ into a values (1, 2) on duplicate key 
update b = func(a), c = d", }, { input: "insert /* bool in insert value */ into a values (1, true, false)", }, { input: "insert /* bool in on duplicate */ into a values (1, 2) on duplicate key update b = false, c = d", }, { input: "insert /* bool in on duplicate */ into a values (1, 2, 3) on duplicate key update b = values(b), c = d", }, { input: "insert /* bool in on duplicate */ into a values (1, 2, 3) on duplicate key update b = values(a.b), c = d", }, { input: "insert /* bool expression on duplicate */ into a values (1, 2) on duplicate key update b = func(a), c = a > d", }, { input: "insert into user(username, `status`) values ('Chuck', default(`status`))", }, { input: "insert into user(format, tree, vitess) values ('Chuck', 42, 'Barry')", output: "insert into user(`format`, `tree`, `vitess`) values ('Chuck', 42, 'Barry')", }, { input: "insert into customer () values ()", output: "insert into customer values ()", }, { input: "update /* simple */ a set b = 3", }, { input: "update /* a.b */ a.b set b = 3", }, { input: "update /* list */ a set b = 3, c = 4", }, { input: "update /* expression */ a set b = 3 + 4", }, { input: "update /* where */ a set b = 3 where a = b", }, { input: "update /* order */ a set b = 3 order by c desc", }, { input: "update /* limit */ a set b = 3 limit c", }, { input: "update /* bool in update */ a set b = true", }, { input: "update /* bool expr in update */ a set b = 5 > 2", }, { input: "update /* bool in update where */ a set b = 5 where c", }, { input: "update /* table qualifier */ a set a.b = 3", }, { input: "update /* table qualifier */ a set t.a.b = 3", }, { input: "update /* table alias */ tt aa set aa.cc = 3", output: "update /* table alias */ tt as aa set aa.cc = 3", }, { input: "update (select id from foo) subqalias set id = 4", output: "update (select id from foo) as subqalias set id = 4", }, { input: "update foo f, bar b set f.id = b.id where b.name = 'test'", output: "update foo as f, bar as b set f.id = b.id where b.`name` 
= 'test'", }, { input: "update foo f join bar b on f.name = b.name set f.id = b.id where b.name = 'test'", output: "update foo as f join bar as b on f.`name` = b.`name` set f.id = b.id where b.`name` = 'test'", }, { input: "update /* ignore */ ignore a set b = 3", }, { input: "delete /* simple */ from a", }, { input: "delete /* a.b */ from a.b", }, { input: "delete /* where */ from a where a = b", }, { input: "delete /* order */ from a order by b desc", }, { input: "delete /* limit */ from a limit b", }, { input: "delete a from a join b on a.id = b.id where b.name = 'test'", output: "delete a from a join b on a.id = b.id where b.`name` = 'test'", }, { input: "delete a, b from a, b where a.id = b.id and b.name = 'test'", output: "delete a, b from a, b where a.id = b.id and b.`name` = 'test'", }, { input: "delete /* simple */ ignore from a", }, { input: "delete ignore from a", }, { input: "delete /* limit */ ignore from a", }, { input: "delete from a1, a2 using t1 as a1 inner join t2 as a2 where a1.id=a2.id", output: "delete a1, a2 from t1 as a1 join t2 as a2 where a1.id = a2.id", }, { input: "set /* simple */ a = 3", }, { input: "set #simple\n b = 4", }, { input: "set character_set_results = utf8", }, { input: "set @@session.autocommit = true", }, { input: "set @@session.`autocommit` = true", }, { input: "set @@session.'autocommit' = true", }, { input: "set @@session.\"autocommit\" = true", }, { input: "set @@session.autocommit = ON", output: "set @@session.autocommit = 'on'", }, { input: "set @@session.autocommit= OFF", output: "set @@session.autocommit = 'off'", }, { input: "set autocommit = on", output: "set autocommit = 'on'", }, { input: "set autocommit = off", output: "set autocommit = 'off'", }, { input: "set names utf8 collate foo", output: "set names 'utf8'", }, { input: "set names utf8 collate 'foo'", output: "set names 'utf8'", }, { input: "set character set utf8", output: "set charset 'utf8'", }, { input: "set character set 'utf8'", output: "set charset 
'utf8'", }, { input: "set character set \"utf8\"", output: "set charset 'utf8'", }, { input: "set charset default", output: "set charset default", }, { input: "set session wait_timeout = 3600", output: "set session wait_timeout = 3600", }, { input: "set session wait_timeout = 3600, session autocommit = off", output: "set session wait_timeout = 3600, session autocommit = 'off'", }, { input: "set session wait_timeout = 3600, @@global.autocommit = off", output: "set session wait_timeout = 3600, @@global.autocommit = 'off'", }, { input: "set /* list */ a = 3, b = 4", }, { input: "set /* mixed list */ a = 3, names 'utf8', charset 'ascii', b = 4", }, { input: "set session transaction isolation level repeatable read", }, { input: "set transaction isolation level repeatable read", }, { input: "set global transaction isolation level repeatable read", }, { input: "set transaction isolation level repeatable read", }, { input: "set transaction isolation level read committed", }, { input: "set transaction isolation level read uncommitted", }, { input: "set transaction isolation level serializable", }, { input: "set transaction read write", }, { input: "set transaction read only", }, { input: "set tx_read_only = 1", }, { input: "set tx_read_only = 0", }, { input: "set transaction_read_only = 1", }, { input: "set transaction_read_only = 0", }, { input: "set tx_isolation = 'repeatable read'", }, { input: "set tx_isolation = 'read committed'", }, { input: "set tx_isolation = 'read uncommitted'", }, { input: "set tx_isolation = 'serializable'", }, { input: "set sql_safe_updates = 0", }, { input: "set sql_safe_updates = 1", }, { input: "set @variable = 42", }, { input: "set @period.variable = 42", }, { input: "alter table a add foo", output: "alter table a", }, { input: "alter table a add spatial key foo (column1)", output: "alter table a", }, { input: "alter table a add fulltext key foo (column1)", output: "alter table a", }, { input: "alter table a add unique key foo (column1)", 
output: "alter table a", }, { input: "alter table `By` add foo", output: "alter table `By`", }, { input: "alter table a alter foo", output: "alter table a", }, { input: "alter table a change foo", output: "alter table a", }, { input: "alter table a modify foo", output: "alter table a", }, { input: "alter table a drop foo", output: "alter table a", }, { input: "alter table a disable foo", output: "alter table a", }, { input: "alter table a enable foo", output: "alter table a", }, { input: "alter table a order foo", output: "alter table a", }, { input: "alter table a default foo", output: "alter table a", }, { input: "alter table a discard foo", output: "alter table a", }, { input: "alter table a import foo", output: "alter table a", }, { input: "alter table a rename b", output: "rename table a to b", }, { input: "alter table `By` rename `bY`", output: "rename table `By` to `bY`", }, { input: "alter table a rename to b", output: "rename table a to b", }, { input: "alter table a rename as b", output: "rename table a to b", }, { input: "alter table a rename index foo to bar", output: "alter table a", }, { input: "alter table a rename key foo to bar", output: "alter table a", }, { input: "alter table e auto_increment = 20", output: "alter table e", }, { input: "alter table e character set = 'ascii'", output: "alter table e", }, { input: "alter table e default character set = 'ascii'", output: "alter table e", }, { input: "alter table e comment = 'hello'", output: "alter table e", }, { input: "alter table a reorganize partition b into (partition c values less than (?), partition d values less than (maxvalue))", output: "alter table a reorganize partition b into (partition c values less than (:v1), partition d values less than (maxvalue))", }, { input: "alter table a partition by range (id) (partition p0 values less than (10), partition p1 values less than (maxvalue))", output: "alter table a", }, { input: "alter table a add column id int", output: "alter table a", }, { 
input: "alter table a add index idx (id)", output: "alter table a", }, { input: "alter table a add fulltext index idx (id)", output: "alter table a", }, { input: "alter table a add spatial index idx (id)", output: "alter table a", }, { input: "alter table a add fulltext index idx (id)", output: "alter table a", }, { input: "alter table a add foreign key", output: "alter table a", }, { input: "alter table a add primary key", output: "alter table a", }, { input: "alter table a add constraint", output: "alter table a", }, { input: "alter table a add id", output: "alter table a", }, { input: "alter table a drop column id int", output: "alter table a", }, { input: "alter table a drop partition p2712", output: "alter table a", }, { input: "alter table a drop index idx (id)", output: "alter table a", }, { input: "alter table a drop fulltext index idx (id)", output: "alter table a", }, { input: "alter table a drop spatial index idx (id)", output: "alter table a", }, { input: "alter table a drop fulltext index idx (id)", output: "alter table a", }, { input: "alter table a add check ch_1", output: "alter table a", }, { input: "alter table a drop check ch_1", output: "alter table a", }, { input: "alter table a drop foreign key", output: "alter table a", }, { input: "alter table a drop primary key", output: "alter table a", }, { input: "alter table a drop constraint", output: "alter table a", }, { input: "alter table a drop id", output: "alter table a", }, { input: "alter database character set geostd8", }, { input: "alter database d character set geostd8", }, { input: "alter database d default collate 'utf8_bin'", }, { input: "alter database default collate 'utf8_bin'", }, { input: "alter database d upgrade data directory name", }, { input: "alter database d collate = 'utf8_bin'", output: "alter database d collate 'utf8_bin'", }, { input: "alter schema d default character set = geostd8", output: "alter database d default character set geostd8", }, { input: "alter schema d 
character set = geostd8", output: "alter database d character set geostd8", }, { input: "alter schema d default collate = 'utf8_bin'", output: "alter database d default collate 'utf8_bin'", }, { input: "alter schema d collate = 'utf8_bin' character set = geostd8 character set = geostd8", output: "alter database d collate 'utf8_bin' character set geostd8 character set geostd8", }, { input: "create table a", }, { input: "create table a (\n\t`a` int\n)", output: "create table a (\n\ta int\n)", }, { input: "create table `by` (\n\t`by` char\n)", }, { input: "create table test (\n\t__year year(4)\n)", }, { input: "create table if not exists a (\n\t`a` int\n)", output: "create table a (\n\ta int\n)", }, { input: "create table a ignore me this is garbage", output: "create table a", }, { input: "create table a (a int, b char, c garbage)", output: "create table a", }, { input: "create table a (b1 bool not null primary key, b2 boolean not null)", output: "create table a (\n\tb1 bool not null primary key,\n\tb2 boolean not null\n)", }, { input: "alter vschema create vindex hash_vdx using hash", }, { input: "alter vschema create vindex keyspace.hash_vdx using hash", }, { input: "alter vschema create vindex lookup_vdx using lookup with owner=user, table=name_user_idx, from=name, to=user_id", }, { input: "alter vschema create vindex xyz_vdx using xyz with param1=hello, param2='world', param3=123", }, { input: "alter vschema drop vindex hash_vdx", }, { input: "alter vschema drop vindex ks.hash_vdx", }, { input: "alter vschema add table a", }, { input: "alter vschema add table ks.a", }, { input: "alter vschema add sequence a_seq", }, { input: "alter vschema add sequence ks.a_seq", }, { input: "alter vschema on a add auto_increment id using a_seq", }, { input: "alter vschema on ks.a add auto_increment id using a_seq", }, { input: "alter vschema drop table a", }, { input: "alter vschema drop table ks.a", }, { input: "alter vschema on a add vindex hash (id)", }, { input: "alter 
vschema on ks.a add vindex hash (id)", }, { input: "alter vschema on a add vindex `hash` (`id`)", output: "alter vschema on a add vindex hash (id)", }, { input: "alter vschema on `ks`.a add vindex `hash` (`id`)", output: "alter vschema on ks.a add vindex hash (id)", }, { input: "alter vschema on a add vindex hash (id) using `hash`", output: "alter vschema on a add vindex hash (id) using hash", }, { input: "alter vschema on a add vindex `add` (`add`)", }, { input: "alter vschema on a add vindex hash (id) using hash", }, { input: "alter vschema on a add vindex hash (id) using `hash`", output: "alter vschema on a add vindex hash (id) using hash", }, { input: "alter vschema on user add vindex name_lookup_vdx (name) using lookup_hash with owner=user, table=name_user_idx, from=name, to=user_id", output: "alter vschema on user add vindex name_lookup_vdx (`name`) using lookup_hash with owner=user, table=name_user_idx, from=name, to=user_id", }, { input: "alter vschema on user2 add vindex name_lastname_lookup_vdx (name,lastname) using lookup with owner=`user`, table=`name_lastname_keyspace_id_map`, from=`name,lastname`, to=`keyspace_id`", output: "alter vschema on user2 add vindex name_lastname_lookup_vdx (`name`, lastname) using lookup with owner=user, table=name_lastname_keyspace_id_map, from=name,lastname, to=keyspace_id", }, { input: "alter vschema on a drop vindex hash", }, { input: "alter vschema on ks.a drop vindex hash", }, { input: "alter vschema on a drop vindex `hash`", output: "alter vschema on a drop vindex hash", }, { input: "alter vschema on a drop vindex hash", output: "alter vschema on a drop vindex hash", }, { input: "alter vschema on a drop vindex `add`", output: "alter vschema on a drop vindex `add`", }, { input: "create index a on b (col1)", }, { input: "create unique index a on b (col1)", }, { input: "create unique index a using foo on b (col1 desc)", }, { input: "create fulltext index a using foo on b (col1)", }, { input: "create spatial index a using 
foo on b (col1)", }, { input: "create index a on b (col1) using btree key_block_size 12 with parser 'a' comment 'string' algorithm inplace lock none", }, { input: "create index a on b ((col1 + col2), (col1*col2))", output: "create index a on b ()", partialDDL: true, }, { input: "create view a", output: "create table a", }, { input: "create or replace view a", output: "create table a", }, { input: "alter view a", output: "alter table a", }, { input: "rename table a to b", output: "rename table a to b", }, { input: "rename table a to b, b to c", output: "rename table a to b, b to c", }, { input: "drop view a", output: "drop table a", }, { input: "drop table a", output: "drop table a", }, { input: "drop table a, b", output: "drop table a, b", }, { input: "drop table if exists a", output: "drop table if exists a", }, { input: "drop view if exists a", output: "drop table if exists a", }, { input: "drop index b on a", output: "alter table a", }, { input: "analyze table a", output: "otherread", }, { input: "flush tables", output: "flush", }, { input: "flush tables with read lock", output: "flush", }, { input: "show binary logs", output: "show binary logs", }, { input: "show binlog events", output: "show binlog", }, { input: "show character set", output: "show charset", }, { input: "show character set like '%foo'", output: "show charset like '%foo'", }, { input: "show charset", output: "show charset", }, { input: "show charset like '%foo'", output: "show charset like '%foo'", }, { input: "show charset where 'charset' = 'utf8'", output: "show charset where 'charset' = 'utf8'", }, { input: "show charset where 'charset' = '%foo'", output: "show charset where 'charset' = '%foo'", }, { input: "show collation", output: "show collation", }, { input: "show collation where `Charset` = 'utf8' and `Collation` = 'utf8_bin'", output: "show collation where `Charset` = 'utf8' and `Collation` = 'utf8_bin'", }, { input: "show create database d", output: "show create database", }, { input: 
"show create event e", output: "show create event", }, { input: "show create function f", output: "show create function", }, { input: "show create procedure p", output: "show create procedure", }, { input: "show create table t", output: "show create table t", }, { input: "show create trigger t", output: "show create trigger", }, { input: "show create user u", output: "show create user", }, { input: "show create view v", output: "show create view", }, { input: "show databases", output: "show databases", }, { input: "show databases like '%'", output: "show databases like '%'", }, { input: "show schemas", output: "show schemas", }, { input: "show schemas like '%'", output: "show schemas like '%'", }, { input: "show engine INNODB", output: "show engine", }, { input: "show engines", output: "show engines", }, { input: "show storage engines", output: "show storage", }, { input: "show errors", output: "show errors", }, { input: "show events", output: "show events", }, { input: "show function code func", output: "show function", }, { input: "show function status", output: "show function", }, { input: "show grants for 'root@localhost'", output: "show grants", }, { input: "show index from t", }, { input: "show indexes from t", }, { input: "show keys from t", }, { input: "show master status", output: "show master", }, { input: "show open tables", output: "show open", }, { input: "show plugins", output: "show plugins", }, { input: "show privileges", output: "show privileges", }, { input: "show procedure code p", output: "show procedure", }, { input: "show procedure status", output: "show procedure", }, { input: "show processlist", output: "show processlist", }, { input: "show full processlist", output: "show processlist", }, { input: "show profile cpu for query 1", output: "show profile", }, { input: "show profiles", output: "show profiles", }, { input: "show relaylog events", output: "show relaylog", }, { input: "show slave hosts", output: "show slave", }, { input: "show 
slave status", output: "show slave", }, { input: "show status", output: "show status", }, { input: "show global status", output: "show global status", }, { input: "show session status", output: "show session status", }, { input: "show table status", }, { input: "show table status from dbname", }, { input: "show table status in dbname", output: "show table status from dbname", }, { input: "show table status in dbname LIKE '%' ", output: "show table status from dbname like '%'", }, { input: "show table status from dbname Where col=42 ", output: "show table status from dbname where col = 42", }, { input: "show tables", }, { input: "show tables like '%keyspace%'", }, { input: "show tables where 1 = 0", }, { input: "show tables from a", }, { input: "show tables from a where 1 = 0", }, { input: "show tables from a like '%keyspace%'", }, { input: "show full tables", }, { input: "show full tables from a", }, { input: "show full tables in a", output: "show full tables from a", }, { input: "show full tables from a like '%keyspace%'", }, { input: "show full tables from a where 1 = 0", }, { input: "show full tables like '%keyspace%'", }, { input: "show full tables where 1 = 0", }, { input: "show full columns in a in b like '%'", output: "show full columns from a from b like '%'", }, { input: "show full columns from messages from test_keyspace like '%'", }, { input: "show full fields from a like '%'", output: "show full columns from a like '%'", }, { input: "show fields from a where 1 = 1", output: "show columns from a where 1 = 1", }, { input: "show triggers", output: "show triggers", }, { input: "show variables", output: "show variables", }, { input: "show global variables", output: "show global variables", }, { input: "show session variables", output: "show session variables", }, { input: "show vitess_keyspaces", }, { input: "show vitess_keyspaces like '%'", }, { input: "show vitess_shards", }, { input: "show vitess_shards like '%'", }, { input: "show vitess_tablets", }, { 
input: "show vitess_tablets like '%'", }, { input: "show vitess_tablets where hostname = 'some-tablet'", }, { input: "show vschema tables", }, { input: "show vschema vindexes", }, { input: "show vschema vindexes on t", }, { input: "show warnings", output: "show warnings", }, { input: "select warnings from t", output: "select `warnings` from t", }, { input: "show foobar", output: "show foobar", }, { input: "show foobar like select * from table where syntax is 'ignored'", output: "show foobar", }, { input: "use db", output: "use db", }, { input: "use duplicate", output: "use `duplicate`", }, { input: "use `ks:-80@master`", output: "use `ks:-80@master`", }, { input: "use @replica", output: "use `@replica`", }, { input: "use ks@replica", output: "use `ks@replica`", }, { input: "describe select * from t", output: "explain select * from t", }, { input: "desc select * from t", output: "explain select * from t", }, { input: "desc foobar", output: "otherread", }, { input: "explain t1", output: "otherread", }, { input: "explain t1 col", output: "otherread", }, { input: "explain select * from t", }, { input: "explain format = traditional select * from t", }, { input: "explain analyze select * from t", }, { input: "explain format = tree select * from t", }, { input: "explain format = json select * from t", }, { input: "explain format = vitess select * from t", }, { input: "describe format = vitess select * from t", output: "explain format = vitess select * from t", }, { input: "explain delete from t", }, { input: "explain insert into t(col1, col2) values (1, 2)", }, { input: "explain update t set col = 2", }, { input: "truncate table foo", output: "truncate table foo", }, { input: "truncate foo", output: "truncate table foo", }, { input: "repair foo", output: "otheradmin", }, { input: "optimize foo", output: "otheradmin", }, { input: "lock tables foo read", output: "lock tables foo read", }, { input: "lock tables foo write", output: "lock tables foo write", }, { input: "lock 
tables foo read local", output: "lock tables foo read local", }, { input: "lock tables foo low_priority write", output: "lock tables foo low_priority write", }, { input: "unlock tables", output: "unlock tables", }, { input: "select /* EQ true */ 1 from t where a = true", }, { input: "select /* EQ false */ 1 from t where a = false", }, { input: "select /* NE true */ 1 from t where a != true", }, { input: "select /* NE false */ 1 from t where a != false", }, { input: "select /* LT true */ 1 from t where a < true", }, { input: "select /* LT false */ 1 from t where a < false", }, { input: "select /* GT true */ 1 from t where a > true", }, { input: "select /* GT false */ 1 from t where a > false", }, { input: "select /* LE true */ 1 from t where a <= true", }, { input: "select /* LE false */ 1 from t where a <= false", }, { input: "select /* GE true */ 1 from t where a >= true", }, { input: "select /* GE false */ 1 from t where a >= false", }, { input: "select * from t order by a collate utf8_general_ci", output: "select * from t order by a collate utf8_general_ci asc", }, { input: "select k collate latin1_german2_ci as k1 from t1 order by k1 asc", }, { input: "select * from t group by a collate utf8_general_ci", }, { input: "select MAX(k collate latin1_german2_ci) from t1", }, { input: "select distinct k collate latin1_german2_ci from t1", }, { input: "select * from t1 where 'Müller' collate latin1_german2_ci = k", }, { input: "select * from t1 where k like 'Müller' collate latin1_german2_ci", }, { input: "select k from t1 group by k having k = 'Müller' collate latin1_german2_ci", }, { input: "select k from t1 join t2 order by a collate latin1_german2_ci asc, b collate latin1_german2_ci asc", }, { input: "select k collate 'latin1_german2_ci' as k1 from t1 order by k1 asc", output: "select k collate latin1_german2_ci as k1 from t1 order by k1 asc", }, { input: "select /* drop trailing semicolon */ 1 from dual;", output: "select /* drop trailing semicolon */ 1 from 
dual", }, { input: "select /* cache directive */ sql_no_cache 'foo' from t", }, { input: "select distinct sql_no_cache 'foo' from t", }, { input: "select sql_no_cache distinct 'foo' from t", output: "select distinct sql_no_cache 'foo' from t", }, { input: "select sql_no_cache straight_join distinct 'foo' from t", output: "select distinct sql_no_cache straight_join 'foo' from t", }, { input: "select straight_join distinct sql_no_cache 'foo' from t", output: "select distinct sql_no_cache straight_join 'foo' from t", }, { input: "select sql_calc_found_rows 'foo' from t", output: "select sql_calc_found_rows 'foo' from t", }, { input: "select binary 'a' = 'A' from t", }, { input: "select 1 from t where foo = _binary 'bar'", }, { input: "select 1 from t where foo = _utf8 'bar' and bar = _latin1 'sjösjuk'", }, { input: "select 1 from t where foo = _binary'bar'", output: "select 1 from t where foo = _binary 'bar'", }, { input: "select 1 from t where foo = _utf8mb4 'bar'", }, { input: "select 1 from t where foo = _utf8mb4'bar'", output: "select 1 from t where foo = _utf8mb4 'bar'", }, { input: "select match(a) against ('foo') from t", }, { input: "select match(a1, a2) against ('foo' in natural language mode with query expansion) from t", }, { input: "select database()", output: "select database() from dual", }, { input: "select schema()", output: "select schema() from dual", }, { input: "select title from video as v where match(v.title, v.tag) against ('DEMO' in boolean mode)", }, { input: "select name, group_concat(score) from t group by name", output: "select `name`, group_concat(score) from t group by `name`", }, { input: "select name, group_concat(distinct id, score order by id desc separator ':') from t group by name", output: "select `name`, group_concat(distinct id, score order by id desc separator ':') from t group by `name`", }, { input: "select name, group_concat(distinct id, score order by id desc separator ':' limit 1) from t group by name", output: "select 
`name`, group_concat(distinct id, score order by id desc separator ':' limit 1) from t group by `name`", }, { input: "select name, group_concat(distinct id, score order by id desc separator ':' limit 10, 2) from t group by name", output: "select `name`, group_concat(distinct id, score order by id desc separator ':' limit 10, 2) from t group by `name`", }, { input: "select * from t partition (p0)", }, { input: "select * from t partition (p0, p1)", }, { input: "select e.id, s.city from employees as e join stores partition (p1) as s on e.store_id = s.id", }, { input: "select truncate(120.3333, 2) from dual", }, { input: "update t partition (p0) set a = 1", }, { input: "insert into t partition (p0) values (1, 'asdf')", }, { input: "insert into t1 select * from t2 partition (p0)", }, { input: "replace into t partition (p0) values (1, 'asdf')", }, { input: "delete from t partition (p0) where a = 1", }, { input: "stream * from t", }, { input: "vstream * from t", }, { input: "stream /* comment */ * from t", }, { input: "begin", }, { input: "begin;", output: "begin", }, { input: "start transaction", output: "begin", }, { input: "commit", }, { input: "rollback", }, { input: "create database test_db", }, { input: "create schema test_db", output: "create database test_db", }, { input: "create database if not exists test_db", }, { input: "create schema if not exists test_db", output: "create database if not exists test_db", }, { input: "create database test_db default collate 'utf8mb4_general_ci' collate utf8mb4_general_ci", }, { input: "create database test_db character set geostd8", }, { input: "create database test_db character set * unparsable", output: "create database test_db", partialDDL: true, }, { input: "drop database test_db", }, { input: "drop schema test_db", output: "drop database test_db", }, { input: "drop database if exists test_db", }, { input: "delete a.*, b.* from tbl_a a, tbl_b b where a.id = b.id and b.name = 'test'", output: "delete a, b from tbl_a as a, 
tbl_b as b where a.id = b.id and b.`name` = 'test'", }, { input: "select distinctrow a.* from (select (1) from dual union all select 1 from dual) a", output: "select distinct a.* from (select 1 from dual union all select 1 from dual) as a", }, { input: "select `weird function name`() from t", }, { input: "select status() from t", // should not escape function names that are keywords }, { input: "select * from `weird table name`", }, { input: "SHOW FULL TABLES FROM `jiradb` LIKE 'AO_E8B6CC_ISSUE_MAPPING'", output: "show full tables from jiradb like 'AO_E8B6CC_ISSUE_MAPPING'", }, { input: "SHOW FULL COLUMNS FROM AO_E8B6CC_ISSUE_MAPPING FROM jiradb LIKE '%'", output: "show full columns from AO_E8B6CC_ISSUE_MAPPING from jiradb like '%'", }, { input: "SHOW KEYS FROM `AO_E8B6CC_ISSUE_MAPPING` FROM `jiradb`", output: "show keys from AO_E8B6CC_ISSUE_MAPPING from jiradb", }, { input: "SHOW CREATE TABLE `jiradb`.`AO_E8B6CC_ISSUE_MAPPING`", output: "show create table jiradb.AO_E8B6CC_ISSUE_MAPPING", }, { input: "SHOW INDEX FROM `AO_E8B6CC_ISSUE_MAPPING` FROM `jiradb`", output: "show index from AO_E8B6CC_ISSUE_MAPPING from jiradb", }, { input: "SHOW FULL TABLES FROM `jiradb` LIKE '%'", output: "show full tables from jiradb like '%'", }, { input: "SHOW EXTENDED INDEX FROM `AO_E8B6CC_PROJECT_MAPPING` FROM `jiradb`", output: "show extended index from AO_E8B6CC_PROJECT_MAPPING from jiradb", }, { input: "SHOW EXTENDED KEYS FROM `AO_E8B6CC_ISSUE_MAPPING` FROM `jiradb`", output: "show extended keys from AO_E8B6CC_ISSUE_MAPPING from jiradb", }, { input: "SHOW CREATE TABLE `jiradb`.`AO_E8B6CC_ISSUE_MAPPING`", output: "show create table jiradb.AO_E8B6CC_ISSUE_MAPPING", }, { input: "create table t1 ( check (c1 <> c2), c1 int check (c1 > 10), c2 int constraint c2_positive check (c2 > 0), c3 int check (c3 < 100), constraint c1_nonzero check (c1 <> 0), check (c1 > c3))", output: "create table t1 (\n" + "\tc1 int,\n" + "\tc2 int,\n" + "\tc3 int,\n" + "\tcheck constraint on expression c1 != 
c2 enforced,\n" + "\tcheck constraint on expression c1 > 10 enforced,\n" + "\tconstraint c2_positive check constraint on expression c2 > 0 enforced,\n" + "\tcheck constraint on expression c3 < 100 enforced,\n" + "\tconstraint c1_nonzero check constraint on expression c1 != 0 enforced,\n" + "\tcheck constraint on expression c1 > c3 enforced\n)", }, { input: "SHOW INDEXES FROM `AO_E8B6CC_ISSUE_MAPPING` FROM `jiradb`", output: "show indexes from AO_E8B6CC_ISSUE_MAPPING from jiradb", }, { input: "SHOW FULL TABLES FROM `jiradb` LIKE '%'", output: "show full tables from jiradb like '%'", }, { input: "SHOW EXTENDED INDEXES FROM `AO_E8B6CC_PROJECT_MAPPING` FROM `jiradb`", output: "show extended indexes from AO_E8B6CC_PROJECT_MAPPING from jiradb", }, { input: "SHOW EXTENDED INDEXES IN `AO_E8B6CC_PROJECT_MAPPING` IN `jiradb`", output: "show extended indexes from AO_E8B6CC_PROJECT_MAPPING from jiradb", }, { input: "do 1", output: "otheradmin", }, { input: "do funcCall(), 2 = 1, 3 + 1", output: "otheradmin", }, { input: "savepoint a", }, { input: "savepoint `@@@;a`", }, { input: "rollback to a", }, { input: "rollback to `@@@;a`", }, { input: "rollback work to a", output: "rollback to a", }, { input: "rollback to savepoint a", output: "rollback to a", }, { input: "rollback work to savepoint a", output: "rollback to a", }, { input: "release savepoint a", }, { input: "release savepoint `@@@;a`", }} ) func TestValid(t *testing.T) { for _, tcase := range validSQL { t.Run(tcase.input, func(t *testing.T) { if tcase.output == "" { tcase.output = tcase.input } tree, err := Parse(tcase.input) require.NoError(t, err, tcase.input) out := String(tree) if tcase.output != out { t.Errorf("Parsing failed. \nExpected/Got:\n%s\n%s", tcase.output, out) } // CREATE INDEX currently only has 5.7 specifications. // For mysql 8.0 syntax, the query is not entirely parsed. // Add more structs as we go on adding full parsing support for DDL constructs for 5.7 syntax. 
switch x := tree.(type) {
			// Statements that are only partially parsed must report
			// IsFullyParsed() == false; tcase.partialDDL marks those cases.
			case *CreateIndex:
				assert.Equal(t, !tcase.partialDDL, x.IsFullyParsed())
			case *CreateDatabase:
				assert.Equal(t, !tcase.partialDDL, x.IsFullyParsed())
			case *AlterDatabase:
				assert.Equal(t, !tcase.partialDDL, x.IsFullyParsed())
			}
			// This test just exercises the tree walking functionality. There is
			// no automated way to verify that a node calls all its children, but
			// we can examine code coverage and ensure that all walkSubtree
			// functions were called.
			Walk(func(node SQLNode) (bool, error) {
				return true, nil
			}, tree)
		})
	}
}

// TestParallelValid ensures there is no corruption from using a pooled
// yyParserImpl in Parse: many goroutines concurrently parse random valid
// statements and each result must still stringify to the expected SQL.
func TestParallelValid(t *testing.T) {
	parallelism := 100
	numIters := 1000

	wg := sync.WaitGroup{}
	wg.Add(parallelism)
	for i := 0; i < parallelism; i++ {
		go func() {
			defer wg.Done()
			for j := 0; j < numIters; j++ {
				// tcase is a copy, so mutating output below is race-free.
				tcase := validSQL[rand.Intn(len(validSQL))]
				if tcase.output == "" {
					tcase.output = tcase.input
				}
				tree, err := Parse(tcase.input)
				if err != nil {
					t.Errorf("Parse(%q) err: %v, want nil", tcase.input, err)
					continue
				}
				out := String(tree)
				if out != tcase.output {
					t.Errorf("Parse(%q) = %q, want: %q", tcase.input, out, tcase.output)
				}
			}
		}()
	}
	wg.Wait()
}

// TestInvalid checks that malformed statements are rejected and that the
// returned error message contains the expected fragment.
func TestInvalid(t *testing.T) {
	invalidSQL := []struct {
		input string
		err   string
	}{{
		input: "select a, b from (select * from tbl) sort by a",
		err:   "syntax error",
	}, {
		input: "/*!*/",
		err:   "empty statement",
	}}

	for _, tcase := range invalidSQL {
		_, err := Parse(tcase.input)
		if err == nil {
			t.Errorf("Parse invalid query(%q), got: nil, want: %s...", tcase.input, tcase.err)
		}
		if err != nil && !strings.Contains(err.Error(), tcase.err) {
			t.Errorf("Parse invalid query(%q), got: %v, want: %s...", tcase.input, err, tcase.err)
		}
	}
}

// TestCaseSensitivity verifies which identifiers keep their case when parsed
// and re-serialized (table/column/function names) and which are normalized to
// lowercase (view names, special keywords such as IF).
func TestCaseSensitivity(t *testing.T) {
	validSQL := []struct {
		input  string
		output string
	}{{
		input:  "create table A (\n\t`B` int\n)",
		output: "create table A (\n\tB int\n)",
	}, {
		input: "create index b on A (col1 desc)",
	}, {
		input:  "alter table A foo",
		output: "alter table A",
	}, {
		input:  "alter table A convert",
		output: "alter table A",
	}, {
		// View names get lower-cased.
		input:  "alter view A foo",
		output: "alter table a",
	}, {
		input:  "alter table A rename to B",
		output: "rename table A to B",
	}, {
		input: "rename table A to B",
	}, {
		input:  "drop table B",
		output: "drop table B",
	}, {
		input:  "drop table if exists B",
		output: "drop table if exists B",
	}, {
		input:  "drop index b on A",
		output: "alter table A",
	}, {
		input: "select a from B",
	}, {
		input: "select A as B from C",
	}, {
		input: "select B.* from c",
	}, {
		input: "select B.A from c",
	}, {
		input: "select * from B as C",
	}, {
		input: "select * from A.B",
	}, {
		input: "update A set b = 1",
	}, {
		input: "update A.B set b = 1",
	}, {
		input: "select A() from b",
	}, {
		input: "select A(B, C) from b",
	}, {
		input: "select A(distinct B, C) from b",
	}, {
		// IF is an exception. It's always lower-cased.
		input:  "select IF(B, C) from b",
		output: "select if(B, C) from b",
	}, {
		input: "select * from b use index (A)",
	}, {
		input: "insert into A(A, B) values (1, 2)",
	}, {
		input:  "CREATE TABLE A (\n\t`A` int\n)",
		output: "create table A (\n\tA int\n)",
	}, {
		input:  "create view A",
		output: "create table a",
	}, {
		input:  "alter view A",
		output: "alter table a",
	}, {
		input:  "drop view A",
		output: "drop table a",
	}, {
		input:  "drop view if exists A",
		output: "drop table if exists a",
	}, {
		input:  "select /* lock in SHARE MODE */ 1 from t lock in SHARE MODE",
		output: "select /* lock in SHARE MODE */ 1 from t lock in share mode",
	}, {
		input:  "select next VALUE from t",
		output: "select next 1 values from t",
	}, {
		input: "select /* use */ 1 from t1 use index (A) where b = 1",
	}}

	for _, tcase := range validSQL {
		// An empty output means the statement is expected to round-trip
		// unchanged.
		if tcase.output == "" {
			tcase.output = tcase.input
		}
		tree, err := Parse(tcase.input)
		if err != nil {
			t.Errorf("input: %s, err: %v", tcase.input, err)
			continue
		}
		out := String(tree)
		if out != tcase.output {
			t.Errorf("out: %s, want %s", out, tcase.output)
		}
	}
}

// TestKeywords checks normalization of keyword-like functions (current_date,
// utc_timestamp, ...) and backquoting of non-reserved keywords used as
// identifiers.
func TestKeywords(t *testing.T) {
	validSQL := []struct {
		input  string
		output string
	}{{
		input:  "select current_timestamp",
		output: "select current_timestamp() from dual",
	}, {
		input: "update t set a = current_timestamp()",
	}, {
		input: "update t set a = current_timestamp(5)",
	}, {
		input:  "select a, current_date from t",
		output: "select a, current_date() from t",
	}, {
		input:  "insert into t(a, b) values (current_date, current_date())",
		output: "insert into t(a, b) values (current_date(), current_date())",
	}, {
		// NOTE(review): "utc_timestmp" looks like a deliberate typo exercising
		// unknown-function passthrough — confirm before "fixing" it.
		input: "select * from t where a > utc_timestmp()",
	}, {
		input: "select * from t where a > utc_timestamp(4)",
	}, {
		input:  "update t set b = utc_timestamp + 5",
		output: "update t set b = utc_timestamp() + 5",
	}, {
		input:  "select utc_time, utc_date, utc_time(6)",
		output: "select utc_time(), utc_date(), utc_time(6) from dual",
	}, {
		input:  "select 1 from dual where localtime > utc_time",
		output: "select 1 from dual where localtime() > utc_time()",
	}, {
		input:  "select 1 from dual where localtime(2) > utc_time(1)",
		output: "select 1 from dual where localtime(2) > utc_time(1)",
	}, {
		input:  "update t set a = localtimestamp(), b = utc_timestamp",
		output: "update t set a = localtimestamp(), b = utc_timestamp()",
	}, {
		input:  "update t set a = localtimestamp(10), b = utc_timestamp(13)",
		output: "update t set a = localtimestamp(10), b = utc_timestamp(13)",
	}, {
		input: "insert into t(a) values (unix_timestamp)",
	}, {
		input: "select replace(a, 'foo', 'bar') from t",
	}, {
		input: "update t set a = replace('1234', '2', '1')",
	}, {
		input: "insert into t(a, b) values ('foo', 'bar') on duplicate key update a = replace(hex('foo'), 'f', 'b')",
	}, {
		input: "update t set a = left('1234', 3)",
	}, {
		input: "select left(a, 5) from t",
	}, {
		input: "update t set d = adddate(date('2003-12-31 01:02:03'), interval 5 days)",
	}, {
		input: "insert into t(a, b) values (left('foo', 1), 'b')",
	}, {
		input: "insert /* qualified function */ into t(a, b) values (test.PI(), 'b')",
	}, {
		input:  "select /* keyword in qualified id */ * from t join z on t.key = z.key",
		output: "select /* keyword in qualified id */ * from t join z on t.`key` = z.`key`",
	}, {
		input:  "select /* non-reserved keywords as unqualified cols */ date, view, offset from t",
		output: "select /* non-reserved keywords as unqualified cols */ `date`, `view`, `offset` from t",
	}, {
		input:  "select /* share and mode as cols */ share, mode from t where share = 'foo'",
		output: "select /* share and mode as cols */ `share`, `mode` from t where `share` = 'foo'",
	}, {
		input:  "select /* unused keywords as cols */ `write`, varying from t where trailing = 'foo'",
		output: "select /* unused keywords as cols */ `write`, `varying` from t where `trailing` = 'foo'",
	}, {
		input:  "select status from t",
		output: "select `status` from t",
	}, {
		input:  "select Status from t",
		output: "select `Status` from t",
	}, {
		input:  "select variables from t",
		output: "select `variables` from t",
	}, {
		input:  "select Variables from t",
		output: "select `Variables` from t",
	}}

	for _, tcase := range validSQL {
		if tcase.output == "" {
			tcase.output = tcase.input
		}
		tree, err := Parse(tcase.input)
		if err != nil {
			t.Errorf("input: %s, err: %v", tcase.input, err)
			continue
		}
		out := String(tree)
		if out != tcase.output {
			t.Errorf("out: %s, want %s", out, tcase.output)
		}
	}
}

// TestConvert checks parsing and normalization of CAST/CONVERT expressions.
func TestConvert(t *testing.T) {
	validSQL := []struct {
		input  string
		output string
	}{{
		input:  "select cast('abc' as date) from t",
		output: "select convert('abc', date) from t",
	}, {
		input: "select convert('abc', binary(4)) from t",
	}, {
		input: "select convert('abc', binary) from t",
	}, {
		input: "select convert('abc', char character set binary) from t",
	}, {
		input: "select convert('abc', char(4) ascii) from t",
	}, {
		input: "select convert('abc', char unicode) from t",
	}, {
		input: "select convert('abc', char(4)) from t",
	}, {
		input: "select convert('abc', char) from t",
	}, {
		input: "select convert('abc', nchar(4)) from t",
	}, {
		input: "select convert('abc', nchar) from t",
	}, {
		input: "select convert('abc', signed) from t",
	}, {
input:  "select convert('abc', signed integer) from t",
		output: "select convert('abc', signed) from t",
	}, {
		input: "select convert('abc', unsigned) from t",
	}, {
		input:  "select convert('abc', unsigned integer) from t",
		output: "select convert('abc', unsigned) from t",
	}, {
		input: "select convert('abc', decimal(3, 4)) from t",
	}, {
		input: "select convert('abc', decimal(4)) from t",
	}, {
		input: "select convert('abc', decimal) from t",
	}, {
		input: "select convert('abc', date) from t",
	}, {
		input: "select convert('abc', time(4)) from t",
	}, {
		input: "select convert('abc', time) from t",
	}, {
		input: "select convert('abc', datetime(9)) from t",
	}, {
		input: "select convert('abc', datetime) from t",
	}, {
		input: "select convert('abc', json) from t",
	}, {
		input: "select convert('abc' using ascii) from t",
	}}

	for _, tcase := range validSQL {
		if tcase.output == "" {
			tcase.output = tcase.input
		}
		tree, err := Parse(tcase.input)
		if err != nil {
			t.Errorf("input: %s, err: %v", tcase.input, err)
			continue
		}
		out := String(tree)
		if out != tcase.output {
			t.Errorf("out: %s, want %s", out, tcase.output)
		}
	}

	// Error cases: output holds the exact expected error string.
	invalidSQL := []struct {
		input  string
		output string
	}{{
		input:  "select convert('abc' as date) from t",
		output: "syntax error at position 24 near 'as'",
	}, {
		input:  "select convert from t",
		output: "syntax error at position 20 near 'from'",
	}, {
		input:  "select cast('foo', decimal) from t",
		output: "syntax error at position 19",
	}, {
		input:  "select convert('abc', datetime(4+9)) from t",
		output: "syntax error at position 34",
	}, {
		input:  "select convert('abc', decimal(4+9)) from t",
		output: "syntax error at position 33",
	}, {
		input:  "/* a comment */",
		output: "empty statement",
	}, {
		input:  "set transaction isolation level 12345",
		output: "syntax error at position 38 near '12345'",
	}}

	for _, tcase := range invalidSQL {
		_, err := Parse(tcase.input)
		if err == nil || err.Error() != tcase.output {
			t.Errorf("%s: %v, want %s", tcase.input, err, tcase.output)
		}
	}
}

// TestSelectInto checks SELECT ... INTO OUTFILE/DUMPFILE variants, including
// the S3 extensions.
func TestSelectInto(t *testing.T) {
	validSQL := []struct {
		input  string
		output string
	}{{
		input:  "select * from t order by name limit 100 into outfile s3 'out_file_name'",
		output: "select * from t order by `name` asc limit 100 into outfile s3 'out_file_name'",
	}, {
		input: "select * from t into dumpfile 'out_file_name'",
	}, {
		input: "select * from t into outfile 'out_file_name' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'",
	}, {
		input: "select * from t into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off",
	}, {
		input:  "select * from (select * from t union select * from t2) as t3 where t3.name in (select col from t4) into outfile s3 'out_file_name'",
		output: "select * from (select * from t union select * from t2) as t3 where t3.`name` in (select col from t4) into outfile s3 'out_file_name'",
	}, {
		// Invalid queries but these are parsed and errors caught in planbuilder
		input: "select * from t limit 100 into outfile s3 'out_file_name' union select * from t2",
	}, {
		input: "select * from (select * from t into outfile s3 'inner_outfile') as t2 into outfile s3 'out_file_name'",
	}}

	for _, tcase := range validSQL {
		if tcase.output == "" {
			tcase.output = tcase.input
		}
		tree, err := Parse(tcase.input)
		if err != nil {
			t.Errorf("input: %s, err: %v", tcase.input, err)
			continue
		}
		out := String(tree)
		assert.Equal(t, tcase.output, out)
	}

	invalidSQL := []struct {
		input  string
		output string
	}{{
		input:  "select convert('abc' as date) from t",
		output: "syntax error at position 24 near 'as'",
	}, {
		input:  "set transaction isolation level 12345",
		output: "syntax error at position 38 near '12345'",
	}}

	for _, tcase := range invalidSQL {
		_, err := Parse(tcase.input)
		if err == nil || err.Error() != tcase.output {
			t.Errorf("%s: %v, want %s", tcase.input, err, tcase.output)
		}
	}
}
// TestPositionedErr checks that parse errors report the expected position and
// the "near" token text (nil when there is no offending token).
func TestPositionedErr(t *testing.T) {
	invalidSQL := []struct {
		input  string
		output PositionedErr
	}{{
		input:  "select convert('abc' as date) from t",
		output: PositionedErr{"syntax error", 24, []byte("as")},
	}, {
		input:  "select convert from t",
		output: PositionedErr{"syntax error", 20, []byte("from")},
	}, {
		input:  "select cast('foo', decimal) from t",
		output: PositionedErr{"syntax error", 19, nil},
	}, {
		input:  "select convert('abc', datetime(4+9)) from t",
		output: PositionedErr{"syntax error", 34, nil},
	}, {
		input:  "select convert('abc', decimal(4+9)) from t",
		output: PositionedErr{"syntax error", 33, nil},
	}, {
		input:  "set transaction isolation level 12345",
		output: PositionedErr{"syntax error", 38, []byte("12345")},
	}, {
		input:  "select * from a left join b",
		output: PositionedErr{"syntax error", 28, nil},
	}, {
		input:  "select a from (select * from tbl)",
		output: PositionedErr{"syntax error", 34, nil},
	}}

	for _, tcase := range invalidSQL {
		tkn := NewStringTokenizer(tcase.input)
		_, err := ParseNext(tkn)
		// The error must be a PositionedErr whose position, near-token and
		// rendered message all match.
		if posErr, ok := err.(PositionedErr); !ok {
			t.Errorf("%s: %v expected PositionedErr, got (%T) %v", tcase.input, err, err, tcase.output)
		} else if posErr.Pos != tcase.output.Pos || !bytes.Equal(posErr.Near, tcase.output.Near) || err.Error() != tcase.output.Error() {
			t.Errorf("%s: %v, want: %v", tcase.input, err, tcase.output)
		}
	}
}

// TestSubStr checks normalization of SUBSTR/SUBSTRING call forms, including
// the "expr FROM pos FOR len" syntax.
func TestSubStr(t *testing.T) {
	validSQL := []struct {
		input  string
		output string
	}{{
		input: `select substr('foobar', 1) from t`,
	}, {
		input: "select substr(a, 1, 6) from t",
	}, {
		input:  "select substring(a, 1) from t",
		output: "select substr(a, 1) from t",
	}, {
		input:  "select substring(a, 1, 6) from t",
		output: "select substr(a, 1, 6) from t",
	}, {
		input:  "select substr(a from 1 for 6) from t",
		output: "select substr(a, 1, 6) from t",
	}, {
		input:  "select substring(a from 1 for 6) from t",
		output: "select substr(a, 1, 6) from t",
	}, {
		input:  `select substr("foo" from 1 for 2) from t`,
		output: `select substr('foo', 1, 2) from t`,
	}, {
		input:  `select substring("foo", 1, 2) from t`,
		output: `select substr('foo', 1, 2) from t`,
	}, {
		input:  `select substr(substr("foo" from 1 for 2), 1, 2) from t`,
		output: `select substr(substr('foo', 1, 2), 1, 2) from t`,
	}, {
		input:  `select substr(substring("foo", 1, 2), 3, 4) from t`,
		output: `select substr(substr('foo', 1, 2), 3, 4) from t`,
	}, {
		input:  `select substring(substr("foo", 1), 2) from t`,
		output: `select substr(substr('foo', 1), 2) from t`,
	}}

	for _, tcase := range validSQL {
		if tcase.output == "" {
			tcase.output = tcase.input
		}
		tree, err := Parse(tcase.input)
		if err != nil {
			t.Errorf("input: %s, err: %v", tcase.input, err)
			continue
		}
		out := String(tree)
		if out != tcase.output {
			t.Errorf("out: %s, want %s", out, tcase.output)
		}
	}
}

// TestLoadData checks that LOAD DATA variants (including S3 sources) parse
// without error; serialization is not asserted here.
func TestLoadData(t *testing.T) {
	validSQL := []string{
		"load data from s3 'x.txt'",
		"load data from s3 manifest 'x.txt'",
		"load data from s3 file 'x.txt'",
		"load data infile 'x.txt' into table 'c'",
		"load data from s3 'x.txt' into table x"}

	for _, tcase := range validSQL {
		_, err := Parse(tcase)
		require.NoError(t, err)
	}
}

// TestCreateTable round-trips CREATE TABLE statements through the parser and
// asserts the serialized form is identical to the (already normalized) input.
func TestCreateTable(t *testing.T) {
	validSQL := []string{
		// test all the data types and options
		"create table t (\n" +
			"	col_bit bit,\n" +
			"	col_tinyint tinyint auto_increment,\n" +
			"	col_tinyint3 tinyint(3) unsigned,\n" +
			"	col_smallint smallint,\n" +
			"	col_smallint4 smallint(4) zerofill,\n" +
			"	col_mediumint mediumint,\n" +
			"	col_mediumint5 mediumint(5) unsigned not null,\n" +
			"	col_int int,\n" +
			"	col_int10 int(10) not null,\n" +
			"	col_integer integer comment 'this is an integer',\n" +
			"	col_bigint bigint,\n" +
			"	col_bigint10 bigint(10) zerofill not null default 10,\n" +
			"	col_real real,\n" +
			"	col_real2 real(1,2) not null default 1.23,\n" +
			"	col_double double,\n" +
			"	col_double2 double(3,4) not null default 1.23,\n" +
			"	col_float float,\n" +
			"	col_float2 float(3,4) not null default 1.23,\n" +
			"	col_decimal decimal,\n" +
			"	col_decimal2 decimal(2),\n" +
			"	col_decimal3 decimal(2,3),\n" +
			"	col_numeric 
numeric,\n" + " col_numeric2 numeric(2),\n" + " col_numeric3 numeric(2,3),\n" + " col_date date,\n" + " col_time time,\n" + " col_timestamp timestamp,\n" + " col_datetime datetime,\n" + " col_year year,\n" + " col_char char,\n" + " col_char2 char(2),\n" + " col_char3 char(3) character set ascii,\n" + " col_char4 char(4) character set ascii collate ascii_bin,\n" + " col_varchar varchar,\n" + " col_varchar2 varchar(2),\n" + " col_varchar3 varchar(3) character set ascii,\n" + " col_varchar4 varchar(4) character set ascii collate ascii_bin,\n" + " col_binary binary,\n" + " col_varbinary varbinary(10),\n" + " col_tinyblob tinyblob,\n" + " col_blob blob,\n" + " col_mediumblob mediumblob,\n" + " col_longblob longblob,\n" + " col_tinytext tinytext,\n" + " col_text text,\n" + " col_mediumtext mediumtext,\n" + " col_longtext longtext,\n" + " col_text text character set ascii collate ascii_bin,\n" + " col_json json,\n" + " col_enum enum('a', 'b', 'c', 'd'),\n" + " col_enum2 enum('a', 'b', 'c', 'd') character set ascii,\n" + " col_enum3 enum('a', 'b', 'c', 'd') collate ascii_bin,\n" + " col_enum4 enum('a', 'b', 'c', 'd') character set ascii collate ascii_bin,\n" + " col_set set('a', 'b', 'c', 'd'),\n" + " col_set2 set('a', 'b', 'c', 'd') character set ascii,\n" + " col_set3 set('a', 'b', 'c', 'd') collate ascii_bin,\n" + " col_set4 set('a', 'b', 'c', 'd') character set ascii collate ascii_bin,\n" + " col_geometry1 geometry,\n" + " col_geometry2 geometry not null,\n" + " col_point1 point,\n" + " col_point2 point not null,\n" + " col_linestring1 linestring,\n" + " col_linestring2 linestring not null,\n" + " col_polygon1 polygon,\n" + " col_polygon2 polygon not null,\n" + " col_geometrycollection1 geometrycollection,\n" + " col_geometrycollection2 geometrycollection not null,\n" + " col_multipoint1 multipoint,\n" + " col_multipoint2 multipoint not null,\n" + " col_multilinestring1 multilinestring,\n" + " col_multilinestring2 multilinestring not null,\n" + " col_multipolygon1 
multipolygon,\n" + " col_multipolygon2 multipolygon not null\n" + ")", // test defining indexes separately "create table t (\n" + " id int auto_increment,\n" + " username varchar,\n" + " email varchar,\n" + " full_name varchar,\n" + " geom point not null,\n" + " status_nonkeyword varchar,\n" + " primary key (id),\n" + " spatial key geom (geom),\n" + " fulltext key fts (full_name),\n" + " unique key by_username (username),\n" + " unique by_username2 (username),\n" + " unique index by_username3 (username),\n" + " index by_status (status_nonkeyword),\n" + " key by_full_name (full_name)\n" + ")", // test that indexes support USING <id> "create table t (\n" + " id int auto_increment,\n" + " username varchar,\n" + " email varchar,\n" + " full_name varchar,\n" + " status_nonkeyword varchar,\n" + " primary key (id) using BTREE,\n" + " unique key by_username (username) using HASH,\n" + " unique by_username2 (username) using OTHER,\n" + " unique index by_username3 (username) using XYZ,\n" + " index by_status (status_nonkeyword) using PDQ,\n" + " key by_full_name (full_name) using OTHER\n" + ")", // test other index options "create table t (\n" + " id int auto_increment,\n" + " username varchar,\n" + " email varchar,\n" + " primary key (id) comment 'hi',\n" + " unique key by_username (username) key_block_size 8,\n" + " unique index by_username4 (username) comment 'hi' using BTREE,\n" + " unique index by_username4 (username) using BTREE key_block_size 4 comment 'hi'\n" + ")", // multi-column indexes "create table t (\n" + " id int auto_increment,\n" + " username varchar,\n" + " email varchar,\n" + " full_name varchar,\n" + " a int,\n" + " b int,\n" + " c int,\n" + " primary key (id, username),\n" + " unique key by_abc (a, b, c),\n" + " unique key (a, b, c),\n" + " key by_email (email(10), username)\n" + ")", // foreign keys "create table t (\n" + " id int auto_increment,\n" + " username varchar,\n" + " k int,\n" + " Z int,\n" + " primary key (id, username),\n" + " key by_email 
(email(10), username),\n" + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b),\n" + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on delete restrict,\n" + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on delete no action,\n" + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on delete cascade on update set default,\n" + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on delete set default on update set null,\n" + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on delete set null on update restrict,\n" + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on update no action,\n" + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on update cascade\n" + ")", // table options "create table t (\n" + " id int auto_increment\n" + ") engine InnoDB,\n" + " auto_increment 123,\n" + " avg_row_length 1,\n" + " default character set utf8mb4,\n" + " character set latin1,\n" + " checksum 0,\n" + " default collate binary,\n" + " collate ascii_bin,\n" + " comment 'this is a comment',\n" + " compression 'zlib',\n" + " connection 'connect_string',\n" + " data directory 'absolute path to directory',\n" + " delay_key_write 1,\n" + " encryption 'n',\n" + " index directory 'absolute path to directory',\n" + " insert_method no,\n" + " key_block_size 1024,\n" + " max_rows 100,\n" + " min_rows 10,\n" + " pack_keys 0,\n" + " password 'sekret',\n" + " row_format default,\n" + " stats_auto_recalc default,\n" + " stats_persistent 0,\n" + " stats_sample_pages 1,\n" + " tablespace tablespace_name storage disk,\n" + " tablespace tablespace_name\n", // boolean columns "create table t (\n" + " bi bigint not null primary key,\n" + " b1 bool not null,\n" + " b2 boolean\n" + ")", } for _, sql := range validSQL { sql = strings.TrimSpace(sql) tree, err := ParseStrictDDL(sql) if err != nil { t.Errorf("input: %s, err: %v", sql, 
err) continue } got := String(tree.(*CreateTable)) assert.True(t, tree.(*CreateTable).FullyParsed) if sql != got { t.Errorf("want:\n%s\ngot:\n%s", sql, got) } } sql := "create table t garbage" tree, err := Parse(sql) if err != nil { t.Errorf("input: %s, err: %v", sql, err) } assert.True(t, !tree.(*CreateTable).FullyParsed) tree, err = ParseStrictDDL(sql) if tree != nil || err == nil { t.Errorf("ParseStrictDDL unexpectedly accepted input %s", sql) } testCases := []struct { input string output string }{{ // test key_block_size input: "create table t (\n" + " id int auto_increment,\n" + " username varchar,\n" + " unique key by_username (username) key_block_size 8,\n" + " unique key by_username2 (username) key_block_size=8,\n" + " unique by_username3 (username) key_block_size = 4\n" + ")", output: "create table t (\n" + " id int auto_increment,\n" + " username varchar,\n" + " unique key by_username (username) key_block_size 8,\n" + " unique key by_username2 (username) key_block_size 8,\n" + " unique by_username3 (username) key_block_size 4\n" + ")", }, { // test defaults input: "create table t (\n" + " i1 int default 1,\n" + " i2 int default null,\n" + " f1 float default 1.23,\n" + " s1 varchar default 'c',\n" + " s2 varchar default 'this is a string',\n" + " s3 varchar default null,\n" + " s4 timestamp default current_timestamp,\n" + " s5 bit(1) default B'0'\n" + ")", output: "create table t (\n" + " i1 int default 1,\n" + " i2 int default null,\n" + " f1 float default 1.23,\n" + " s1 varchar default 'c',\n" + " s2 varchar default 'this is a string',\n" + " `s3` varchar default null,\n" + " s4 timestamp default current_timestamp(),\n" + " s5 bit(1) default B'0'\n" + ")", }, { // test non_reserved word in column name input: "create table t (\n" + " repair int\n" + ")", output: "create table t (\n" + " `repair` int\n" + ")", }, { // test key field options input: "create table t (\n" + " id int auto_increment primary key,\n" + " username varchar unique key,\n" + " email 
varchar unique,\n" + " full_name varchar key,\n" + " time1 timestamp on update current_timestamp,\n" + " time2 timestamp default current_timestamp on update current_timestamp\n" + ")", output: "create table t (\n" + " id int auto_increment primary key,\n" + " username varchar unique key,\n" + " email varchar unique,\n" + " full_name varchar key,\n" + " time1 timestamp on update current_timestamp(),\n" + " time2 timestamp default current_timestamp() on update current_timestamp()\n" + ")", }, { // test current_timestamp with and without () input: "create table t (\n" + " time1 timestamp default current_timestamp,\n" + " time2 timestamp default current_timestamp(),\n" + " time3 timestamp default current_timestamp on update current_timestamp,\n" + " time4 timestamp default current_timestamp() on update current_timestamp(),\n" + " time5 timestamp(3) default current_timestamp(3) on update current_timestamp(3)\n" + ")", output: "create table t (\n" + " time1 timestamp default current_timestamp(),\n" + " time2 timestamp default current_timestamp(),\n" + " time3 timestamp default current_timestamp() on update current_timestamp(),\n" + " time4 timestamp default current_timestamp() on update current_timestamp(),\n" + " time5 timestamp(3) default current_timestamp(3) on update current_timestamp(3)\n" + ")", }, { // test utc_timestamp with and without () input: "create table t (\n" + " time1 timestamp default utc_timestamp,\n" + " time2 timestamp default utc_timestamp(),\n" + " time3 timestamp default utc_timestamp on update utc_timestamp,\n" + " time4 timestamp default utc_timestamp() on update utc_timestamp(),\n" + " time5 timestamp(4) default utc_timestamp(4) on update utc_timestamp(4)\n" + ")", output: "create table t (\n" + " time1 timestamp default utc_timestamp(),\n" + " time2 timestamp default utc_timestamp(),\n" + " time3 timestamp default utc_timestamp() on update utc_timestamp(),\n" + " time4 timestamp default utc_timestamp() on update utc_timestamp(),\n" + " time5 
timestamp(4) default utc_timestamp(4) on update utc_timestamp(4)\n" + ")", }, { // test utc_time with and without () input: "create table t (\n" + " time1 timestamp default utc_time,\n" + " time2 timestamp default utc_time(),\n" + " time3 timestamp default utc_time on update utc_time,\n" + " time4 timestamp default utc_time() on update utc_time(),\n" + " time5 timestamp(5) default utc_time(5) on update utc_time(5)\n" + ")", output: "create table t (\n" + " time1 timestamp default utc_time(),\n" + " time2 timestamp default utc_time(),\n" + " time3 timestamp default utc_time() on update utc_time(),\n" + " time4 timestamp default utc_time() on update utc_time(),\n" + " time5 timestamp(5) default utc_time(5) on update utc_time(5)\n" + ")", }, { // test utc_date with and without () input: "create table t (\n" + " time1 timestamp default utc_date,\n" + " time2 timestamp default utc_date(),\n" + " time3 timestamp default utc_date on update utc_date,\n" + " time4 timestamp default utc_date() on update utc_date()\n" + ")", output: "create table t (\n" + " time1 timestamp default utc_date(),\n" + " time2 timestamp default utc_date(),\n" + " time3 timestamp default utc_date() on update utc_date(),\n" + " time4 timestamp default utc_date() on update utc_date()\n" + ")", }, { // test localtime with and without () input: "create table t (\n" + " time1 timestamp default localtime,\n" + " time2 timestamp default localtime(),\n" + " time3 timestamp default localtime on update localtime,\n" + " time4 timestamp default localtime() on update localtime(),\n" + " time5 timestamp(6) default localtime(6) on update localtime(6)\n" + ")", output: "create table t (\n" + " time1 timestamp default localtime(),\n" + " time2 timestamp default localtime(),\n" + " time3 timestamp default localtime() on update localtime(),\n" + " time4 timestamp default localtime() on update localtime(),\n" + " time5 timestamp(6) default localtime(6) on update localtime(6)\n" + ")", }, { // test localtimestamp with 
and without () input: "create table t (\n" + " time1 timestamp default localtimestamp,\n" + " time2 timestamp default localtimestamp(),\n" + " time3 timestamp default localtimestamp on update localtimestamp,\n" + " time4 timestamp default localtimestamp() on update localtimestamp(),\n" + " time5 timestamp(1) default localtimestamp(1) on update localtimestamp(1)\n" + ")", output: "create table t (\n" + " time1 timestamp default localtimestamp(),\n" + " time2 timestamp default localtimestamp(),\n" + " time3 timestamp default localtimestamp() on update localtimestamp(),\n" + " time4 timestamp default localtimestamp() on update localtimestamp(),\n" + " time5 timestamp(1) default localtimestamp(1) on update localtimestamp(1)\n" + ")", }, { // test current_date with and without () input: "create table t (\n" + " time1 timestamp default current_date,\n" + " time2 timestamp default current_date(),\n" + " time3 timestamp default current_date on update current_date,\n" + " time4 timestamp default current_date() on update current_date()\n" + ")", output: "create table t (\n" + " time1 timestamp default current_date(),\n" + " time2 timestamp default current_date(),\n" + " time3 timestamp default current_date() on update current_date(),\n" + " time4 timestamp default current_date() on update current_date()\n" + ")", }, { // test current_time with and without () input: "create table t (\n" + " time1 timestamp default current_time,\n" + " time2 timestamp default current_time(),\n" + " time3 timestamp default current_time on update current_time,\n" + " time4 timestamp default current_time() on update current_time(),\n" + " time5 timestamp(2) default current_time(2) on update current_time(2)\n" + ")", output: "create table t (\n" + " time1 timestamp default current_time(),\n" + " time2 timestamp default current_time(),\n" + " time3 timestamp default current_time() on update current_time(),\n" + " time4 timestamp default current_time() on update current_time(),\n" + " time5 
timestamp(2) default current_time(2) on update current_time(2)\n" + ")",
	}}
	for _, tcase := range testCases {
		tree, err := ParseStrictDDL(tcase.input)
		if err != nil {
			t.Errorf("input: %s, err: %v", tcase.input, err)
			continue
		}
		assert.True(t, tree.(*CreateTable).FullyParsed)
		if got, want := String(tree.(*CreateTable)), tcase.output; got != want {
			t.Errorf("Parse(%s):\n%s, want\n%s", tcase.input, got, want)
		}
	}
}

// TestCreateTableLike verifies that CREATE TABLE ... LIKE is parsed and
// re-serialized in its canonical (unparenthesized) form, including
// qualified table names.
func TestCreateTableLike(t *testing.T) {
	normal := "create table a like b"
	testCases := []struct {
		input  string
		output string
	}{
		{
			"create table a like b",
			normal,
		},
		{
			"create table a (like b)",
			normal,
		},
		{
			"create table ks.a like unsharded_ks.b",
			"create table ks.a like unsharded_ks.b",
		},
	}
	for _, tcase := range testCases {
		tree, err := ParseStrictDDL(tcase.input)
		if err != nil {
			t.Errorf("input: %s, err: %v", tcase.input, err)
			continue
		}
		assert.True(t, tree.(*CreateTable).FullyParsed)
		if got, want := String(tree.(*CreateTable)), tcase.output; got != want {
			t.Errorf("Parse(%s):\n%s, want\n%s", tcase.input, got, want)
		}
	}
}

// TestCreateTableEscaped verifies that back-quoting is dropped where it is
// unnecessary and kept where identifiers collide with reserved words.
func TestCreateTableEscaped(t *testing.T) {
	testCases := []struct {
		input  string
		output string
	}{{
		input: "create table `a`(`id` int, primary key(`id`))",
		output: "create table a (\n" +
			"\tid int,\n" +
			"\tprimary key (id)\n" +
			")",
	}, {
		input: "create table `insert`(`update` int, primary key(`delete`))",
		output: "create table `insert` (\n" +
			"\t`update` int,\n" +
			"\tprimary key (`delete`)\n" +
			")",
	}}
	for _, tcase := range testCases {
		tree, err := ParseStrictDDL(tcase.input)
		if err != nil {
			t.Errorf("input: %s, err: %v", tcase.input, err)
			continue
		}
		if got, want := String(tree.(*CreateTable)), tcase.output; got != want {
			t.Errorf("Parse(%s):\n%s, want\n%s", tcase.input, got, want)
		}
	}
}

var (
	// invalidSQL enumerates statements that must fail to parse, paired
	// with the exact error message Parse is expected to return.
	invalidSQL = []struct {
		input        string
		output       string
		excludeMulti bool // Don't use in the ParseNext multi-statement parsing tests.
	}{{
		input:  "select : from t",
		output: "syntax error at position 9 near ':'",
	}, {
		input:  "select 0xH from t",
		output: "syntax error at position 10 near '0x'",
	}, {
		input:  "select x'78 from t",
		output: "syntax error at position 12 near '78'",
	}, {
		input:  "select x'777' from t",
		output: "syntax error at position 14 near '777'",
	}, {
		input:  "select * from t where :1 = 2",
		output: "syntax error at position 24 near ':'",
	}, {
		input:  "select * from t where :. = 2",
		output: "syntax error at position 24 near ':'",
	}, {
		input:  "select * from t where ::1 = 2",
		output: "syntax error at position 25 near '::'",
	}, {
		input:  "select * from t where ::. = 2",
		output: "syntax error at position 25 near '::'",
	}, {
		input:  "update a set c = values(1)",
		output: "syntax error at position 26 near '1'",
	}, {
		input: "select(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" +
			"(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" +
			"F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" +
			"(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" +
			"F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" +
			"(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" +
			"F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" +
			"(F(F(F(F(F(F(F(F(F(F(F(F(",
		output: "max nesting level reached at position 406",
	}, {
		input: "select(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" +
			"(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" +
			"F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" +
			"(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" +
			"F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" +
			"(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" +
			"F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" +
			"(F(F(F(F(F(F(F(F(F(F(F(",
		output: "syntax error at position 404",
	}, {
		// This construct is considered invalid due to a grammar conflict.
		input:  "insert into a select * from b join c on duplicate key update d=e",
		output: "syntax error at position 54 near 'key'",
	}, {
		input:  "select * from a left join b",
		output: "syntax error at position 28",
	}, {
		input:  "select * from a natural join b on c = d",
		output: "syntax error at position 34 near 'on'",
	}, {
		input:  "select * from a natural join b using (c)",
		output: "syntax error at position 37 near 'using'",
	}, {
		input:  "select next id from a",
		output: "expecting value after next at position 15 near 'id'",
	}, {
		input:  "select next 1+1 values from a",
		output: "syntax error at position 15",
	}, {
		input:  "insert into a values (select * from b)",
		output: "syntax error at position 29 near 'select'",
	}, {
		input:  "select database",
		output: "syntax error at position 16",
	}, {
		input:  "select mod from t",
		output: "syntax error at position 16 near 'from'",
	}, {
		input:  "select 1 from t where div 5",
		output: "syntax error at position 26 near 'div'",
	}, {
		input:  "select 1 from t where binary",
		output: "syntax error at position 29",
	}, {
		input:  "select match(a1, a2) against ('foo' in boolean mode with query expansion) from t",
		output: "syntax error at position 57 near 'with'",
	}, {
		input:  "select /* reserved keyword as unqualified column */ * from t where key = 'test'",
		output: "syntax error at position 71 near 'key'",
	}, {
		input:  "select /* vitess-reserved keyword as unqualified column */ * from t where escape = 'test'",
		output: "syntax error at position 81 near 'escape'",
	}, {
		input:  "select * from t where id = ((select a from t1 union select b from t2) order by a limit 1)",
		output: "syntax error at position 76 near 'order'",
	}, {
		input:  "select /* straight_join using */ 1 from t1 straight_join t2 using (a)",
		output: "syntax error at position 66 near 'using'",
	}, {
		input:        "select 'aa",
		output:       "syntax error at position 11 near 'aa'",
		excludeMulti: true,
	}, {
		input:        "select 'aa\\",
		output:       "syntax error at position 12 near 'aa'",
		excludeMulti: true,
	}, {
		input:        "select /* aa",
		output:       "syntax error at position 13 near '/* aa'",
		excludeMulti: true,
	}, {
		// non_reserved keywords are currently not permitted everywhere
		input:        "create database repair",
		output:       "syntax error at position 23 near 'repair'",
		excludeMulti: true,
	}}
)

// TestErrors checks that each invalidSQL input fails to parse with
// exactly the expected error message.
func TestErrors(t *testing.T) {
	for _, tcase := range invalidSQL {
		t.Run(tcase.input, func(t *testing.T) {
			_, err := Parse(tcase.input)
			require.Error(t, err, tcase.output)
			// Fix: testify's Equal takes (expected, actual) — the original
			// had the arguments swapped, inverting failure messages.
			require.Equal(t, tcase.output, err.Error())
		})
	}
}

// TestSkipToEnd tests that the skip to end functionality
// does not skip past a ';'. If any tokens exist after that, Parse
// should return an error.
func TestSkipToEnd(t *testing.T) {
	testcases := []struct {
		input  string
		output string
	}{{
		// This is the case where the partial ddl will be reset
		// because of a premature ';'.
		input:  "create table a(id; select * from t",
		output: "syntax error at position 19",
	}, {
		// Partial DDL should get reset for valid DDLs also.
		input:  "create table a(id int); select * from t",
		output: "syntax error at position 31 near 'select'",
	}, {
		// Partial DDL does not get reset here. But we allow the
		// DDL only if there are no new tokens after skipping to end.
		input:  "create table a bb cc; select * from t",
		output: "extra characters encountered after end of DDL: 'select'",
	}, {
		// Test that we don't step at ';' inside strings.
		input:  "create table a bb 'a;'; select * from t",
		output: "extra characters encountered after end of DDL: 'select'",
	}}
	for _, tcase := range testcases {
		_, err := Parse(tcase.input)
		if err == nil || err.Error() != tcase.output {
			t.Errorf("%s: %v, want %s", tcase.input, err, tcase.output)
		}
	}
}

// TestParseDjangoQueries parses every query in the Django corpus file and
// reports any that fail.
func TestParseDjangoQueries(t *testing.T) {
	file, err := os.Open("./test_queries/django_queries.txt")
	if err != nil {
		// Fix: was t.Errorf, which continued into the scan loop with a
		// failed open; nothing below can succeed without the file.
		t.Fatalf(" Error: %v", err)
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		// Fix: dropped the redundant string() conversion — Text()
		// already returns a string.
		_, err := Parse(scanner.Text())
		if err != nil {
			t.Error(scanner.Text())
			t.Errorf(" Error: %v", err)
		}
	}
	// Fix: surface read errors the loop would otherwise swallow.
	if err := scanner.Err(); err != nil {
		t.Errorf(" Error: %v", err)
	}
}

// Benchmark run on 6/23/17, prior to improvements:
// BenchmarkParse1-4         100000             16334 ns/op
// BenchmarkParse2-4          30000             44121 ns/op

// Benchmark run on 9/3/18, comparing pooled parser performance.
//
// benchmark                     old ns/op     new ns/op     delta
// BenchmarkNormalize-4          2540          2533          -0.28%
// BenchmarkParse1-4             18269         13330         -27.03%
// BenchmarkParse2-4             46703         41255         -11.67%
// BenchmarkParse2Parallel-4     22246         20707         -6.92%
// BenchmarkParse3-4             4064743       4083135       +0.45%
//
// benchmark                     old allocs     new allocs     delta
// BenchmarkNormalize-4          27             27             +0.00%
// BenchmarkParse1-4             75             74             -1.33%
// BenchmarkParse2-4             264            263            -0.38%
// BenchmarkParse2Parallel-4     176            175            -0.57%
// BenchmarkParse3-4             360            361            +0.28%
//
// benchmark                     old bytes     new bytes     delta
// BenchmarkNormalize-4          821           821           +0.00%
// BenchmarkParse1-4             22776         2307          -89.87%
// BenchmarkParse2-4             28352         7881          -72.20%
// BenchmarkParse2Parallel-4     25712         5235          -79.64%
// BenchmarkParse3-4             6352082       6336307       -0.25%

const (
	sql1 = "select 'abcd', 20, 30.0, eid from a where 1=eid and name='3'"
	sql2 = "select aaaa, bbb, ccc, ddd, eeee, ffff, gggg, hhhh, iiii from tttt, ttt1, ttt3 where aaaa = bbbb and bbbb = cccc and dddd+1 = eeee group by fff, gggg having hhhh = iiii and iiii = jjjj order by kkkk, llll limit 3, 4"
)

// BenchmarkParse1 measures parse + serialize cost of a small query.
func BenchmarkParse1(b *testing.B) {
	sql := sql1
	for i := 0; i < b.N; i++ {
		ast, err := Parse(sql)
		if err != nil {
b.Fatal(err) } _ = String(ast) } } func BenchmarkParse2(b *testing.B) { sql := sql2 for i := 0; i < b.N; i++ { ast, err := Parse(sql) if err != nil { b.Fatal(err) } _ = String(ast) } } func BenchmarkParse2Parallel(b *testing.B) { sql := sql2 b.RunParallel(func(pb *testing.PB) { for pb.Next() { ast, err := Parse(sql) if err != nil { b.Fatal(err) } _ = ast } }) } var benchQuery string func init() { // benchQuerySize is the approximate size of the query. benchQuerySize := 1000000 // Size of value is 1/10 size of query. Then we add // 10 such values to the where clause. var baseval bytes.Buffer for i := 0; i < benchQuerySize/100; i++ { // Add an escape character: This will force the upcoming // tokenizer improvement to still create a copy of the string. // Then we can see if avoiding the copy will be worth it. baseval.WriteString("\\'123456789") } var buf bytes.Buffer buf.WriteString("select a from t1 where v = 1") for i := 0; i < 10; i++ { fmt.Fprintf(&buf, " and v%d = \"%d%s\"", i, i, baseval.String()) } benchQuery = buf.String() } func BenchmarkParse3(b *testing.B) { for i := 0; i < b.N; i++ { if _, err := Parse(benchQuery); err != nil { b.Fatal(err) } } } add parsing test Signed-off-by: Harshit Gangal <af4d8fd3e8a94e9e7874ee31a198463039323a54@planetscale.com> /* Copyright 2019 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package sqlparser import ( "bufio" "bytes" "fmt" "math/rand" "os" "strings" "sync" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) var ( validSQL = []struct { input string output string partialDDL bool }{{ input: "select 1", output: "select 1 from dual", }, { input: "select 1 from t", }, { input: "select * from information_schema.columns", output: "select * from information_schema.`columns`", }, { input: "select * from information_schema.processlist", output: "select * from information_schema.`processlist`", }, { input: "select .1 from t", }, { input: "select 1.2e1 from t", }, { input: "select 1.2e+1 from t", }, { input: "select 1.2e-1 from t", }, { input: "select 08.3 from t", }, { input: "select -1 from t where b = -2", }, { input: "select - -1 from t", output: "select 1 from t", }, { input: "select a from t", }, { input: "select $ from t", }, { input: "select a.b as a$b from $test$", }, { input: "select 1 from t // aa\n", output: "select 1 from t", }, { input: "select 1 from t -- aa\n", output: "select 1 from t", }, { input: "select 1 from t # aa\n", output: "select 1 from t", }, { input: "select 1 --aa\nfrom t", output: "select 1 from t", }, { input: "select 1 #aa\nfrom t", output: "select 1 from t", }, { input: "select /* simplest */ 1 from t", }, { input: "select /* double star **/ 1 from t", }, { input: "select /* double */ /* comment */ 1 from t", }, { input: "select /* back-quote keyword */ `By` from t", }, { input: "select /* back-quote num */ `2a` from t", }, { input: "select /* back-quote . 
*/ `a.b` from t", }, { input: "select /* back-quote back-quote */ `a``b` from t", }, { input: "select /* back-quote unnecessary */ 1 from `t`", output: "select /* back-quote unnecessary */ 1 from t", }, { input: "select /* back-quote idnum */ 1 from `a1`", output: "select /* back-quote idnum */ 1 from a1", }, { input: "select /* @ */ @@a from b", }, { input: "select /* \\0 */ '\\0' from a", }, { input: "select 1 /* drop this comment */ from t", output: "select 1 from t", }, { input: "select /* union */ 1 from t union select 1 from t", }, { input: "select /* double union */ 1 from t union select 1 from t union select 1 from t", }, { input: "select /* union all */ 1 from t union all select 1 from t", }, { input: "select /* union distinct */ 1 from t union distinct select 1 from t", output: "select /* union distinct */ 1 from t union select 1 from t", }, { input: "(select /* union parenthesized select */ 1 from t order by a) union select 1 from t", output: "(select /* union parenthesized select */ 1 from t order by a asc) union select 1 from t", }, { input: "select /* union parenthesized select 2 */ 1 from t union (select 1 from t)", }, { input: "select /* union order by */ 1 from t union select 1 from t order by a", output: "select /* union order by */ 1 from t union select 1 from t order by a asc", }, { input: "select /* union order by limit lock */ 1 from t union select 1 from t order by a limit 1 for update", output: "select /* union order by limit lock */ 1 from t union select 1 from t order by a asc limit 1 for update", }, { input: "select /* union with limit on lhs */ 1 from t limit 1 union select 1 from t", }, { input: "(select id, a from t order by id limit 1) union (select id, b as a from s order by id limit 1) order by a limit 1", output: "(select id, a from t order by id asc limit 1) union (select id, b as a from s order by id asc limit 1) order by a asc limit 1", }, { input: "select a from (select 1 as a from tbl1 union select 2 from tbl2) as t", }, { 
input: "select * from t1 join (select * from t2 union select * from t3) as t", }, { // Ensure this doesn't generate: ""select * from t1 join t2 on a = b join t3 on a = b". input: "select * from t1 join t2 on a = b join t3", }, { input: "select * from t1 where col in (select 1 from dual union select 2 from dual)", }, { input: "select * from t1 where exists (select a from t2 union select b from t3)", }, { input: "select 1 from dual union select 2 from dual union all select 3 from dual union select 4 from dual union all select 5 from dual", }, { input: "(select 1 from dual) order by 1 asc limit 2", }, { input: "(select 1 from dual order by 1 desc) order by 1 asc limit 2", }, { input: "(select 1 from dual)", }, { input: "((select 1 from dual))", }, { input: "select 1 from (select 1 from dual) as t", }, { input: "select 1 from (select 1 from dual union select 2 from dual) as t", }, { input: "select 1 from ((select 1 from dual) union select 2 from dual) as t", }, { input: "select /* distinct */ distinct 1 from t", }, { input: "select /* straight_join */ straight_join 1 from t", }, { input: "select /* for update */ 1 from t for update", }, { input: "select /* lock in share mode */ 1 from t lock in share mode", }, { input: "select /* select list */ 1, 2 from t", }, { input: "select /* * */ * from t", }, { input: "select /* a.* */ a.* from t", }, { input: "select /* a.b.* */ a.b.* from t", }, { input: "select /* column alias */ a b from t", output: "select /* column alias */ a as b from t", }, { input: "select /* column alias with as */ a as b from t", }, { input: "select /* keyword column alias */ a as `By` from t", }, { input: "select /* column alias as string */ a as \"b\" from t", output: "select /* column alias as string */ a as b from t", }, { input: "select /* column alias as string without as */ a \"b\" from t", output: "select /* column alias as string without as */ a as b from t", }, { input: "select /* column alias with non_reserved keyword */ a as auto_increment 
from t", output: "select /* column alias with non_reserved keyword */ a as `auto_increment` from t", }, { input: "select /* a.* */ a.* from t", }, { input: "select next value for t", output: "select next 1 values from t", }, { input: "select next value from t", output: "select next 1 values from t", }, { input: "select next 10 values from t", }, { input: "select next :a values from t", }, { input: "select /* `By`.* */ `By`.* from t", }, { input: "select /* select with bool expr */ a = b from t", }, { input: "select /* case_when */ case when a = b then c end from t", }, { input: "select /* case_when_else */ case when a = b then c else d end from t", }, { input: "select /* case_when_when_else */ case when a = b then c when b = d then d else d end from t", }, { input: "select /* case */ case aa when a = b then c end from t", }, { input: "select /* parenthesis */ 1 from (t)", }, { input: "select /* parenthesis multi-table */ 1 from (t1, t2)", }, { input: "select /* table list */ 1 from t1, t2", }, { input: "select /* parenthessis in table list 1 */ 1 from (t1), t2", }, { input: "select /* parenthessis in table list 2 */ 1 from t1, (t2)", }, { input: "select /* use */ 1 from t1 use index (a) where b = 1", }, { input: "select /* use */ 1 from t1 use index () where b = 1", }, { input: "select /* keyword index */ 1 from t1 use index (`By`) where b = 1", }, { input: "select /* ignore */ 1 from t1 as t2 ignore index (a), t3 use index (b) where b = 1", }, { input: "select /* use */ 1 from t1 as t2 use index (a), t3 use index (b) where b = 1", }, { input: "select /* force */ 1 from t1 as t2 force index (a), t3 force index (b) where b = 1", }, { input: "select /* table alias */ 1 from t t1", output: "select /* table alias */ 1 from t as t1", }, { input: "select /* table alias with as */ 1 from t as t1", }, { input: "select /* string table alias */ 1 from t as 't1'", output: "select /* string table alias */ 1 from t as t1", }, { input: "select /* string table alias without as */ 
1 from t 't1'", output: "select /* string table alias without as */ 1 from t as t1", }, { input: "select /* keyword table alias */ 1 from t as `By`", }, { input: "select /* join */ 1 from t1 join t2", }, { input: "select /* join on */ 1 from t1 join t2 on a = b", }, { input: "select /* join on */ 1 from t1 join t2 using (a)", }, { input: "select /* inner join */ 1 from t1 inner join t2", output: "select /* inner join */ 1 from t1 join t2", }, { input: "select /* cross join */ 1 from t1 cross join t2", output: "select /* cross join */ 1 from t1 join t2", }, { input: "select /* straight_join */ 1 from t1 straight_join t2", }, { input: "select /* straight_join on */ 1 from t1 straight_join t2 on a = b", }, { input: "select /* left join */ 1 from t1 left join t2 on a = b", }, { input: "select /* left join */ 1 from t1 left join t2 using (a)", }, { input: "select /* left outer join */ 1 from t1 left outer join t2 on a = b", output: "select /* left outer join */ 1 from t1 left join t2 on a = b", }, { input: "select /* left outer join */ 1 from t1 left outer join t2 using (a)", output: "select /* left outer join */ 1 from t1 left join t2 using (a)", }, { input: "select /* right join */ 1 from t1 right join t2 on a = b", }, { input: "select /* right join */ 1 from t1 right join t2 using (a)", }, { input: "select /* right outer join */ 1 from t1 right outer join t2 on a = b", output: "select /* right outer join */ 1 from t1 right join t2 on a = b", }, { input: "select /* right outer join */ 1 from t1 right outer join t2 using (a)", output: "select /* right outer join */ 1 from t1 right join t2 using (a)", }, { input: "select /* natural join */ 1 from t1 natural join t2", }, { input: "select /* natural left join */ 1 from t1 natural left join t2", }, { input: "select /* natural left outer join */ 1 from t1 natural left join t2", output: "select /* natural left outer join */ 1 from t1 natural left join t2", }, { input: "select /* natural right join */ 1 from t1 natural right 
join t2", }, { input: "select /* natural right outer join */ 1 from t1 natural right join t2", output: "select /* natural right outer join */ 1 from t1 natural right join t2", }, { input: "select /* join on */ 1 from t1 join t2 on a = b", }, { input: "select /* join using */ 1 from t1 join t2 using (a)", }, { input: "select /* join using (a, b, c) */ 1 from t1 join t2 using (a, b, c)", }, { input: "select /* s.t */ 1 from s.t", }, { input: "select /* keyword schema & table name */ 1 from `By`.`bY`", }, { input: "select /* select in from */ 1 from (select 1 from t) as a", }, { input: "select /* select in from with no as */ 1 from (select 1 from t) a", output: "select /* select in from with no as */ 1 from (select 1 from t) as a", }, { input: "select /* where */ 1 from t where a = b", }, { input: "select /* and */ 1 from t where a = b and a = c", }, { input: "select /* && */ 1 from t where a = b && a = c", output: "select /* && */ 1 from t where a = b and a = c", }, { input: "select /* or */ 1 from t where a = b or a = c", }, { input: "select /* || */ 1 from t where a = b || a = c", output: "select /* || */ 1 from t where a = b or a = c", }, { input: "select /* not */ 1 from t where not a = b", }, { input: "select /* ! 
*/ 1 from t where a = !1", }, { input: "select /* bool is */ 1 from t where a = b is null", }, { input: "select /* bool is not */ 1 from t where a = b is not false", }, { input: "select /* true */ 1 from t where true", }, { input: "select /* false */ 1 from t where false", }, { input: "select /* false on left */ 1 from t where false = 0", }, { input: "select /* exists */ 1 from t where exists (select 1 from t)", }, { input: "select /* (boolean) */ 1 from t where not (a = b)", output: "select /* (boolean) */ 1 from t where not a = b", }, { input: "select /* in value list */ 1 from t where a in (b, c)", }, { input: "select /* in select */ 1 from t where a in (select 1 from t)", }, { input: "select /* not in */ 1 from t where a not in (b, c)", }, { input: "select /* like */ 1 from t where a like b", }, { input: "select /* like escape */ 1 from t where a like b escape '!'", }, { input: "select /* not like */ 1 from t where a not like b", }, { input: "select /* not like escape */ 1 from t where a not like b escape '$'", }, { input: "select /* regexp */ 1 from t where a regexp b", }, { input: "select /* not regexp */ 1 from t where a not regexp b", }, { input: "select /* rlike */ 1 from t where a rlike b", output: "select /* rlike */ 1 from t where a regexp b", }, { input: "select /* not rlike */ 1 from t where a not rlike b", output: "select /* not rlike */ 1 from t where a not regexp b", }, { input: "select /* between */ 1 from t where a between b and c", }, { input: "select /* not between */ 1 from t where a not between b and c", }, { input: "select /* is null */ 1 from t where a is null", }, { input: "select /* is not null */ 1 from t where a is not null", }, { input: "select /* is true */ 1 from t where a is true", }, { input: "select /* is not true */ 1 from t where a is not true", }, { input: "select /* is false */ 1 from t where a is false", }, { input: "select /* is not false */ 1 from t where a is not false", }, { input: "select /* < */ 1 from t where a < b", 
}, { input: "select /* <= */ 1 from t where a <= b", }, { input: "select /* >= */ 1 from t where a >= b", }, { input: "select /* > */ 1 from t where a > b", }, { input: "select /* != */ 1 from t where a != b", }, { input: "select /* <> */ 1 from t where a <> b", output: "select /* <> */ 1 from t where a != b", }, { input: "select /* <=> */ 1 from t where a <=> b", }, { input: "select /* != */ 1 from t where a != b", }, { input: "select /* single value expre list */ 1 from t where a in (b)", }, { input: "select /* select as a value expression */ 1 from t where a = (select a from t)", }, { input: "select /* parenthesised value */ 1 from t where a = (b)", output: "select /* parenthesised value */ 1 from t where a = b", }, { input: "select /* over-parenthesize */ ((1)) from t where ((a)) in (((1))) and ((a, b)) in ((((1, 1))), ((2, 2)))", output: "select /* over-parenthesize */ 1 from t where a in (1) and (a, b) in ((1, 1), (2, 2))", }, { input: "select /* dot-parenthesize */ (a.b) from t where (b.c) = 2", output: "select /* dot-parenthesize */ a.b from t where b.c = 2", }, { input: "select /* & */ 1 from t where a = b & c", }, { input: "select /* & */ 1 from t where a = b & c", }, { input: "select /* | */ 1 from t where a = b | c", }, { input: "select /* ^ */ 1 from t where a = b ^ c", }, { input: "select /* + */ 1 from t where a = b + c", }, { input: "select /* - */ 1 from t where a = b - c", }, { input: "select /* * */ 1 from t where a = b * c", }, { input: "select /* / */ 1 from t where a = b / c", }, { input: "select /* % */ 1 from t where a = b % c", }, { input: "select /* div */ 1 from t where a = b div c", }, { input: "select /* MOD */ 1 from t where a = b MOD c", output: "select /* MOD */ 1 from t where a = b % c", }, { input: "select /* << */ 1 from t where a = b << c", }, { input: "select /* >> */ 1 from t where a = b >> c", }, { input: "select /* % no space */ 1 from t where a = b%c", output: "select /* % no space */ 1 from t where a = b % c", }, { input: 
"select /* u+ */ 1 from t where a = +b", }, { input: "select /* u- */ 1 from t where a = -b", }, { input: "select /* u~ */ 1 from t where a = ~b", }, { input: "select /* -> */ a.b -> 'ab' from t", }, { input: "select /* -> */ a.b ->> 'ab' from t", }, { input: "select /* empty function */ 1 from t where a = b()", }, { input: "select /* function with 1 param */ 1 from t where a = b(c)", }, { input: "select /* function with many params */ 1 from t where a = b(c, d)", }, { input: "select /* function with distinct */ count(distinct a) from t", }, { input: "select count(distinctrow(1)) from (select (1) from dual union all select 1 from dual) a", output: "select count(distinct 1) from (select 1 from dual union all select 1 from dual) as a", }, { input: "select /* if as func */ 1 from t where a = if(b)", }, { input: "select /* current_timestamp */ current_timestamp() from t", }, { input: "select /* current_timestamp as func */ current_timestamp() from t", }, { input: "select /* current_timestamp with fsp */ current_timestamp(3) from t", }, { input: "select /* current_date */ current_date() from t", }, { input: "select /* current_date as func */ current_date() from t", }, { input: "select /* current_time */ current_time() from t", }, { input: "select /* current_time as func */ current_time() from t", }, { input: "select /* current_time with fsp */ current_time(1) from t", }, { input: "select /* utc_timestamp */ utc_timestamp() from t", }, { input: "select /* utc_timestamp as func */ utc_timestamp() from t", }, { input: "select /* utc_timestamp with fsp */ utc_timestamp(0) from t", }, { input: "select /* utc_time */ utc_time() from t", }, { input: "select /* utc_time as func */ utc_time() from t", }, { input: "select /* utc_time with fsp */ utc_time(4) from t", }, { input: "select /* utc_date */ utc_date() from t", }, { input: "select /* utc_date as func */ utc_date() from t", }, { input: "select /* localtime */ localtime() from t", }, { input: "select /* localtime as func 
*/ localtime() from t", }, { input: "select /* localtime with fsp */ localtime(5) from t", }, { input: "select /* localtimestamp */ localtimestamp() from t", }, { input: "select /* localtimestamp as func */ localtimestamp() from t", }, { input: "select /* localtimestamp with fsp */ localtimestamp(7) from t", }, { input: "select /* mod as func */ a from tab where mod(b, 2) = 0", }, { input: "select /* database as func no param */ database() from t", }, { input: "select /* database as func 1 param */ database(1) from t", }, { input: "select /* a */ a from t", }, { input: "select /* a.b */ a.b from t", }, { input: "select /* a.b.c */ a.b.c from t", }, { input: "select /* keyword a.b */ `By`.`bY` from t", }, { input: "select /* string */ 'a' from t", }, { input: "select /* double quoted string */ \"a\" from t", output: "select /* double quoted string */ 'a' from t", }, { input: "select /* quote quote in string */ 'a''a' from t", output: "select /* quote quote in string */ 'a\\'a' from t", }, { input: "select /* double quote quote in string */ \"a\"\"a\" from t", output: "select /* double quote quote in string */ 'a\\\"a' from t", }, { input: "select /* quote in double quoted string */ \"a'a\" from t", output: "select /* quote in double quoted string */ 'a\\'a' from t", }, { input: "select /* backslash quote in string */ 'a\\'a' from t", }, { input: "select /* literal backslash in string */ 'a\\\\na' from t", }, { input: "select /* all escapes */ '\\0\\'\\\"\\b\\n\\r\\t\\Z\\\\' from t", }, { input: "select /* non-escape */ '\\x' from t", output: "select /* non-escape */ 'x' from t", }, { input: "select /* unescaped backslash */ '\\n' from t", }, { input: "select /* value argument */ :a from t", }, { input: "select /* value argument with digit */ :a1 from t", }, { input: "select /* value argument with dot */ :a.b from t", }, { input: "select /* positional argument */ ? 
from t", output: "select /* positional argument */ :v1 from t", }, { input: "select /* multiple positional arguments */ ?, ? from t", output: "select /* multiple positional arguments */ :v1, :v2 from t", }, { input: "select /* list arg */ * from t where a in ::list", }, { input: "select /* list arg not in */ * from t where a not in ::list", }, { input: "select /* null */ null from t", }, { input: "select /* octal */ 010 from t", }, { input: "select /* hex */ x'f0A1' from t", output: "select /* hex */ X'f0A1' from t", }, { input: "select /* hex caps */ X'F0a1' from t", }, { input: "select /* bit literal */ b'0101' from t", output: "select /* bit literal */ B'0101' from t", }, { input: "select /* bit literal caps */ B'010011011010' from t", }, { input: "select /* 0x */ 0xf0 from t", }, { input: "select /* float */ 0.1 from t", }, { input: "select /* group by */ 1 from t group by a", }, { input: "select /* having */ 1 from t having a = b", }, { input: "select /* simple order by */ 1 from t order by a", output: "select /* simple order by */ 1 from t order by a asc", }, { input: "select /* order by asc */ 1 from t order by a asc", }, { input: "select /* order by desc */ 1 from t order by a desc", }, { input: "select /* order by null */ 1 from t order by null", }, { input: "select /* limit a */ 1 from t limit a", }, { input: "select /* limit a,b */ 1 from t limit a, b", }, { input: "select /* binary unary */ a- -b from t", output: "select /* binary unary */ a - -b from t", }, { input: "select /* - - */ - -b from t", }, { input: "select /* binary binary */ binary binary b from t", }, { input: "select /* binary ~ */ binary ~b from t", }, { input: "select /* ~ binary */ ~ binary b from t", }, { input: "select /* interval */ adddate('2008-01-02', interval 31 day) from t", }, { input: "select /* interval keyword */ adddate('2008-01-02', interval 1 year) from t", }, { input: "select /* TIMESTAMPADD */ TIMESTAMPADD(MINUTE, 1, '2008-01-04') from t", output: "select /* 
TIMESTAMPADD */ timestampadd(MINUTE, 1, '2008-01-04') from t", }, { input: "select /* TIMESTAMPDIFF */ TIMESTAMPDIFF(MINUTE, '2008-01-02', '2008-01-04') from t", output: "select /* TIMESTAMPDIFF */ timestampdiff(MINUTE, '2008-01-02', '2008-01-04') from t", }, { input: "select /* dual */ 1 from dual", }, { input: "select /* Dual */ 1 from Dual", output: "select /* Dual */ 1 from dual", }, { input: "select /* DUAL */ 1 from Dual", output: "select /* DUAL */ 1 from dual", }, { input: "select /* column as bool in where */ a from t where b", }, { input: "select /* OR of columns in where */ * from t where a or b", }, { input: "select /* OR of mixed columns in where */ * from t where a = 5 or b and c is not null", }, { input: "select /* OR in select columns */ (a or b) from t where c = 5", output: "select /* OR in select columns */ a or b from t where c = 5", }, { input: "select /* XOR of columns in where */ * from t where a xor b", }, { input: "select /* XOR of mixed columns in where */ * from t where a = 5 xor b and c is not null", }, { input: "select /* XOR in select columns */ (a xor b) from t where c = 5", output: "select /* XOR in select columns */ a xor b from t where c = 5", }, { input: "select /* XOR in select columns */ * from t where (1 xor c1 > 0)", output: "select /* XOR in select columns */ * from t where 1 xor c1 > 0", }, { input: "select /* bool as select value */ a, true from t", }, { input: "select /* bool column in ON clause */ * from t join s on t.id = s.id and s.foo where t.bar", }, { input: "select /* bool in order by */ * from t order by a is null or b asc", }, { input: "select /* string in case statement */ if(max(case a when 'foo' then 1 else 0 end) = 1, 'foo', 'bar') as foobar from t", }, { input: "/*!show databases*/", output: "show databases", }, { input: "select /*!40101 * from*/ t", output: "select * from t", }, { input: "select /*! 
* from*/ t", output: "select * from t", }, { input: "select /*!* from*/ t", output: "select * from t", }, { input: "select /*!401011 from*/ t", output: "select 1 from t", }, { input: "select /* dual */ 1 from dual", }, { input: "select * from (select 'tables') tables", output: "select * from (select 'tables' from dual) as `tables`", }, { input: "insert /* simple */ into a values (1)", }, { input: "insert /* a.b */ into a.b values (1)", }, { input: "insert /* multi-value */ into a values (1, 2)", }, { input: "insert /* multi-value list */ into a values (1, 2), (3, 4)", }, { input: "insert /* no values */ into a values ()", }, { input: "insert /* set */ into a set a = 1, b = 2", output: "insert /* set */ into a(a, b) values (1, 2)", }, { input: "insert /* set default */ into a set a = default, b = 2", output: "insert /* set default */ into a(a, b) values (default, 2)", }, { input: "insert /* value expression list */ into a values (a + 1, 2 * 3)", }, { input: "insert /* default */ into a values (default, 2 * 3)", }, { input: "insert /* column list */ into a(a, b) values (1, 2)", }, { input: "insert into a(a, b) values (1, ifnull(null, default(b)))", }, { input: "insert /* qualified column list */ into a(a, b) values (1, 2)", }, { input: "insert /* qualified columns */ into t (t.a, t.b) values (1, 2)", output: "insert /* qualified columns */ into t(a, b) values (1, 2)", }, { input: "insert /* select */ into a select b, c from d", }, { input: "insert /* it accepts columns with keyword action */ into a(action, b) values (1, 2)", output: "insert /* it accepts columns with keyword action */ into a(`action`, b) values (1, 2)", }, { input: "insert /* no cols & paren select */ into a (select * from t)", }, { input: "insert /* cols & paren select */ into a(a, b, c) (select * from t)", }, { input: "insert /* cols & union with paren select */ into a(b, c) (select d, e from f) union (select g from h)", }, { input: "insert /* on duplicate */ into a values (1, 2) on duplicate key 
update b = func(a), c = d", }, { input: "insert /* bool in insert value */ into a values (1, true, false)", }, { input: "insert /* bool in on duplicate */ into a values (1, 2) on duplicate key update b = false, c = d", }, { input: "insert /* bool in on duplicate */ into a values (1, 2, 3) on duplicate key update b = values(b), c = d", }, { input: "insert /* bool in on duplicate */ into a values (1, 2, 3) on duplicate key update b = values(a.b), c = d", }, { input: "insert /* bool expression on duplicate */ into a values (1, 2) on duplicate key update b = func(a), c = a > d", }, { input: "insert into user(username, `status`) values ('Chuck', default(`status`))", }, { input: "insert into user(format, tree, vitess) values ('Chuck', 42, 'Barry')", output: "insert into user(`format`, `tree`, `vitess`) values ('Chuck', 42, 'Barry')", }, { input: "insert into customer () values ()", output: "insert into customer values ()", }, { input: "update /* simple */ a set b = 3", }, { input: "update /* a.b */ a.b set b = 3", }, { input: "update /* list */ a set b = 3, c = 4", }, { input: "update /* expression */ a set b = 3 + 4", }, { input: "update /* where */ a set b = 3 where a = b", }, { input: "update /* order */ a set b = 3 order by c desc", }, { input: "update /* limit */ a set b = 3 limit c", }, { input: "update /* bool in update */ a set b = true", }, { input: "update /* bool expr in update */ a set b = 5 > 2", }, { input: "update /* bool in update where */ a set b = 5 where c", }, { input: "update /* table qualifier */ a set a.b = 3", }, { input: "update /* table qualifier */ a set t.a.b = 3", }, { input: "update /* table alias */ tt aa set aa.cc = 3", output: "update /* table alias */ tt as aa set aa.cc = 3", }, { input: "update (select id from foo) subqalias set id = 4", output: "update (select id from foo) as subqalias set id = 4", }, { input: "update foo f, bar b set f.id = b.id where b.name = 'test'", output: "update foo as f, bar as b set f.id = b.id where b.`name` 
= 'test'", }, { input: "update foo f join bar b on f.name = b.name set f.id = b.id where b.name = 'test'", output: "update foo as f join bar as b on f.`name` = b.`name` set f.id = b.id where b.`name` = 'test'", }, { input: "update /* ignore */ ignore a set b = 3", }, { input: "delete /* simple */ from a", }, { input: "delete /* a.b */ from a.b", }, { input: "delete /* where */ from a where a = b", }, { input: "delete /* order */ from a order by b desc", }, { input: "delete /* limit */ from a limit b", }, { input: "delete a from a join b on a.id = b.id where b.name = 'test'", output: "delete a from a join b on a.id = b.id where b.`name` = 'test'", }, { input: "delete a, b from a, b where a.id = b.id and b.name = 'test'", output: "delete a, b from a, b where a.id = b.id and b.`name` = 'test'", }, { input: "delete /* simple */ ignore from a", }, { input: "delete ignore from a", }, { input: "delete /* limit */ ignore from a", }, { input: "delete from a1, a2 using t1 as a1 inner join t2 as a2 where a1.id=a2.id", output: "delete a1, a2 from t1 as a1 join t2 as a2 where a1.id = a2.id", }, { input: "set /* simple */ a = 3", }, { input: "set #simple\n b = 4", }, { input: "set character_set_results = utf8", }, { input: "set @@session.autocommit = true", }, { input: "set @@session.`autocommit` = true", }, { input: "set @@session.'autocommit' = true", }, { input: "set @@session.\"autocommit\" = true", }, { input: "set @@session.autocommit = ON", output: "set @@session.autocommit = 'on'", }, { input: "set @@session.autocommit= OFF", output: "set @@session.autocommit = 'off'", }, { input: "set autocommit = on", output: "set autocommit = 'on'", }, { input: "set autocommit = off", output: "set autocommit = 'off'", }, { input: "set names utf8 collate foo", output: "set names 'utf8'", }, { input: "set names utf8 collate 'foo'", output: "set names 'utf8'", }, { input: "set character set utf8", output: "set charset 'utf8'", }, { input: "set character set 'utf8'", output: "set charset 
'utf8'", }, { input: "set character set \"utf8\"", output: "set charset 'utf8'", }, { input: "set charset default", output: "set charset default", }, { input: "set session wait_timeout = 3600", output: "set session wait_timeout = 3600", }, { input: "set session wait_timeout = 3600, session autocommit = off", output: "set session wait_timeout = 3600, session autocommit = 'off'", }, { input: "set session wait_timeout = 3600, @@global.autocommit = off", output: "set session wait_timeout = 3600, @@global.autocommit = 'off'", }, { input: "set /* list */ a = 3, b = 4", }, { input: "set /* mixed list */ a = 3, names 'utf8', charset 'ascii', b = 4", }, { input: "set session transaction isolation level repeatable read", }, { input: "set transaction isolation level repeatable read", }, { input: "set global transaction isolation level repeatable read", }, { input: "set transaction isolation level repeatable read", }, { input: "set transaction isolation level read committed", }, { input: "set transaction isolation level read uncommitted", }, { input: "set transaction isolation level serializable", }, { input: "set transaction read write", }, { input: "set transaction read only", }, { input: "set tx_read_only = 1", }, { input: "set tx_read_only = 0", }, { input: "set transaction_read_only = 1", }, { input: "set transaction_read_only = 0", }, { input: "set tx_isolation = 'repeatable read'", }, { input: "set tx_isolation = 'read committed'", }, { input: "set tx_isolation = 'read uncommitted'", }, { input: "set tx_isolation = 'serializable'", }, { input: "set sql_safe_updates = 0", }, { input: "set sql_safe_updates = 1", }, { input: "set @variable = 42", }, { input: "set @period.variable = 42", }, { input: "alter table a add foo", output: "alter table a", }, { input: "alter table a add spatial key foo (column1)", output: "alter table a", }, { input: "alter table a add fulltext key foo (column1)", output: "alter table a", }, { input: "alter table a add unique key foo (column1)", 
output: "alter table a", }, { input: "alter table `By` add foo", output: "alter table `By`", }, { input: "alter table a alter foo", output: "alter table a", }, { input: "alter table a change foo", output: "alter table a", }, { input: "alter table a modify foo", output: "alter table a", }, { input: "alter table a drop foo", output: "alter table a", }, { input: "alter table a disable foo", output: "alter table a", }, { input: "alter table a enable foo", output: "alter table a", }, { input: "alter table a order foo", output: "alter table a", }, { input: "alter table a default foo", output: "alter table a", }, { input: "alter table a discard foo", output: "alter table a", }, { input: "alter table a import foo", output: "alter table a", }, { input: "alter table a rename b", output: "rename table a to b", }, { input: "alter table `By` rename `bY`", output: "rename table `By` to `bY`", }, { input: "alter table a rename to b", output: "rename table a to b", }, { input: "alter table a rename as b", output: "rename table a to b", }, { input: "alter table a rename index foo to bar", output: "alter table a", }, { input: "alter table a rename key foo to bar", output: "alter table a", }, { input: "alter table e auto_increment = 20", output: "alter table e", }, { input: "alter table e character set = 'ascii'", output: "alter table e", }, { input: "alter table e default character set = 'ascii'", output: "alter table e", }, { input: "alter table e comment = 'hello'", output: "alter table e", }, { input: "alter table a reorganize partition b into (partition c values less than (?), partition d values less than (maxvalue))", output: "alter table a reorganize partition b into (partition c values less than (:v1), partition d values less than (maxvalue))", }, { input: "alter table a partition by range (id) (partition p0 values less than (10), partition p1 values less than (maxvalue))", output: "alter table a", }, { input: "alter table a add column id int", output: "alter table a", }, { 
input: "alter table a add index idx (id)", output: "alter table a", }, { input: "alter table a add fulltext index idx (id)", output: "alter table a", }, { input: "alter table a add spatial index idx (id)", output: "alter table a", }, { input: "alter table a add fulltext index idx (id)", output: "alter table a", }, { input: "alter table a add foreign key", output: "alter table a", }, { input: "alter table a add primary key", output: "alter table a", }, { input: "alter table a add constraint", output: "alter table a", }, { input: "alter table a add id", output: "alter table a", }, { input: "alter table a drop column id int", output: "alter table a", }, { input: "alter table a drop partition p2712", output: "alter table a", }, { input: "alter table a drop index idx (id)", output: "alter table a", }, { input: "alter table a drop fulltext index idx (id)", output: "alter table a", }, { input: "alter table a drop spatial index idx (id)", output: "alter table a", }, { input: "alter table a drop fulltext index idx (id)", output: "alter table a", }, { input: "alter table a add check ch_1", output: "alter table a", }, { input: "alter table a drop check ch_1", output: "alter table a", }, { input: "alter table a drop foreign key", output: "alter table a", }, { input: "alter table a drop primary key", output: "alter table a", }, { input: "alter table a drop constraint", output: "alter table a", }, { input: "alter table a drop id", output: "alter table a", }, { input: "alter database character set geostd8", }, { input: "alter database d character set geostd8", }, { input: "alter database d default collate 'utf8_bin'", }, { input: "alter database default collate 'utf8_bin'", }, { input: "alter database d upgrade data directory name", }, { input: "alter database d collate = 'utf8_bin'", output: "alter database d collate 'utf8_bin'", }, { input: "alter schema d default character set = geostd8", output: "alter database d default character set geostd8", }, { input: "alter schema d 
character set = geostd8", output: "alter database d character set geostd8", }, { input: "alter schema d default collate = 'utf8_bin'", output: "alter database d default collate 'utf8_bin'", }, { input: "alter schema d collate = 'utf8_bin' character set = geostd8 character set = geostd8", output: "alter database d collate 'utf8_bin' character set geostd8 character set geostd8", }, { input: "create table a", }, { input: "create table a (\n\t`a` int\n)", output: "create table a (\n\ta int\n)", }, { input: "create table `by` (\n\t`by` char\n)", }, { input: "create table test (\n\t__year year(4)\n)", }, { input: "create table if not exists a (\n\t`a` int\n)", output: "create table a (\n\ta int\n)", }, { input: "create table a ignore me this is garbage", output: "create table a", }, { input: "create table a (a int, b char, c garbage)", output: "create table a", }, { input: "create table a (b1 bool not null primary key, b2 boolean not null)", output: "create table a (\n\tb1 bool not null primary key,\n\tb2 boolean not null\n)", }, { input: "alter vschema create vindex hash_vdx using hash", }, { input: "alter vschema create vindex keyspace.hash_vdx using hash", }, { input: "alter vschema create vindex lookup_vdx using lookup with owner=user, table=name_user_idx, from=name, to=user_id", }, { input: "alter vschema create vindex xyz_vdx using xyz with param1=hello, param2='world', param3=123", }, { input: "alter vschema drop vindex hash_vdx", }, { input: "alter vschema drop vindex ks.hash_vdx", }, { input: "alter vschema add table a", }, { input: "alter vschema add table ks.a", }, { input: "alter vschema add sequence a_seq", }, { input: "alter vschema add sequence ks.a_seq", }, { input: "alter vschema on a add auto_increment id using a_seq", }, { input: "alter vschema on ks.a add auto_increment id using a_seq", }, { input: "alter vschema drop table a", }, { input: "alter vschema drop table ks.a", }, { input: "alter vschema on a add vindex hash (id)", }, { input: "alter 
vschema on ks.a add vindex hash (id)", }, { input: "alter vschema on a add vindex `hash` (`id`)", output: "alter vschema on a add vindex hash (id)", }, { input: "alter vschema on `ks`.a add vindex `hash` (`id`)", output: "alter vschema on ks.a add vindex hash (id)", }, { input: "alter vschema on a add vindex hash (id) using `hash`", output: "alter vschema on a add vindex hash (id) using hash", }, { input: "alter vschema on a add vindex `add` (`add`)", }, { input: "alter vschema on a add vindex hash (id) using hash", }, { input: "alter vschema on a add vindex hash (id) using `hash`", output: "alter vschema on a add vindex hash (id) using hash", }, { input: "alter vschema on user add vindex name_lookup_vdx (name) using lookup_hash with owner=user, table=name_user_idx, from=name, to=user_id", output: "alter vschema on user add vindex name_lookup_vdx (`name`) using lookup_hash with owner=user, table=name_user_idx, from=name, to=user_id", }, { input: "alter vschema on user2 add vindex name_lastname_lookup_vdx (name,lastname) using lookup with owner=`user`, table=`name_lastname_keyspace_id_map`, from=`name,lastname`, to=`keyspace_id`", output: "alter vschema on user2 add vindex name_lastname_lookup_vdx (`name`, lastname) using lookup with owner=user, table=name_lastname_keyspace_id_map, from=name,lastname, to=keyspace_id", }, { input: "alter vschema on a drop vindex hash", }, { input: "alter vschema on ks.a drop vindex hash", }, { input: "alter vschema on a drop vindex `hash`", output: "alter vschema on a drop vindex hash", }, { input: "alter vschema on a drop vindex hash", output: "alter vschema on a drop vindex hash", }, { input: "alter vschema on a drop vindex `add`", output: "alter vschema on a drop vindex `add`", }, { input: "create index a on b (col1)", }, { input: "create unique index a on b (col1)", }, { input: "create unique index a using foo on b (col1 desc)", }, { input: "create fulltext index a using foo on b (col1)", }, { input: "create spatial index a using 
foo on b (col1)", }, { input: "create index a on b (col1) using btree key_block_size 12 with parser 'a' comment 'string' algorithm inplace lock none", }, { input: "create index a on b ((col1 + col2), (col1*col2))", output: "create index a on b ()", partialDDL: true, }, { input: "create view a", output: "create table a", }, { input: "create or replace view a", output: "create table a", }, { input: "alter view a", output: "alter table a", }, { input: "rename table a to b", output: "rename table a to b", }, { input: "rename table a to b, b to c", output: "rename table a to b, b to c", }, { input: "drop view a", output: "drop table a", }, { input: "drop table a", output: "drop table a", }, { input: "drop table a, b", output: "drop table a, b", }, { input: "drop table if exists a", output: "drop table if exists a", }, { input: "drop view if exists a", output: "drop table if exists a", }, { input: "drop index b on a", output: "alter table a", }, { input: "analyze table a", output: "otherread", }, { input: "flush tables", output: "flush", }, { input: "flush tables with read lock", output: "flush", }, { input: "show binary logs", output: "show binary logs", }, { input: "show binlog events", output: "show binlog", }, { input: "show character set", output: "show charset", }, { input: "show character set like '%foo'", output: "show charset like '%foo'", }, { input: "show charset", output: "show charset", }, { input: "show charset like '%foo'", output: "show charset like '%foo'", }, { input: "show charset where 'charset' = 'utf8'", output: "show charset where 'charset' = 'utf8'", }, { input: "show charset where 'charset' = '%foo'", output: "show charset where 'charset' = '%foo'", }, { input: "show collation", output: "show collation", }, { input: "show collation where `Charset` = 'utf8' and `Collation` = 'utf8_bin'", output: "show collation where `Charset` = 'utf8' and `Collation` = 'utf8_bin'", }, { input: "show create database d", output: "show create database", }, { input: 
"show create event e", output: "show create event", }, { input: "show create function f", output: "show create function", }, { input: "show create procedure p", output: "show create procedure", }, { input: "show create table t", output: "show create table t", }, { input: "show create trigger t", output: "show create trigger", }, { input: "show create user u", output: "show create user", }, { input: "show create view v", output: "show create view", }, { input: "show databases", output: "show databases", }, { input: "show databases like '%'", output: "show databases like '%'", }, { input: "show schemas", output: "show schemas", }, { input: "show schemas like '%'", output: "show schemas like '%'", }, { input: "show engine INNODB", output: "show engine", }, { input: "show engines", output: "show engines", }, { input: "show storage engines", output: "show storage", }, { input: "show errors", output: "show errors", }, { input: "show events", output: "show events", }, { input: "show function code func", output: "show function", }, { input: "show function status", output: "show function", }, { input: "show grants for 'root@localhost'", output: "show grants", }, { input: "show index from t", }, { input: "show indexes from t", }, { input: "show keys from t", }, { input: "show master status", output: "show master", }, { input: "show open tables", output: "show open", }, { input: "show plugins", output: "show plugins", }, { input: "show privileges", output: "show privileges", }, { input: "show procedure code p", output: "show procedure", }, { input: "show procedure status", output: "show procedure", }, { input: "show processlist", output: "show processlist", }, { input: "show full processlist", output: "show processlist", }, { input: "show profile cpu for query 1", output: "show profile", }, { input: "show profiles", output: "show profiles", }, { input: "show relaylog events", output: "show relaylog", }, { input: "show slave hosts", output: "show slave", }, { input: "show 
slave status", output: "show slave", }, { input: "show status", output: "show status", }, { input: "show global status", output: "show global status", }, { input: "show session status", output: "show session status", }, { input: "show table status", }, { input: "show table status from dbname", }, { input: "show table status in dbname", output: "show table status from dbname", }, { input: "show table status in dbname LIKE '%' ", output: "show table status from dbname like '%'", }, { input: "show table status from dbname Where col=42 ", output: "show table status from dbname where col = 42", }, { input: "show tables", }, { input: "show tables like '%keyspace%'", }, { input: "show tables where 1 = 0", }, { input: "show tables from a", }, { input: "show tables from a where 1 = 0", }, { input: "show tables from a like '%keyspace%'", }, { input: "show full tables", }, { input: "show full tables from a", }, { input: "show full tables in a", output: "show full tables from a", }, { input: "show full tables from a like '%keyspace%'", }, { input: "show full tables from a where 1 = 0", }, { input: "show full tables like '%keyspace%'", }, { input: "show full tables where 1 = 0", }, { input: "show full columns in a in b like '%'", output: "show full columns from a from b like '%'", }, { input: "show full columns from messages from test_keyspace like '%'", }, { input: "show full fields from a like '%'", output: "show full columns from a like '%'", }, { input: "show fields from a where 1 = 1", output: "show columns from a where 1 = 1", }, { input: "show triggers", output: "show triggers", }, { input: "show variables", output: "show variables", }, { input: "show global variables", output: "show global variables", }, { input: "show session variables", output: "show session variables", }, { input: "show vitess_keyspaces", }, { input: "show vitess_keyspaces like '%'", }, { input: "show vitess_shards", }, { input: "show vitess_shards like '%'", }, { input: "show vitess_tablets", }, { 
input: "show vitess_tablets like '%'", }, { input: "show vitess_tablets where hostname = 'some-tablet'", }, { input: "show vschema tables", }, { input: "show vschema vindexes", }, { input: "show vschema vindexes on t", }, { input: "show warnings", output: "show warnings", }, { input: "select warnings from t", output: "select `warnings` from t", }, { input: "show foobar", output: "show foobar", }, { input: "show foobar like select * from table where syntax is 'ignored'", output: "show foobar", }, { input: "use db", output: "use db", }, { input: "use duplicate", output: "use `duplicate`", }, { input: "use `ks:-80@master`", output: "use `ks:-80@master`", }, { input: "use @replica", output: "use `@replica`", }, { input: "use ks@replica", output: "use `ks@replica`", }, { input: "describe select * from t", output: "explain select * from t", }, { input: "desc select * from t", output: "explain select * from t", }, { input: "desc foobar", output: "otherread", }, { input: "explain t1", output: "otherread", }, { input: "explain t1 col", output: "otherread", }, { input: "explain select * from t", }, { input: "explain format = traditional select * from t", }, { input: "explain analyze select * from t", }, { input: "explain format = tree select * from t", }, { input: "explain format = json select * from t", }, { input: "explain format = vitess select * from t", }, { input: "describe format = vitess select * from t", output: "explain format = vitess select * from t", }, { input: "explain delete from t", }, { input: "explain insert into t(col1, col2) values (1, 2)", }, { input: "explain update t set col = 2", }, { input: "truncate table foo", output: "truncate table foo", }, { input: "truncate foo", output: "truncate table foo", }, { input: "repair foo", output: "otheradmin", }, { input: "optimize foo", output: "otheradmin", }, { input: "lock tables foo read", output: "lock tables foo read", }, { input: "lock tables foo write", output: "lock tables foo write", }, { input: "lock 
tables foo read local", output: "lock tables foo read local", }, { input: "lock tables foo low_priority write", output: "lock tables foo low_priority write", }, { input: "unlock tables", output: "unlock tables", }, { input: "select /* EQ true */ 1 from t where a = true", }, { input: "select /* EQ false */ 1 from t where a = false", }, { input: "select /* NE true */ 1 from t where a != true", }, { input: "select /* NE false */ 1 from t where a != false", }, { input: "select /* LT true */ 1 from t where a < true", }, { input: "select /* LT false */ 1 from t where a < false", }, { input: "select /* GT true */ 1 from t where a > true", }, { input: "select /* GT false */ 1 from t where a > false", }, { input: "select /* LE true */ 1 from t where a <= true", }, { input: "select /* LE false */ 1 from t where a <= false", }, { input: "select /* GE true */ 1 from t where a >= true", }, { input: "select /* GE false */ 1 from t where a >= false", }, { input: "select * from t order by a collate utf8_general_ci", output: "select * from t order by a collate utf8_general_ci asc", }, { input: "select k collate latin1_german2_ci as k1 from t1 order by k1 asc", }, { input: "select * from t group by a collate utf8_general_ci", }, { input: "select MAX(k collate latin1_german2_ci) from t1", }, { input: "select distinct k collate latin1_german2_ci from t1", }, { input: "select * from t1 where 'Müller' collate latin1_german2_ci = k", }, { input: "select * from t1 where k like 'Müller' collate latin1_german2_ci", }, { input: "select k from t1 group by k having k = 'Müller' collate latin1_german2_ci", }, { input: "select k from t1 join t2 order by a collate latin1_german2_ci asc, b collate latin1_german2_ci asc", }, { input: "select k collate 'latin1_german2_ci' as k1 from t1 order by k1 asc", output: "select k collate latin1_german2_ci as k1 from t1 order by k1 asc", }, { input: "select /* drop trailing semicolon */ 1 from dual;", output: "select /* drop trailing semicolon */ 1 from 
dual", }, { input: "select /* cache directive */ sql_no_cache 'foo' from t", }, { input: "select distinct sql_no_cache 'foo' from t", }, { input: "select sql_no_cache distinct 'foo' from t", output: "select distinct sql_no_cache 'foo' from t", }, { input: "select sql_no_cache straight_join distinct 'foo' from t", output: "select distinct sql_no_cache straight_join 'foo' from t", }, { input: "select straight_join distinct sql_no_cache 'foo' from t", output: "select distinct sql_no_cache straight_join 'foo' from t", }, { input: "select sql_calc_found_rows 'foo' from t", output: "select sql_calc_found_rows 'foo' from t", }, { input: "select binary 'a' = 'A' from t", }, { input: "select 1 from t where foo = _binary 'bar'", }, { input: "select 1 from t where foo = _utf8 'bar' and bar = _latin1 'sjösjuk'", }, { input: "select 1 from t where foo = _binary'bar'", output: "select 1 from t where foo = _binary 'bar'", }, { input: "select 1 from t where foo = _utf8mb4 'bar'", }, { input: "select 1 from t where foo = _utf8mb4'bar'", output: "select 1 from t where foo = _utf8mb4 'bar'", }, { input: "select match(a) against ('foo') from t", }, { input: "select match(a1, a2) against ('foo' in natural language mode with query expansion) from t", }, { input: "select database()", output: "select database() from dual", }, { input: "select schema()", output: "select schema() from dual", }, { input: "select title from video as v where match(v.title, v.tag) against ('DEMO' in boolean mode)", }, { input: "select name, group_concat(score) from t group by name", output: "select `name`, group_concat(score) from t group by `name`", }, { input: "select name, group_concat(distinct id, score order by id desc separator ':') from t group by name", output: "select `name`, group_concat(distinct id, score order by id desc separator ':') from t group by `name`", }, { input: "select name, group_concat(distinct id, score order by id desc separator ':' limit 1) from t group by name", output: "select 
`name`, group_concat(distinct id, score order by id desc separator ':' limit 1) from t group by `name`", }, { input: "select name, group_concat(distinct id, score order by id desc separator ':' limit 10, 2) from t group by name", output: "select `name`, group_concat(distinct id, score order by id desc separator ':' limit 10, 2) from t group by `name`", }, { input: "select * from t partition (p0)", }, { input: "select * from t partition (p0, p1)", }, { input: "select e.id, s.city from employees as e join stores partition (p1) as s on e.store_id = s.id", }, { input: "select truncate(120.3333, 2) from dual", }, { input: "update t partition (p0) set a = 1", }, { input: "insert into t partition (p0) values (1, 'asdf')", }, { input: "insert into t1 select * from t2 partition (p0)", }, { input: "replace into t partition (p0) values (1, 'asdf')", }, { input: "delete from t partition (p0) where a = 1", }, { input: "stream * from t", }, { input: "vstream * from t", }, { input: "stream /* comment */ * from t", }, { input: "begin", }, { input: "begin;", output: "begin", }, { input: "start transaction", output: "begin", }, { input: "commit", }, { input: "rollback", }, { input: "create database test_db", }, { input: "create schema test_db", output: "create database test_db", }, { input: "create database if not exists test_db", }, { input: "create schema if not exists test_db", output: "create database if not exists test_db", }, { input: "create database test_db default collate 'utf8mb4_general_ci' collate utf8mb4_general_ci", }, { input: "create database test_db character set geostd8", }, { input: "create database test_db character set * unparsable", output: "create database test_db", partialDDL: true, }, { input: "drop database test_db", }, { input: "drop schema test_db", output: "drop database test_db", }, { input: "drop database if exists test_db", }, { input: "delete a.*, b.* from tbl_a a, tbl_b b where a.id = b.id and b.name = 'test'", output: "delete a, b from tbl_a as a, 
tbl_b as b where a.id = b.id and b.`name` = 'test'", }, { input: "select distinctrow a.* from (select (1) from dual union all select 1 from dual) a", output: "select distinct a.* from (select 1 from dual union all select 1 from dual) as a", }, { input: "select `weird function name`() from t", }, { input: "select status() from t", // should not escape function names that are keywords }, { input: "select * from `weird table name`", }, { input: "SHOW FULL TABLES FROM `jiradb` LIKE 'AO_E8B6CC_ISSUE_MAPPING'", output: "show full tables from jiradb like 'AO_E8B6CC_ISSUE_MAPPING'", }, { input: "SHOW FULL COLUMNS FROM AO_E8B6CC_ISSUE_MAPPING FROM jiradb LIKE '%'", output: "show full columns from AO_E8B6CC_ISSUE_MAPPING from jiradb like '%'", }, { input: "SHOW KEYS FROM `AO_E8B6CC_ISSUE_MAPPING` FROM `jiradb`", output: "show keys from AO_E8B6CC_ISSUE_MAPPING from jiradb", }, { input: "SHOW CREATE TABLE `jiradb`.`AO_E8B6CC_ISSUE_MAPPING`", output: "show create table jiradb.AO_E8B6CC_ISSUE_MAPPING", }, { input: "SHOW INDEX FROM `AO_E8B6CC_ISSUE_MAPPING` FROM `jiradb`", output: "show index from AO_E8B6CC_ISSUE_MAPPING from jiradb", }, { input: "SHOW FULL TABLES FROM `jiradb` LIKE '%'", output: "show full tables from jiradb like '%'", }, { input: "SHOW EXTENDED INDEX FROM `AO_E8B6CC_PROJECT_MAPPING` FROM `jiradb`", output: "show extended index from AO_E8B6CC_PROJECT_MAPPING from jiradb", }, { input: "SHOW EXTENDED KEYS FROM `AO_E8B6CC_ISSUE_MAPPING` FROM `jiradb`", output: "show extended keys from AO_E8B6CC_ISSUE_MAPPING from jiradb", }, { input: "SHOW CREATE TABLE `jiradb`.`AO_E8B6CC_ISSUE_MAPPING`", output: "show create table jiradb.AO_E8B6CC_ISSUE_MAPPING", }, { input: "create table t1 ( check (c1 <> c2), c1 int check (c1 > 10), c2 int constraint c2_positive check (c2 > 0), c3 int check (c3 < 100), constraint c1_nonzero check (c1 <> 0), check (c1 > c3))", output: "create table t1 (\n" + "\tc1 int,\n" + "\tc2 int,\n" + "\tc3 int,\n" + "\tcheck constraint on expression c1 != 
c2 enforced,\n" + "\tcheck constraint on expression c1 > 10 enforced,\n" + "\tconstraint c2_positive check constraint on expression c2 > 0 enforced,\n" + "\tcheck constraint on expression c3 < 100 enforced,\n" + "\tconstraint c1_nonzero check constraint on expression c1 != 0 enforced,\n" + "\tcheck constraint on expression c1 > c3 enforced\n)", }, { input: "SHOW INDEXES FROM `AO_E8B6CC_ISSUE_MAPPING` FROM `jiradb`", output: "show indexes from AO_E8B6CC_ISSUE_MAPPING from jiradb", }, { input: "SHOW FULL TABLES FROM `jiradb` LIKE '%'", output: "show full tables from jiradb like '%'", }, { input: "SHOW EXTENDED INDEXES FROM `AO_E8B6CC_PROJECT_MAPPING` FROM `jiradb`", output: "show extended indexes from AO_E8B6CC_PROJECT_MAPPING from jiradb", }, { input: "SHOW EXTENDED INDEXES IN `AO_E8B6CC_PROJECT_MAPPING` IN `jiradb`", output: "show extended indexes from AO_E8B6CC_PROJECT_MAPPING from jiradb", }, { input: "do 1", output: "otheradmin", }, { input: "do funcCall(), 2 = 1, 3 + 1", output: "otheradmin", }, { input: "savepoint a", }, { input: "savepoint `@@@;a`", }, { input: "rollback to a", }, { input: "rollback to `@@@;a`", }, { input: "rollback work to a", output: "rollback to a", }, { input: "rollback to savepoint a", output: "rollback to a", }, { input: "rollback work to savepoint a", output: "rollback to a", }, { input: "release savepoint a", }, { input: "release savepoint `@@@;a`", }} ) func TestValid(t *testing.T) { for _, tcase := range validSQL { t.Run(tcase.input, func(t *testing.T) { if tcase.output == "" { tcase.output = tcase.input } tree, err := Parse(tcase.input) require.NoError(t, err, tcase.input) out := String(tree) if tcase.output != out { t.Errorf("Parsing failed. \nExpected/Got:\n%s\n%s", tcase.output, out) } // CREATE INDEX currently only has 5.7 specifications. // For mysql 8.0 syntax, the query is not entirely parsed. // Add more structs as we go on adding full parsing support for DDL constructs for 5.7 syntax. 
			// For the statement types that already track partial-DDL
			// parsing, IsFullyParsed must agree with the test case's
			// partialDDL flag.
			switch x := tree.(type) {
			case *CreateIndex:
				assert.Equal(t, !tcase.partialDDL, x.IsFullyParsed())
			case *CreateDatabase:
				assert.Equal(t, !tcase.partialDDL, x.IsFullyParsed())
			case *AlterDatabase:
				assert.Equal(t, !tcase.partialDDL, x.IsFullyParsed())
			}
			// This test just exercises the tree walking functionality.
			// There's no automated way to verify that a node calls
			// all its children. But we can examine code coverage and
			// ensure that all walkSubtree functions were called.
			Walk(func(node SQLNode) (bool, error) {
				return true, nil
			}, tree)
		})
	}
}

// Ensure there is no corruption from using a pooled yyParserImpl in Parse.
// Runs `parallelism` goroutines, each parsing `numIters` randomly chosen
// cases from validSQL, and checks every round-trip result.
func TestParallelValid(t *testing.T) {
	parallelism := 100
	numIters := 1000

	wg := sync.WaitGroup{}
	wg.Add(parallelism)
	for i := 0; i < parallelism; i++ {
		go func() {
			defer wg.Done()
			for j := 0; j < numIters; j++ {
				// tcase is a copy, so mutating output below is
				// safe across goroutines.
				tcase := validSQL[rand.Intn(len(validSQL))]
				if tcase.output == "" {
					tcase.output = tcase.input
				}
				tree, err := Parse(tcase.input)
				if err != nil {
					t.Errorf("Parse(%q) err: %v, want nil", tcase.input, err)
					continue
				}
				out := String(tree)
				if out != tcase.output {
					t.Errorf("Parse(%q) = %q, want: %q", tcase.input, out, tcase.output)
				}
			}
		}()
	}
	wg.Wait()
}

// TestInvalid checks that malformed statements are rejected and that the
// returned error message contains the expected text.
func TestInvalid(t *testing.T) {
	invalidSQL := []struct {
		input string
		err   string
	}{{
		input: "select a, b from (select * from tbl) sort by a",
		err:   "syntax error",
	}, {
		input: "/*!*/",
		err:   "empty statement",
	}}

	for _, tcase := range invalidSQL {
		_, err := Parse(tcase.input)
		if err == nil {
			t.Errorf("Parse invalid query(%q), got: nil, want: %s...", tcase.input, tcase.err)
		}
		if err != nil && !strings.Contains(err.Error(), tcase.err) {
			t.Errorf("Parse invalid query(%q), got: %v, want: %s...", tcase.input, err, tcase.err)
		}
	}
}

// TestCaseSensitivity checks how identifier case is preserved or
// normalized when a statement is parsed and re-serialized.
func TestCaseSensitivity(t *testing.T) {
	validSQL := []struct {
		input  string
		output string
	}{{
		input:  "create table A (\n\t`B` int\n)",
		output: "create table A (\n\tB int\n)",
	}, {
		input: "create index b on A (col1 desc)",
	}, {
		input:  "alter table A foo",
		output: "alter 
table A", }, { input: "alter table A convert", output: "alter table A", }, { // View names get lower-cased. input: "alter view A foo", output: "alter table a", }, { input: "alter table A rename to B", output: "rename table A to B", }, { input: "rename table A to B", }, { input: "drop table B", output: "drop table B", }, { input: "drop table if exists B", output: "drop table if exists B", }, { input: "drop index b on A", output: "alter table A", }, { input: "select a from B", }, { input: "select A as B from C", }, { input: "select B.* from c", }, { input: "select B.A from c", }, { input: "select * from B as C", }, { input: "select * from A.B", }, { input: "update A set b = 1", }, { input: "update A.B set b = 1", }, { input: "select A() from b", }, { input: "select A(B, C) from b", }, { input: "select A(distinct B, C) from b", }, { // IF is an exception. It's always lower-cased. input: "select IF(B, C) from b", output: "select if(B, C) from b", }, { input: "select * from b use index (A)", }, { input: "insert into A(A, B) values (1, 2)", }, { input: "CREATE TABLE A (\n\t`A` int\n)", output: "create table A (\n\tA int\n)", }, { input: "create view A", output: "create table a", }, { input: "alter view A", output: "alter table a", }, { input: "drop view A", output: "drop table a", }, { input: "drop view if exists A", output: "drop table if exists a", }, { input: "select /* lock in SHARE MODE */ 1 from t lock in SHARE MODE", output: "select /* lock in SHARE MODE */ 1 from t lock in share mode", }, { input: "select next VALUE from t", output: "select next 1 values from t", }, { input: "select /* use */ 1 from t1 use index (A) where b = 1", }} for _, tcase := range validSQL { if tcase.output == "" { tcase.output = tcase.input } tree, err := Parse(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue } out := String(tree) if out != tcase.output { t.Errorf("out: %s, want %s", out, tcase.output) } } } func TestKeywords(t *testing.T) { validSQL 
:= []struct { input string output string }{{ input: "select current_timestamp", output: "select current_timestamp() from dual", }, { input: "update t set a = current_timestamp()", }, { input: "update t set a = current_timestamp(5)", }, { input: "select a, current_date from t", output: "select a, current_date() from t", }, { input: "insert into t(a, b) values (current_date, current_date())", output: "insert into t(a, b) values (current_date(), current_date())", }, { input: "select * from t where a > utc_timestmp()", }, { input: "select * from t where a > utc_timestamp(4)", }, { input: "update t set b = utc_timestamp + 5", output: "update t set b = utc_timestamp() + 5", }, { input: "select utc_time, utc_date, utc_time(6)", output: "select utc_time(), utc_date(), utc_time(6) from dual", }, { input: "select 1 from dual where localtime > utc_time", output: "select 1 from dual where localtime() > utc_time()", }, { input: "select 1 from dual where localtime(2) > utc_time(1)", output: "select 1 from dual where localtime(2) > utc_time(1)", }, { input: "update t set a = localtimestamp(), b = utc_timestamp", output: "update t set a = localtimestamp(), b = utc_timestamp()", }, { input: "update t set a = localtimestamp(10), b = utc_timestamp(13)", output: "update t set a = localtimestamp(10), b = utc_timestamp(13)", }, { input: "insert into t(a) values (unix_timestamp)", }, { input: "select replace(a, 'foo', 'bar') from t", }, { input: "update t set a = replace('1234', '2', '1')", }, { input: "insert into t(a, b) values ('foo', 'bar') on duplicate key update a = replace(hex('foo'), 'f', 'b')", }, { input: "update t set a = left('1234', 3)", }, { input: "select left(a, 5) from t", }, { input: "update t set d = adddate(date('2003-12-31 01:02:03'), interval 5 days)", }, { input: "insert into t(a, b) values (left('foo', 1), 'b')", }, { input: "insert /* qualified function */ into t(a, b) values (test.PI(), 'b')", }, { input: "select /* keyword in qualified id */ * from t join z on 
t.key = z.key", output: "select /* keyword in qualified id */ * from t join z on t.`key` = z.`key`", }, { input: "select /* non-reserved keywords as unqualified cols */ date, view, offset from t", output: "select /* non-reserved keywords as unqualified cols */ `date`, `view`, `offset` from t", }, { input: "select /* share and mode as cols */ share, mode from t where share = 'foo'", output: "select /* share and mode as cols */ `share`, `mode` from t where `share` = 'foo'", }, { input: "select /* unused keywords as cols */ `write`, varying from t where trailing = 'foo'", output: "select /* unused keywords as cols */ `write`, `varying` from t where `trailing` = 'foo'", }, { input: "select status from t", output: "select `status` from t", }, { input: "select Status from t", output: "select `Status` from t", }, { input: "select variables from t", output: "select `variables` from t", }, { input: "select Variables from t", output: "select `Variables` from t", }} for _, tcase := range validSQL { if tcase.output == "" { tcase.output = tcase.input } tree, err := Parse(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue } out := String(tree) if out != tcase.output { t.Errorf("out: %s, want %s", out, tcase.output) } } } func TestConvert(t *testing.T) { validSQL := []struct { input string output string }{{ input: "select cast('abc' as date) from t", output: "select convert('abc', date) from t", }, { input: "select convert('abc', binary(4)) from t", }, { input: "select convert('abc', binary) from t", }, { input: "select convert('abc', char character set binary) from t", }, { input: "select convert('abc', char(4) ascii) from t", }, { input: "select convert('abc', char unicode) from t", }, { input: "select convert('abc', char(4)) from t", }, { input: "select convert('abc', char) from t", }, { input: "select convert('abc', nchar(4)) from t", }, { input: "select convert('abc', nchar) from t", }, { input: "select convert('abc', signed) from t", }, { 
input: "select convert('abc', signed integer) from t", output: "select convert('abc', signed) from t", }, { input: "select convert('abc', unsigned) from t", }, { input: "select convert('abc', unsigned integer) from t", output: "select convert('abc', unsigned) from t", }, { input: "select convert('abc', decimal(3, 4)) from t", }, { input: "select convert('abc', decimal(4)) from t", }, { input: "select convert('abc', decimal) from t", }, { input: "select convert('abc', date) from t", }, { input: "select convert('abc', time(4)) from t", }, { input: "select convert('abc', time) from t", }, { input: "select convert('abc', datetime(9)) from t", }, { input: "select convert('abc', datetime) from t", }, { input: "select convert('abc', json) from t", }, { input: "select convert('abc' using ascii) from t", }} for _, tcase := range validSQL { if tcase.output == "" { tcase.output = tcase.input } tree, err := Parse(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue } out := String(tree) if out != tcase.output { t.Errorf("out: %s, want %s", out, tcase.output) } } invalidSQL := []struct { input string output string }{{ input: "select convert('abc' as date) from t", output: "syntax error at position 24 near 'as'", }, { input: "select convert from t", output: "syntax error at position 20 near 'from'", }, { input: "select cast('foo', decimal) from t", output: "syntax error at position 19", }, { input: "select convert('abc', datetime(4+9)) from t", output: "syntax error at position 34", }, { input: "select convert('abc', decimal(4+9)) from t", output: "syntax error at position 33", }, { input: "/* a comment */", output: "empty statement", }, { input: "set transaction isolation level 12345", output: "syntax error at position 38 near '12345'", }} for _, tcase := range invalidSQL { _, err := Parse(tcase.input) if err == nil || err.Error() != tcase.output { t.Errorf("%s: %v, want %s", tcase.input, err, tcase.output) } } } func TestSelectInto(t 
*testing.T) {
	// Round-trip checks for SELECT ... INTO OUTFILE / DUMPFILE / OUTFILE S3
	// variants; output empty means the statement serializes unchanged.
	validSQL := []struct {
		input  string
		output string
	}{{
		input:  "select * from t order by name limit 100 into outfile s3 'out_file_name'",
		output: "select * from t order by `name` asc limit 100 into outfile s3 'out_file_name'",
	}, {
		input: "select * from t into dumpfile 'out_file_name'",
	}, {
		input: "select * from t into outfile 'out_file_name' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'",
	}, {
		input: "select * from t into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off",
	}, {
		input:  "select * from (select * from t union select * from t2) as t3 where t3.name in (select col from t4) into outfile s3 'out_file_name'",
		output: "select * from (select * from t union select * from t2) as t3 where t3.`name` in (select col from t4) into outfile s3 'out_file_name'",
	}, {
		// Invalid queries but these are parsed and errors caught in planbuilder
		input: "select * from t limit 100 into outfile s3 'out_file_name' union select * from t2",
	}, {
		input: "select * from (select * from t into outfile s3 'inner_outfile') as t2 into outfile s3 'out_file_name'",
	}}

	for _, tcase := range validSQL {
		if tcase.output == "" {
			tcase.output = tcase.input
		}
		tree, err := Parse(tcase.input)
		if err != nil {
			t.Errorf("input: %s, err: %v", tcase.input, err)
			continue
		}
		out := String(tree)
		assert.Equal(t, tcase.output, out)
	}

	// Error cases: these must fail with exactly the given message.
	invalidSQL := []struct {
		input  string
		output string
	}{{
		input:  "select convert('abc' as date) from t",
		output: "syntax error at position 24 near 'as'",
	}, {
		input:  "set transaction isolation level 12345",
		output: "syntax error at position 38 near '12345'",
	}}
	for _, tcase := range invalidSQL {
		_, err := Parse(tcase.input)
		if err == nil || err.Error() != tcase.output {
			t.Errorf("%s: %v, want %s", tcase.input, err, tcase.output)
		}
	}
}
// TestPositionedErr checks that ParseNext reports failures as a
// PositionedErr carrying the expected message, byte position, and the
// "near" token (nil when there is no token to point at).
func TestPositionedErr(t *testing.T) {
	invalidSQL := []struct {
		input  string
		output PositionedErr
	}{{
		input:  "select convert('abc' as date) from t",
		output: PositionedErr{"syntax error", 24, []byte("as")},
	}, {
		input:  "select convert from t",
		output: PositionedErr{"syntax error", 20, []byte("from")},
	}, {
		input:  "select cast('foo', decimal) from t",
		output: PositionedErr{"syntax error", 19, nil},
	}, {
		input:  "select convert('abc', datetime(4+9)) from t",
		output: PositionedErr{"syntax error", 34, nil},
	}, {
		input:  "select convert('abc', decimal(4+9)) from t",
		output: PositionedErr{"syntax error", 33, nil},
	}, {
		input:  "set transaction isolation level 12345",
		output: PositionedErr{"syntax error", 38, []byte("12345")},
	}, {
		input:  "select * from a left join b",
		output: PositionedErr{"syntax error", 28, nil},
	}, {
		input:  "select a from (select * from tbl)",
		output: PositionedErr{"syntax error", 34, nil},
	}}

	for _, tcase := range invalidSQL {
		tkn := NewStringTokenizer(tcase.input)
		_, err := ParseNext(tkn)

		// The error must be a PositionedErr, and its position, near-token,
		// and rendered message must all match the expected value exactly.
		if posErr, ok := err.(PositionedErr); !ok {
			t.Errorf("%s: %v expected PositionedErr, got (%T) %v", tcase.input, err, err, tcase.output)
		} else if posErr.Pos != tcase.output.Pos || !bytes.Equal(posErr.Near, tcase.output.Near) || err.Error() != tcase.output.Error() {
			t.Errorf("%s: %v, want: %v", tcase.input, err, tcase.output)
		}
	}
}

// TestSubStr checks that the substring()/"from ... for ..." spellings are
// all normalized to the canonical substr(expr, pos[, len]) form.
func TestSubStr(t *testing.T) {
	validSQL := []struct {
		input  string
		output string
	}{{
		input: `select substr('foobar', 1) from t`,
	}, {
		input: "select substr(a, 1, 6) from t",
	}, {
		input:  "select substring(a, 1) from t",
		output: "select substr(a, 1) from t",
	}, {
		input:  "select substring(a, 1, 6) from t",
		output: "select substr(a, 1, 6) from t",
	}, {
		input:  "select substr(a from 1 for 6) from t",
		output: "select substr(a, 1, 6) from t",
	}, {
		input:  "select substring(a from 1 for 6) from t",
		output: "select substr(a, 1, 6) from t",
	}, {
		input:  `select substr("foo" from 1 for 2) from t`,
		output: `select substr('foo', 1, 2) from t`,
	}, {
		input: 
`select substring("foo", 1, 2) from t`, output: `select substr('foo', 1, 2) from t`, }, { input: `select substr(substr("foo" from 1 for 2), 1, 2) from t`, output: `select substr(substr('foo', 1, 2), 1, 2) from t`, }, { input: `select substr(substring("foo", 1, 2), 3, 4) from t`, output: `select substr(substr('foo', 1, 2), 3, 4) from t`, }, { input: `select substring(substr("foo", 1), 2) from t`, output: `select substr(substr('foo', 1), 2) from t`, }} for _, tcase := range validSQL { if tcase.output == "" { tcase.output = tcase.input } tree, err := Parse(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue } out := String(tree) if out != tcase.output { t.Errorf("out: %s, want %s", out, tcase.output) } } } func TestLoadData(t *testing.T) { validSQL := []string{ "load data from s3 'x.txt'", "load data from s3 manifest 'x.txt'", "load data from s3 file 'x.txt'", "load data infile 'x.txt' into table 'c'", "load data from s3 'x.txt' into table x"} for _, tcase := range validSQL { _, err := Parse(tcase) require.NoError(t, err) } } func TestCreateTable(t *testing.T) { validSQL := []string{ // test all the data types and options "create table t (\n" + " col_bit bit,\n" + " col_tinyint tinyint auto_increment,\n" + " col_tinyint3 tinyint(3) unsigned,\n" + " col_smallint smallint,\n" + " col_smallint4 smallint(4) zerofill,\n" + " col_mediumint mediumint,\n" + " col_mediumint5 mediumint(5) unsigned not null,\n" + " col_int int,\n" + " col_int10 int(10) not null,\n" + " col_integer integer comment 'this is an integer',\n" + " col_bigint bigint,\n" + " col_bigint10 bigint(10) zerofill not null default 10,\n" + " col_real real,\n" + " col_real2 real(1,2) not null default 1.23,\n" + " col_double double,\n" + " col_double2 double(3,4) not null default 1.23,\n" + " col_float float,\n" + " col_float2 float(3,4) not null default 1.23,\n" + " col_decimal decimal,\n" + " col_decimal2 decimal(2),\n" + " col_decimal3 decimal(2,3),\n" + " col_numeric 
numeric,\n" + " col_numeric2 numeric(2),\n" + " col_numeric3 numeric(2,3),\n" + " col_date date,\n" + " col_time time,\n" + " col_timestamp timestamp,\n" + " col_datetime datetime,\n" + " col_year year,\n" + " col_char char,\n" + " col_char2 char(2),\n" + " col_char3 char(3) character set ascii,\n" + " col_char4 char(4) character set ascii collate ascii_bin,\n" + " col_varchar varchar,\n" + " col_varchar2 varchar(2),\n" + " col_varchar3 varchar(3) character set ascii,\n" + " col_varchar4 varchar(4) character set ascii collate ascii_bin,\n" + " col_binary binary,\n" + " col_varbinary varbinary(10),\n" + " col_tinyblob tinyblob,\n" + " col_blob blob,\n" + " col_mediumblob mediumblob,\n" + " col_longblob longblob,\n" + " col_tinytext tinytext,\n" + " col_text text,\n" + " col_mediumtext mediumtext,\n" + " col_longtext longtext,\n" + " col_text text character set ascii collate ascii_bin,\n" + " col_json json,\n" + " col_enum enum('a', 'b', 'c', 'd'),\n" + " col_enum2 enum('a', 'b', 'c', 'd') character set ascii,\n" + " col_enum3 enum('a', 'b', 'c', 'd') collate ascii_bin,\n" + " col_enum4 enum('a', 'b', 'c', 'd') character set ascii collate ascii_bin,\n" + " col_set set('a', 'b', 'c', 'd'),\n" + " col_set2 set('a', 'b', 'c', 'd') character set ascii,\n" + " col_set3 set('a', 'b', 'c', 'd') collate ascii_bin,\n" + " col_set4 set('a', 'b', 'c', 'd') character set ascii collate ascii_bin,\n" + " col_geometry1 geometry,\n" + " col_geometry2 geometry not null,\n" + " col_point1 point,\n" + " col_point2 point not null,\n" + " col_linestring1 linestring,\n" + " col_linestring2 linestring not null,\n" + " col_polygon1 polygon,\n" + " col_polygon2 polygon not null,\n" + " col_geometrycollection1 geometrycollection,\n" + " col_geometrycollection2 geometrycollection not null,\n" + " col_multipoint1 multipoint,\n" + " col_multipoint2 multipoint not null,\n" + " col_multilinestring1 multilinestring,\n" + " col_multilinestring2 multilinestring not null,\n" + " col_multipolygon1 
multipolygon,\n" + " col_multipolygon2 multipolygon not null\n" + ")", // test defining indexes separately "create table t (\n" + " id int auto_increment,\n" + " username varchar,\n" + " email varchar,\n" + " full_name varchar,\n" + " geom point not null,\n" + " status_nonkeyword varchar,\n" + " primary key (id),\n" + " spatial key geom (geom),\n" + " fulltext key fts (full_name),\n" + " unique key by_username (username),\n" + " unique by_username2 (username),\n" + " unique index by_username3 (username),\n" + " index by_status (status_nonkeyword),\n" + " key by_full_name (full_name)\n" + ")", // test that indexes support USING <id> "create table t (\n" + " id int auto_increment,\n" + " username varchar,\n" + " email varchar,\n" + " full_name varchar,\n" + " status_nonkeyword varchar,\n" + " primary key (id) using BTREE,\n" + " unique key by_username (username) using HASH,\n" + " unique by_username2 (username) using OTHER,\n" + " unique index by_username3 (username) using XYZ,\n" + " index by_status (status_nonkeyword) using PDQ,\n" + " key by_full_name (full_name) using OTHER\n" + ")", // test other index options "create table t (\n" + " id int auto_increment,\n" + " username varchar,\n" + " email varchar,\n" + " primary key (id) comment 'hi',\n" + " unique key by_username (username) key_block_size 8,\n" + " unique index by_username4 (username) comment 'hi' using BTREE,\n" + " unique index by_username4 (username) using BTREE key_block_size 4 comment 'hi'\n" + ")", // multi-column indexes "create table t (\n" + " id int auto_increment,\n" + " username varchar,\n" + " email varchar,\n" + " full_name varchar,\n" + " a int,\n" + " b int,\n" + " c int,\n" + " primary key (id, username),\n" + " unique key by_abc (a, b, c),\n" + " unique key (a, b, c),\n" + " key by_email (email(10), username)\n" + ")", // foreign keys "create table t (\n" + " id int auto_increment,\n" + " username varchar,\n" + " k int,\n" + " Z int,\n" + " primary key (id, username),\n" + " key by_email 
(email(10), username),\n" + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b),\n" + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on delete restrict,\n" + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on delete no action,\n" + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on delete cascade on update set default,\n" + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on delete set default on update set null,\n" + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on delete set null on update restrict,\n" + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on update no action,\n" + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on update cascade\n" + ")", // table options "create table t (\n" + " id int auto_increment\n" + ") engine InnoDB,\n" + " auto_increment 123,\n" + " avg_row_length 1,\n" + " default character set utf8mb4,\n" + " character set latin1,\n" + " checksum 0,\n" + " default collate binary,\n" + " collate ascii_bin,\n" + " comment 'this is a comment',\n" + " compression 'zlib',\n" + " connection 'connect_string',\n" + " data directory 'absolute path to directory',\n" + " delay_key_write 1,\n" + " encryption 'n',\n" + " index directory 'absolute path to directory',\n" + " insert_method no,\n" + " key_block_size 1024,\n" + " max_rows 100,\n" + " min_rows 10,\n" + " pack_keys 0,\n" + " password 'sekret',\n" + " row_format default,\n" + " stats_auto_recalc default,\n" + " stats_persistent 0,\n" + " stats_sample_pages 1,\n" + " tablespace tablespace_name storage disk,\n" + " tablespace tablespace_name\n", // boolean columns "create table t (\n" + " bi bigint not null primary key,\n" + " b1 bool not null,\n" + " b2 boolean\n" + ")", } for _, sql := range validSQL { sql = strings.TrimSpace(sql) tree, err := ParseStrictDDL(sql) if err != nil { t.Errorf("input: %s, err: %v", sql, 
err) continue } got := String(tree.(*CreateTable)) assert.True(t, tree.(*CreateTable).FullyParsed) if sql != got { t.Errorf("want:\n%s\ngot:\n%s", sql, got) } } sql := "create table t garbage" tree, err := Parse(sql) if err != nil { t.Errorf("input: %s, err: %v", sql, err) } assert.True(t, !tree.(*CreateTable).FullyParsed) tree, err = ParseStrictDDL(sql) if tree != nil || err == nil { t.Errorf("ParseStrictDDL unexpectedly accepted input %s", sql) } testCases := []struct { input string output string }{{ // test key_block_size input: "create table t (\n" + " id int auto_increment,\n" + " username varchar,\n" + " unique key by_username (username) key_block_size 8,\n" + " unique key by_username2 (username) key_block_size=8,\n" + " unique by_username3 (username) key_block_size = 4\n" + ")", output: "create table t (\n" + " id int auto_increment,\n" + " username varchar,\n" + " unique key by_username (username) key_block_size 8,\n" + " unique key by_username2 (username) key_block_size 8,\n" + " unique by_username3 (username) key_block_size 4\n" + ")", }, { // test defaults input: "create table t (\n" + " i1 int default 1,\n" + " i2 int default null,\n" + " f1 float default 1.23,\n" + " s1 varchar default 'c',\n" + " s2 varchar default 'this is a string',\n" + " s3 varchar default null,\n" + " s4 timestamp default current_timestamp,\n" + " s5 bit(1) default B'0'\n" + ")", output: "create table t (\n" + " i1 int default 1,\n" + " i2 int default null,\n" + " f1 float default 1.23,\n" + " s1 varchar default 'c',\n" + " s2 varchar default 'this is a string',\n" + " `s3` varchar default null,\n" + " s4 timestamp default current_timestamp(),\n" + " s5 bit(1) default B'0'\n" + ")", }, { // test non_reserved word in column name input: "create table t (\n" + " repair int\n" + ")", output: "create table t (\n" + " `repair` int\n" + ")", }, { // test key field options input: "create table t (\n" + " id int auto_increment primary key,\n" + " username varchar unique key,\n" + " email 
varchar unique,\n" + " full_name varchar key,\n" + " time1 timestamp on update current_timestamp,\n" + " time2 timestamp default current_timestamp on update current_timestamp\n" + ")", output: "create table t (\n" + " id int auto_increment primary key,\n" + " username varchar unique key,\n" + " email varchar unique,\n" + " full_name varchar key,\n" + " time1 timestamp on update current_timestamp(),\n" + " time2 timestamp default current_timestamp() on update current_timestamp()\n" + ")", }, { // test current_timestamp with and without () input: "create table t (\n" + " time1 timestamp default current_timestamp,\n" + " time2 timestamp default current_timestamp(),\n" + " time3 timestamp default current_timestamp on update current_timestamp,\n" + " time4 timestamp default current_timestamp() on update current_timestamp(),\n" + " time5 timestamp(3) default current_timestamp(3) on update current_timestamp(3)\n" + ")", output: "create table t (\n" + " time1 timestamp default current_timestamp(),\n" + " time2 timestamp default current_timestamp(),\n" + " time3 timestamp default current_timestamp() on update current_timestamp(),\n" + " time4 timestamp default current_timestamp() on update current_timestamp(),\n" + " time5 timestamp(3) default current_timestamp(3) on update current_timestamp(3)\n" + ")", }, { // test utc_timestamp with and without () input: "create table t (\n" + " time1 timestamp default utc_timestamp,\n" + " time2 timestamp default utc_timestamp(),\n" + " time3 timestamp default utc_timestamp on update utc_timestamp,\n" + " time4 timestamp default utc_timestamp() on update utc_timestamp(),\n" + " time5 timestamp(4) default utc_timestamp(4) on update utc_timestamp(4)\n" + ")", output: "create table t (\n" + " time1 timestamp default utc_timestamp(),\n" + " time2 timestamp default utc_timestamp(),\n" + " time3 timestamp default utc_timestamp() on update utc_timestamp(),\n" + " time4 timestamp default utc_timestamp() on update utc_timestamp(),\n" + " time5 
timestamp(4) default utc_timestamp(4) on update utc_timestamp(4)\n" + ")", }, { // test utc_time with and without () input: "create table t (\n" + " time1 timestamp default utc_time,\n" + " time2 timestamp default utc_time(),\n" + " time3 timestamp default utc_time on update utc_time,\n" + " time4 timestamp default utc_time() on update utc_time(),\n" + " time5 timestamp(5) default utc_time(5) on update utc_time(5)\n" + ")", output: "create table t (\n" + " time1 timestamp default utc_time(),\n" + " time2 timestamp default utc_time(),\n" + " time3 timestamp default utc_time() on update utc_time(),\n" + " time4 timestamp default utc_time() on update utc_time(),\n" + " time5 timestamp(5) default utc_time(5) on update utc_time(5)\n" + ")", }, { // test utc_date with and without () input: "create table t (\n" + " time1 timestamp default utc_date,\n" + " time2 timestamp default utc_date(),\n" + " time3 timestamp default utc_date on update utc_date,\n" + " time4 timestamp default utc_date() on update utc_date()\n" + ")", output: "create table t (\n" + " time1 timestamp default utc_date(),\n" + " time2 timestamp default utc_date(),\n" + " time3 timestamp default utc_date() on update utc_date(),\n" + " time4 timestamp default utc_date() on update utc_date()\n" + ")", }, { // test localtime with and without () input: "create table t (\n" + " time1 timestamp default localtime,\n" + " time2 timestamp default localtime(),\n" + " time3 timestamp default localtime on update localtime,\n" + " time4 timestamp default localtime() on update localtime(),\n" + " time5 timestamp(6) default localtime(6) on update localtime(6)\n" + ")", output: "create table t (\n" + " time1 timestamp default localtime(),\n" + " time2 timestamp default localtime(),\n" + " time3 timestamp default localtime() on update localtime(),\n" + " time4 timestamp default localtime() on update localtime(),\n" + " time5 timestamp(6) default localtime(6) on update localtime(6)\n" + ")", }, { // test localtimestamp with 
and without () input: "create table t (\n" + " time1 timestamp default localtimestamp,\n" + " time2 timestamp default localtimestamp(),\n" + " time3 timestamp default localtimestamp on update localtimestamp,\n" + " time4 timestamp default localtimestamp() on update localtimestamp(),\n" + " time5 timestamp(1) default localtimestamp(1) on update localtimestamp(1)\n" + ")", output: "create table t (\n" + " time1 timestamp default localtimestamp(),\n" + " time2 timestamp default localtimestamp(),\n" + " time3 timestamp default localtimestamp() on update localtimestamp(),\n" + " time4 timestamp default localtimestamp() on update localtimestamp(),\n" + " time5 timestamp(1) default localtimestamp(1) on update localtimestamp(1)\n" + ")", }, { // test current_date with and without () input: "create table t (\n" + " time1 timestamp default current_date,\n" + " time2 timestamp default current_date(),\n" + " time3 timestamp default current_date on update current_date,\n" + " time4 timestamp default current_date() on update current_date()\n" + ")", output: "create table t (\n" + " time1 timestamp default current_date(),\n" + " time2 timestamp default current_date(),\n" + " time3 timestamp default current_date() on update current_date(),\n" + " time4 timestamp default current_date() on update current_date()\n" + ")", }, { // test current_time with and without () input: "create table t (\n" + " time1 timestamp default current_time,\n" + " time2 timestamp default current_time(),\n" + " time3 timestamp default current_time on update current_time,\n" + " time4 timestamp default current_time() on update current_time(),\n" + " time5 timestamp(2) default current_time(2) on update current_time(2)\n" + ")", output: "create table t (\n" + " time1 timestamp default current_time(),\n" + " time2 timestamp default current_time(),\n" + " time3 timestamp default current_time() on update current_time(),\n" + " time4 timestamp default current_time() on update current_time(),\n" + " time5 
timestamp(2) default current_time(2) on update current_time(2)\n" + ")", }, } for _, tcase := range testCases { tree, err := ParseStrictDDL(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue } assert.True(t, tree.(*CreateTable).FullyParsed) if got, want := String(tree.(*CreateTable)), tcase.output; got != want { t.Errorf("Parse(%s):\n%s, want\n%s", tcase.input, got, want) } } } func TestCreateTableLike(t *testing.T) { normal := "create table a like b" testCases := []struct { input string output string }{ { "create table a like b", normal, }, { "create table a (like b)", normal, }, { "create table ks.a like unsharded_ks.b", "create table ks.a like unsharded_ks.b", }, } for _, tcase := range testCases { tree, err := ParseStrictDDL(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue } assert.True(t, tree.(*CreateTable).FullyParsed) if got, want := String(tree.(*CreateTable)), tcase.output; got != want { t.Errorf("Parse(%s):\n%s, want\n%s", tcase.input, got, want) } } } func TestCreateTableEscaped(t *testing.T) { testCases := []struct { input string output string }{{ input: "create table `a`(`id` int, primary key(`id`))", output: "create table a (\n" + "\tid int,\n" + "\tprimary key (id)\n" + ")", }, { input: "create table `insert`(`update` int, primary key(`delete`))", output: "create table `insert` (\n" + "\t`update` int,\n" + "\tprimary key (`delete`)\n" + ")", }} for _, tcase := range testCases { tree, err := ParseStrictDDL(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue } if got, want := String(tree.(*CreateTable)), tcase.output; got != want { t.Errorf("Parse(%s):\n%s, want\n%s", tcase.input, got, want) } } } var ( invalidSQL = []struct { input string output string excludeMulti bool // Don't use in the ParseNext multi-statement parsing tests. 
}{{ input: "select : from t", output: "syntax error at position 9 near ':'", }, { input: "select 0xH from t", output: "syntax error at position 10 near '0x'", }, { input: "select x'78 from t", output: "syntax error at position 12 near '78'", }, { input: "select x'777' from t", output: "syntax error at position 14 near '777'", }, { input: "select * from t where :1 = 2", output: "syntax error at position 24 near ':'", }, { input: "select * from t where :. = 2", output: "syntax error at position 24 near ':'", }, { input: "select * from t where ::1 = 2", output: "syntax error at position 25 near '::'", }, { input: "select * from t where ::. = 2", output: "syntax error at position 25 near '::'", }, { input: "update a set c = values(1)", output: "syntax error at position 26 near '1'", }, { input: "select(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + "(F(F(F(F(F(F(F(F(F(F(F(F(", output: "max nesting level reached at position 406", }, { input: "select(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + "(F(F(F(F(F(F(F(F(F(F(F(", output: "syntax error at position 404", }, { // This construct is considered invalid due to a grammar conflict. 
input: "insert into a select * from b join c on duplicate key update d=e", output: "syntax error at position 54 near 'key'", }, { input: "select * from a left join b", output: "syntax error at position 28", }, { input: "select * from a natural join b on c = d", output: "syntax error at position 34 near 'on'", }, { input: "select * from a natural join b using (c)", output: "syntax error at position 37 near 'using'", }, { input: "select next id from a", output: "expecting value after next at position 15 near 'id'", }, { input: "select next 1+1 values from a", output: "syntax error at position 15", }, { input: "insert into a values (select * from b)", output: "syntax error at position 29 near 'select'", }, { input: "select database", output: "syntax error at position 16", }, { input: "select mod from t", output: "syntax error at position 16 near 'from'", }, { input: "select 1 from t where div 5", output: "syntax error at position 26 near 'div'", }, { input: "select 1 from t where binary", output: "syntax error at position 29", }, { input: "select match(a1, a2) against ('foo' in boolean mode with query expansion) from t", output: "syntax error at position 57 near 'with'", }, { input: "select /* reserved keyword as unqualified column */ * from t where key = 'test'", output: "syntax error at position 71 near 'key'", }, { input: "select /* vitess-reserved keyword as unqualified column */ * from t where escape = 'test'", output: "syntax error at position 81 near 'escape'", }, { input: "select * from t where id = ((select a from t1 union select b from t2) order by a limit 1)", output: "syntax error at position 76 near 'order'", }, { input: "select /* straight_join using */ 1 from t1 straight_join t2 using (a)", output: "syntax error at position 66 near 'using'", }, { input: "select 'aa", output: "syntax error at position 11 near 'aa'", excludeMulti: true, }, { input: "select 'aa\\", output: "syntax error at position 12 near 'aa'", excludeMulti: true, }, { input: "select /* 
aa", output: "syntax error at position 13 near '/* aa'", excludeMulti: true, }, { // non_reserved keywords are currently not permitted everywhere input: "create database repair", output: "syntax error at position 23 near 'repair'", excludeMulti: true, }} ) func TestErrors(t *testing.T) { for _, tcase := range invalidSQL { t.Run(tcase.input, func(t *testing.T) { _, err := Parse(tcase.input) require.Error(t, err, tcase.output) require.Equal(t, err.Error(), tcase.output) }) } } // TestSkipToEnd tests that the skip to end functionality // does not skip past a ';'. If any tokens exist after that, Parse // should return an error. func TestSkipToEnd(t *testing.T) { testcases := []struct { input string output string }{{ // This is the case where the partial ddl will be reset // because of a premature ';'. input: "create table a(id; select * from t", output: "syntax error at position 19", }, { // Partial DDL should get reset for valid DDLs also. input: "create table a(id int); select * from t", output: "syntax error at position 31 near 'select'", }, { // Partial DDL does not get reset here. But we allow the // DDL only if there are no new tokens after skipping to end. input: "create table a bb cc; select * from t", output: "extra characters encountered after end of DDL: 'select'", }, { // Test that we don't step at ';' inside strings. 
input: "create table a bb 'a;'; select * from t", output: "extra characters encountered after end of DDL: 'select'", }} for _, tcase := range testcases { _, err := Parse(tcase.input) if err == nil || err.Error() != tcase.output { t.Errorf("%s: %v, want %s", tcase.input, err, tcase.output) } } } func TestParseDjangoQueries(t *testing.T) { file, err := os.Open("./test_queries/django_queries.txt") if err != nil { t.Errorf(" Error: %v", err) } defer file.Close() scanner := bufio.NewScanner(file) for scanner.Scan() { _, err := Parse(string(scanner.Text())) if err != nil { t.Error(scanner.Text()) t.Errorf(" Error: %v", err) } } } // Benchmark run on 6/23/17, prior to improvements: // BenchmarkParse1-4 100000 16334 ns/op // BenchmarkParse2-4 30000 44121 ns/op // Benchmark run on 9/3/18, comparing pooled parser performance. // // benchmark old ns/op new ns/op delta // BenchmarkNormalize-4 2540 2533 -0.28% // BenchmarkParse1-4 18269 13330 -27.03% // BenchmarkParse2-4 46703 41255 -11.67% // BenchmarkParse2Parallel-4 22246 20707 -6.92% // BenchmarkParse3-4 4064743 4083135 +0.45% // // benchmark old allocs new allocs delta // BenchmarkNormalize-4 27 27 +0.00% // BenchmarkParse1-4 75 74 -1.33% // BenchmarkParse2-4 264 263 -0.38% // BenchmarkParse2Parallel-4 176 175 -0.57% // BenchmarkParse3-4 360 361 +0.28% // // benchmark old bytes new bytes delta // BenchmarkNormalize-4 821 821 +0.00% // BenchmarkParse1-4 22776 2307 -89.87% // BenchmarkParse2-4 28352 7881 -72.20% // BenchmarkParse2Parallel-4 25712 5235 -79.64% // BenchmarkParse3-4 6352082 6336307 -0.25% const ( sql1 = "select 'abcd', 20, 30.0, eid from a where 1=eid and name='3'" sql2 = "select aaaa, bbb, ccc, ddd, eeee, ffff, gggg, hhhh, iiii from tttt, ttt1, ttt3 where aaaa = bbbb and bbbb = cccc and dddd+1 = eeee group by fff, gggg having hhhh = iiii and iiii = jjjj order by kkkk, llll limit 3, 4" ) func BenchmarkParse1(b *testing.B) { sql := sql1 for i := 0; i < b.N; i++ { ast, err := Parse(sql) if err != nil { 
b.Fatal(err) } _ = String(ast) } } func BenchmarkParse2(b *testing.B) { sql := sql2 for i := 0; i < b.N; i++ { ast, err := Parse(sql) if err != nil { b.Fatal(err) } _ = String(ast) } } func BenchmarkParse2Parallel(b *testing.B) { sql := sql2 b.RunParallel(func(pb *testing.PB) { for pb.Next() { ast, err := Parse(sql) if err != nil { b.Fatal(err) } _ = ast } }) } var benchQuery string func init() { // benchQuerySize is the approximate size of the query. benchQuerySize := 1000000 // Size of value is 1/10 size of query. Then we add // 10 such values to the where clause. var baseval bytes.Buffer for i := 0; i < benchQuerySize/100; i++ { // Add an escape character: This will force the upcoming // tokenizer improvement to still create a copy of the string. // Then we can see if avoiding the copy will be worth it. baseval.WriteString("\\'123456789") } var buf bytes.Buffer buf.WriteString("select a from t1 where v = 1") for i := 0; i < 10; i++ { fmt.Fprintf(&buf, " and v%d = \"%d%s\"", i, i, baseval.String()) } benchQuery = buf.String() } func BenchmarkParse3(b *testing.B) { for i := 0; i < b.N; i++ { if _, err := Parse(benchQuery); err != nil { b.Fatal(err) } } }
package meta_test import ( "bytes" "fmt" "html/template" "io/ioutil" "os" "path/filepath" "github.com/goadesign/goa/goagen/codegen" "github.com/goadesign/goa/goagen/meta" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) var _ = Describe("Run", func() { var compiledFiles []string var compileError error var outputWorkspace *codegen.Workspace var designWorkspace *codegen.Workspace var genfunc string var debug bool var outputDir string var designPackage *codegen.Package var designPackagePath string var designPackageSource string var m *meta.Generator BeforeEach(func() { genfunc = "" debug = false designPackagePath = "design" designPackageSource = "package design" codegen.DesignPackagePath = designPackagePath var err error outputWorkspace, err = codegen.NewWorkspace("output") p, err := outputWorkspace.NewPackage("testOutput") Ω(err).ShouldNot(HaveOccurred()) outputDir = p.Abs() designWorkspace, err = codegen.NewWorkspace("test") Ω(err).ShouldNot(HaveOccurred()) compiledFiles = nil compileError = nil }) JustBeforeEach(func() { if designPackagePath != "" { designPackage, err := designWorkspace.NewPackage(designPackagePath) Ω(err).ShouldNot(HaveOccurred()) if designPackageSource != "" { file := designPackage.CreateSourceFile("design.go") err = ioutil.WriteFile(file.Abs(), []byte(designPackageSource), 0655) Ω(err).ShouldNot(HaveOccurred()) } } else { designPackage = nil } m = &meta.Generator{ Genfunc: genfunc, Imports: []*codegen.ImportSpec{codegen.SimpleImport(designPackagePath)}, } codegen.Debug = debug codegen.OutputDir = outputDir compiledFiles, compileError = m.Generate() }) AfterEach(func() { designWorkspace.Delete() outputWorkspace.Delete() }) Context("with no GOPATH environment variable", func() { var gopath string BeforeEach(func() { gopath = os.Getenv("GOPATH") os.Setenv("GOPATH", "") }) AfterEach(func() { os.Setenv("GOPATH", gopath) }) It("fails with a useful error message", func() { Ω(compileError).Should(MatchError("GOPATH not set")) }) }) Context("with 
an invalid GOPATH environment variable", func() { var gopath string const invalidPath = "DOES NOT EXIST" BeforeEach(func() { gopath = os.Getenv("GOPATH") os.Setenv("GOPATH", invalidPath) }) AfterEach(func() { os.Setenv("GOPATH", gopath) }) It("fails with a useful error message", func() { path := fmt.Sprintf("%s", filepath.Join(invalidPath, "src", filepath.FromSlash(designPackagePath))) msg := fmt.Sprintf(`%s does not contain a Go package`, path) Ω(compileError).Should(MatchError(MatchRegexp(msg))) }) }) Context("with an invalid design package path", func() { const invalidDesignPackage = "foobar" BeforeEach(func() { codegen.DesignPackagePath = invalidDesignPackage }) It("fails with a useful error message", func() { msg := `do not contain a Go package` Ω(compileError).Should(MatchError(HaveSuffix(msg))) Ω(compileError).Should(MatchError(ContainSubstring(invalidDesignPackage))) }) }) Context("with no go compiler in PATH", func() { var pathEnv string const invalidPath = "/foobar" BeforeEach(func() { genfunc = "foo.Generate" pathEnv = os.Getenv("PATH") os.Setenv("PATH", invalidPath) }) AfterEach(func() { os.Setenv("PATH", pathEnv) }) It("fails with a useful error message", func() { Ω(compileError).Should(MatchError(`failed to find a go compiler, looked in "` + os.Getenv("PATH") + `"`)) }) }) Context("with no output directory specified", func() { BeforeEach(func() { genfunc = "foo.Generate" outputDir = "" }) It("fails with a useful error message", func() { Ω(compileError).Should(MatchError("missing output directory specification")) }) }) Context("with no design package path specified", func() { BeforeEach(func() { genfunc = "foo.Generate" outputDir = "" }) It("fails with a useful error message", func() { Ω(compileError).Should(MatchError("missing output directory specification")) }) }) Context("with no design package path specified", func() { BeforeEach(func() { genfunc = "foo.Generate" codegen.DesignPackagePath = "" }) It("fails with a useful error message", func() { 
Ω(compileError).Should(MatchError("missing design package path specification")) }) }) Context("with design package content", func() { BeforeEach(func() { genfunc = "foo.Generate" outputDir = os.TempDir() }) Context("that is not valid Go code", func() { BeforeEach(func() { designPackageSource = invalidSource }) It("fails with a useful error message", func() { Ω(compileError.Error()).Should(ContainSubstring("syntax error")) }) }) Context("whose code blows up", func() { BeforeEach(func() { designPackageSource = panickySource }) It("fails with a useful error message", func() { Ω(compileError.Error()).Should(ContainSubstring("panic: kaboom")) }) }) Context("with valid code", func() { BeforeEach(func() { designPackageSource = validSource }) It("successfully runs", func() { Ω(compileError).ShouldNot(HaveOccurred()) }) Context("with a comma separated list of path in GOPATH", func() { var gopath string BeforeEach(func() { gopath = os.Getenv("GOPATH") os.Setenv("GOPATH", fmt.Sprintf("%s%c%s", gopath, os.PathListSeparator, os.TempDir())) }) AfterEach(func() { os.Setenv("GOPATH", gopath) }) It("successfull runs", func() { Ω(compileError).ShouldNot(HaveOccurred()) }) }) }) Context("with code that returns generated file paths", func() { var filePaths = []string{"foo", "bar"} BeforeEach(func() { var b bytes.Buffer tmpl, err := template.New("source").Parse(validSourceTmpl) Ω(err).ShouldNot(HaveOccurred()) err = tmpl.Execute(&b, filePaths) Ω(err).ShouldNot(HaveOccurred()) designPackageSource = b.String() }) It("returns the paths", func() { Ω(compileError).ShouldNot(HaveOccurred()) Ω(compiledFiles).Should(Equal(filePaths)) }) }) }) }) const ( invalidSource = `package foo invalid go code ` panickySource = `package foo import . "github.com/goadesign/goa/design" var Metadata *APIDefinition func Generate(api *APIDefinition) ([]string, error) { return nil, nil } func init() { panic("kaboom") } ` validSource = `package foo import . 
"github.com/goadesign/goa/design" var Metadata *APIDefinition func Generate(api *APIDefinition) ([]string, error) { return nil, nil } ` validSourceTmpl = `package foo import "fmt" import . "github.com/goadesign/goa/design" var Metadata *APIDefinition func Generate(api *APIDefinition) ([]string, error) { {{range .}}fmt.Println("{{.}}") {{end}} return nil, nil } ` ) Fix test for Windows package meta_test import ( "bytes" "fmt" "html/template" "io/ioutil" "os" "path/filepath" "github.com/goadesign/goa/goagen/codegen" "github.com/goadesign/goa/goagen/meta" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) var _ = Describe("Run", func() { var compiledFiles []string var compileError error var outputWorkspace *codegen.Workspace var designWorkspace *codegen.Workspace var genfunc string var debug bool var outputDir string var designPackage *codegen.Package var designPackagePath string var designPackageSource string var m *meta.Generator BeforeEach(func() { genfunc = "" debug = false designPackagePath = "design" designPackageSource = "package design" codegen.DesignPackagePath = designPackagePath var err error outputWorkspace, err = codegen.NewWorkspace("output") p, err := outputWorkspace.NewPackage("testOutput") Ω(err).ShouldNot(HaveOccurred()) outputDir = p.Abs() designWorkspace, err = codegen.NewWorkspace("test") Ω(err).ShouldNot(HaveOccurred()) compiledFiles = nil compileError = nil }) JustBeforeEach(func() { if designPackagePath != "" { designPackage, err := designWorkspace.NewPackage(designPackagePath) Ω(err).ShouldNot(HaveOccurred()) if designPackageSource != "" { file := designPackage.CreateSourceFile("design.go") err = ioutil.WriteFile(file.Abs(), []byte(designPackageSource), 0655) Ω(err).ShouldNot(HaveOccurred()) } } else { designPackage = nil } m = &meta.Generator{ Genfunc: genfunc, Imports: []*codegen.ImportSpec{codegen.SimpleImport(designPackagePath)}, } codegen.Debug = debug codegen.OutputDir = outputDir compiledFiles, compileError = m.Generate() }) 
AfterEach(func() { designWorkspace.Delete() outputWorkspace.Delete() }) Context("with no GOPATH environment variable", func() { var gopath string BeforeEach(func() { gopath = os.Getenv("GOPATH") os.Setenv("GOPATH", "") }) AfterEach(func() { os.Setenv("GOPATH", gopath) }) It("fails with a useful error message", func() { Ω(compileError).Should(MatchError("GOPATH not set")) }) }) Context("with an invalid GOPATH environment variable", func() { var gopath string const invalidPath = "DOES NOT EXIST" BeforeEach(func() { gopath = os.Getenv("GOPATH") os.Setenv("GOPATH", invalidPath) }) AfterEach(func() { os.Setenv("GOPATH", gopath) }) It("fails with a useful error message", func() { path := fmt.Sprintf("%s", filepath.Join(invalidPath, "src", filepath.FromSlash(designPackagePath))) msg := fmt.Sprintf(`%s does not contain a Go package`, path) Ω(compileError.Error()).Should(Equal(msg)) }) }) Context("with an invalid design package path", func() { const invalidDesignPackage = "foobar" BeforeEach(func() { codegen.DesignPackagePath = invalidDesignPackage }) It("fails with a useful error message", func() { msg := `do not contain a Go package` Ω(compileError).Should(MatchError(HaveSuffix(msg))) Ω(compileError).Should(MatchError(ContainSubstring(invalidDesignPackage))) }) }) Context("with no go compiler in PATH", func() { var pathEnv string const invalidPath = "/foobar" BeforeEach(func() { genfunc = "foo.Generate" pathEnv = os.Getenv("PATH") os.Setenv("PATH", invalidPath) }) AfterEach(func() { os.Setenv("PATH", pathEnv) }) It("fails with a useful error message", func() { Ω(compileError).Should(MatchError(`failed to find a go compiler, looked in "` + os.Getenv("PATH") + `"`)) }) }) Context("with no output directory specified", func() { BeforeEach(func() { genfunc = "foo.Generate" outputDir = "" }) It("fails with a useful error message", func() { Ω(compileError).Should(MatchError("missing output directory specification")) }) }) Context("with no design package path specified", func() { 
BeforeEach(func() { genfunc = "foo.Generate" outputDir = "" }) It("fails with a useful error message", func() { Ω(compileError).Should(MatchError("missing output directory specification")) }) }) Context("with no design package path specified", func() { BeforeEach(func() { genfunc = "foo.Generate" codegen.DesignPackagePath = "" }) It("fails with a useful error message", func() { Ω(compileError).Should(MatchError("missing design package path specification")) }) }) Context("with design package content", func() { BeforeEach(func() { genfunc = "foo.Generate" outputDir = os.TempDir() }) Context("that is not valid Go code", func() { BeforeEach(func() { designPackageSource = invalidSource }) It("fails with a useful error message", func() { Ω(compileError.Error()).Should(ContainSubstring("syntax error")) }) }) Context("whose code blows up", func() { BeforeEach(func() { designPackageSource = panickySource }) It("fails with a useful error message", func() { Ω(compileError.Error()).Should(ContainSubstring("panic: kaboom")) }) }) Context("with valid code", func() { BeforeEach(func() { designPackageSource = validSource }) It("successfully runs", func() { Ω(compileError).ShouldNot(HaveOccurred()) }) Context("with a comma separated list of path in GOPATH", func() { var gopath string BeforeEach(func() { gopath = os.Getenv("GOPATH") os.Setenv("GOPATH", fmt.Sprintf("%s%c%s", gopath, os.PathListSeparator, os.TempDir())) }) AfterEach(func() { os.Setenv("GOPATH", gopath) }) It("successfull runs", func() { Ω(compileError).ShouldNot(HaveOccurred()) }) }) }) Context("with code that returns generated file paths", func() { var filePaths = []string{"foo", "bar"} BeforeEach(func() { var b bytes.Buffer tmpl, err := template.New("source").Parse(validSourceTmpl) Ω(err).ShouldNot(HaveOccurred()) err = tmpl.Execute(&b, filePaths) Ω(err).ShouldNot(HaveOccurred()) designPackageSource = b.String() }) It("returns the paths", func() { Ω(compileError).ShouldNot(HaveOccurred()) 
Ω(compiledFiles).Should(Equal(filePaths)) }) }) }) }) const ( invalidSource = `package foo invalid go code ` panickySource = `package foo import . "github.com/goadesign/goa/design" var Metadata *APIDefinition func Generate(api *APIDefinition) ([]string, error) { return nil, nil } func init() { panic("kaboom") } ` validSource = `package foo import . "github.com/goadesign/goa/design" var Metadata *APIDefinition func Generate(api *APIDefinition) ([]string, error) { return nil, nil } ` validSourceTmpl = `package foo import "fmt" import . "github.com/goadesign/goa/design" var Metadata *APIDefinition func Generate(api *APIDefinition) ([]string, error) { {{range .}}fmt.Println("{{.}}") {{end}} return nil, nil } ` )
// Copyright 2016, RadiantBlue Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package piazza import ( "testing" "github.com/stretchr/testify/assert" ) //-------------------------- func Test06VcapServices(t *testing.T) { assert := assert.New(t) unsetenvT(t, "VCAP_SERVICES") vcap, err := NewVcapServices() assert.NoError(err) assert.EqualValues("localhost:9092", vcap.Services["pz-kafka"]) assert.EqualValues("pz-kafka", vcap.UserProvided[1].Name) env := `{ "user-provided": [ { "credentials": { "host": "172.32.125.109:9200" }, "label": "user-provided", "name": "pz-elasticsearch", "syslog_drain_url": "", "tags": [] } ] }` setenvT(t, "VCAP_SERVICES", env) defer unsetenvT(t, "VCAP_SERVICES") vcap, err = NewVcapServices() assert.NoError(err) assert.EqualValues("172.32.125.109:9200", vcap.Services["pz-elasticsearch"]) assert.EqualValues("pz-elasticsearch", vcap.UserProvided[0].Name) } Fix vcap test // Copyright 2016, RadiantBlue Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package piazza import ( "testing" "github.com/stretchr/testify/assert" ) //-------------------------- func Test06VcapServices(t *testing.T) { assert := assert.New(t) unsetenvT(t, "VCAP_SERVICES") vcap, err := NewVcapServices() assert.NoError(err) assert.EqualValues("http://localhost:9092", vcap.Services["pz-kafka"]) assert.EqualValues("pz-kafka", vcap.UserProvided[1].Name) env := `{ "user-provided": [ { "credentials": { "host": "172.32.125.109:9200" }, "label": "user-provided", "name": "pz-elasticsearch", "syslog_drain_url": "", "tags": [] } ] }` setenvT(t, "VCAP_SERVICES", env) defer unsetenvT(t, "VCAP_SERVICES") vcap, err = NewVcapServices() assert.NoError(err) assert.EqualValues("172.32.125.109:9200", vcap.Services["pz-elasticsearch"]) assert.EqualValues("pz-elasticsearch", vcap.UserProvided[0].Name) }
package shell import ( "bufio" "fmt" "io" "os" "strings" ) type editor interface { ReadLine() (string, error) Close() } type minEditor struct { in *bufio.Reader out io.Writer } func newMinEditor(in, out *os.File) *minEditor { return &minEditor{bufio.NewReader(in), out} } func (ed *minEditor) ReadLine() (string, error) { wd, err := os.Getwd() if err != nil { wd = "?" } fmt.Fprintf(ed.out, "%s> ", wd) line, err := ed.in.ReadString('\n') // Chop off the trailing \r on Windows. line = strings.TrimRight(line, "\r") return line, err return ed.in.ReadString('\n') } func (editor *minEditor) Close() { } shell: In minEditor, chop off \n as well. package shell import ( "bufio" "fmt" "io" "os" "strings" ) type editor interface { ReadLine() (string, error) Close() } type minEditor struct { in *bufio.Reader out io.Writer } func newMinEditor(in, out *os.File) *minEditor { return &minEditor{bufio.NewReader(in), out} } func (ed *minEditor) ReadLine() (string, error) { wd, err := os.Getwd() if err != nil { wd = "?" } fmt.Fprintf(ed.out, "%s> ", wd) line, err := ed.in.ReadString('\n') // Chop off the trailing \r on Windows. line = strings.TrimRight(line, "\r\n") return line, err return ed.in.ReadString('\n') } func (editor *minEditor) Close() { }
package main import ( "archive/tar" "archive/zip" "compress/gzip" "encoding/json" "encoding/xml" "fmt" "io" "io/ioutil" "log" "net/http" "os" "path/filepath" "strings" "utilities.qs/registry_windows" "github.com/codegangsta/cli" "github.com/jmoiron/jsonq" ) type Driver struct { Name string `xml:"Name,attr"` } func overwrite(mpath string) (*os.File, error) { f, err := os.OpenFile(mpath, os.O_RDWR|os.O_TRUNC, 0777) if err != nil { f, err = os.Create(mpath) if err != nil { return f, err } } return f, nil } func read(mpath string) (*os.File, error) { f, err := os.OpenFile(mpath, os.O_RDONLY, 0444) if err != nil { return f, err } return f, nil } func getTemplateInfoFile() (link string, err error) { response, err := http.Get("https://api.github.com/repos/QualiSystems/shell-templates/contents/templates.yaml") if err != nil { return "", err } defer response.Body.Close() resp := make(map[string]interface{}) body, _ := ioutil.ReadAll(response.Body) err = json.Unmarshal(body, &resp) if err != nil { return "", err } jq := jsonq.NewQuery(resp) download_url, err := jq.String("download_url") if err != nil { return "", err } return download_url, nil } func untarIt(mpath string, basepath string, root string) { fr, err := read(mpath) //fmt.Printf("reading %s\n", mpath) defer fr.Close() if err != nil { panic(err) } gr, err := gzip.NewReader(fr) defer gr.Close() if err != nil { panic(err) } tr := tar.NewReader(gr) var index = 0 var topDir = "" var firstDir = true for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { panic(err) } originalPath := hdr.Name //fmt.Printf("current fie %s \n", originalPath) path := filepath.Join(basepath, strings.Replace(hdr.Name, topDir, "", -1)) switch hdr.Typeflag { case tar.TypeDir: if firstDir == true { if filepath.Base(originalPath) == root { topDir = originalPath firstDir = false //fmt.Printf("Setting the top folder %s \n", topDir) } } else if strings.HasPrefix(originalPath, topDir) { if err := os.MkdirAll(path, 
os.FileMode(hdr.Mode)); err != nil { panic(err) } //fmt.Printf("creating dir %s \n", path) } case tar.TypeReg: if firstDir == false && strings.HasPrefix(originalPath, topDir) { ow, err := overwrite(path) defer ow.Close() if err != nil { panic(err) } if _, err := io.Copy(ow, tr); err != nil { panic(err) } //fmt.Printf("writing %s \n", path) } default: //fmt.Printf("Can't: %c, %s\n", hdr.Typeflag, path) } index++ } } func zipIt(source, target string, excludedExtensions []string) error { zipfile, err := os.Create(target) if err != nil { return err } defer zipfile.Close() archive := zip.NewWriter(zipfile) defer archive.Close() filepath.Walk(source, func(path string, info os.FileInfo, err error) error { if err != nil { return err } /* FileInfoHeader creates a partially-populated FileHeader from an os.FileInfo. Because os.FileInfo's Name method returns only the base name of the file it describes, it may be necessary to modify the Name field of the returned header to provide the full path name of the file. */ header, err := zip.FileInfoHeader(info) if err != nil { return err } if info.IsDir() && path == source { return nil } filename := strings.TrimPrefix(path, source) filename = strings.TrimPrefix(filename, "/") // trims parent part of the path, for example: strings.TrimPrefix("template/file.txt", "template/") -> "file.txt" for index := 0; index < len(excludedExtensions); index++ { if strings.HasSuffix(strings.ToLower(filename), strings.ToLower(excludedExtensions[index])) { return nil } } if filename != "" { header.Name = filename } if info.IsDir() { //fmt.Println("dir: " + info.Name()) header.Name += "/" } else { header.Method = zip.Deflate } if strings.HasPrefix(header.Name, "\\") { header.Name = strings.Replace(header.Name, "\\", "", 1) } header.Name = strings.Replace(header.Name, "\\", "/", -1) fmt.Println("zipping: " + header.Name) /** CreateHeader adds a file to the zip file using the provided FileHeader for the file metadata. 
It returns a Writer to which the file contents should be written. The file's contents must be written to the io.Writer before the next call to Create, CreateHeader, or Close. The provided FileHeader fh must not be modified after a call to CreateHeader. */ //fmt.Println("header: " + header.Name) writer, err := archive.CreateHeader(header) if err != nil { return err } if info.IsDir() { return nil } //fmt.Println(path) file, err := os.Open(path) if err != nil { return err } defer file.Close() /** Copy copies from src to dst until either EOF is reached on src or an error occurs. It returns the number of bytes copied and the first error encountered while copying, if any. A successful Copy returns err == nil, not err == EOF. Because Copy is defined to read from src until EOF, it does not treat an EOF from Read as an error to be reported. If src implements the WriterTo interface, the copy is implemented by calling src.WriteTo(dst). Otherwise, if dst implements the ReaderFrom interface, the copy is implemented by calling dst.ReadFrom(src). */ _, err = io.Copy(writer /*dst*/, file /*src*/) return err }) return err } func downloadFromURL(url string) (filename string, err error) { temp_file, err := ioutil.TempFile(os.TempDir(), "shelltemp_") //fmt.Println("Downloading", url, "to", temp_file.Name()) defer temp_file.Close() response, err := http.Get(url) if err != nil { return "", fmt.Errorf("Error while downloading", url, "-", err) } defer response.Body.Close() n, err := io.Copy(temp_file, response.Body) if err != nil || n == 0 { return "", fmt.Errorf("Error while downloading", url, "-", err) } return temp_file.Name(), nil } // Copies file source to destination dest. 
func CopyFile(source string, dest string) (err error) { sf, err := os.Open(source) if err != nil { return err } defer sf.Close() df, err := os.Create(dest) if err != nil { return err } defer df.Close() _, err = io.Copy(df, sf) if err == nil { si, err := os.Stat(source) if err != nil { err = os.Chmod(dest, si.Mode()) } } return } func CreateDirIfNotExists(path string) (err error) { // create dest dir if needed _, err = os.Open(path) if os.IsNotExist(err) { err = os.MkdirAll(path, 0777) if err != nil { return err } } return nil } // Recursively copies a directory tree, attempting to preserve permissions. // Source directory must exist, destination directory must *not* exist. func CopyDir(source string, dest string) (err error) { // get properties of source dir fi, err := os.Stat(source) if err != nil { return err } if !fi.IsDir() { return &CustomError{"Source is not a directory"} } // create dest dir if needed err = CreateDirIfNotExists(dest) if err != nil { return err } // ensure dest dir does not already exist entries, err := ioutil.ReadDir(source) for _, entry := range entries { sfp := source + "/" + entry.Name() dfp := dest + "/" + entry.Name() if entry.IsDir() { err = CopyDir(sfp, dfp) if err != nil { log.Println(err) } } else { // perform copy err = CopyFile(sfp, dfp) if err != nil { log.Println(err) } } } return } // A struct for returning custom error messages type CustomError struct { What string } // Returns the error message defined in What as a string func (e *CustomError) Error() string { return e.What } func Copy(dst, src string) error { in, err := os.Open(src) if err != nil { return err } defer in.Close() out, err := os.Create(dst) if err != nil { return err } defer out.Close() _, err = io.Copy(out, in) cerr := out.Close() if err != nil { return err } //fmt.Println("Copied DST:" + dst) return cerr } func parseXML(filename string) string { xmlFile, err := os.Open(filename) if err != nil { fmt.Println("Error opening file:", err) return "" } defer 
xmlFile.Close() b, _ := ioutil.ReadAll(xmlFile) var q Driver xml.Unmarshal(b, &q) return q.Name } func printFile(file string) { input, err := ioutil.ReadFile(file) if err != nil { log.Fatalln(err) } lines := strings.Split(string(input), "\n") for _, line := range lines { fmt.Println(line) } output := strings.Join(lines, "\n") err = ioutil.WriteFile(file, []byte(output), 0644) if err != nil { log.Fatalln(err) } } func replaceString(file string, match string, replace string) { input, err := ioutil.ReadFile(file) if err != nil { log.Fatalln(err) } lines := strings.Split(string(input), "\n") for i, line := range lines { if strings.Contains(line, match) { lines[i] = strings.Replace(line, match, replace, -1) } } output := strings.Join(lines, "\n") err = ioutil.WriteFile(file, []byte(output), 0644) if err != nil { log.Fatalln(err) } } func downloadTemplate(template string) (directory string, err error) { url := "https://api.github.com/repos/QualiSystems/shell-templates/tarball/" zipfile, err := downloadFromURL(url) if err != nil { fmt.Println("Error while downloading", url, "-", err) return "", err } packageTempDir := filepath.Join(os.TempDir(), "spool_"+filepath.Base(zipfile)) os.MkdirAll(packageTempDir, 0777) untarIt(zipfile, packageTempDir, template) return packageTempDir, nil } func main() { app := cli.NewApp() app.Version = "0.0.1" app.Name = "shellfoundry" app.Usage = "CloudShell package CLI build tool. Use 'shellfoundry help' for more options" app.Commands = []cli.Command{ { Name: "create", Aliases: []string{"c"}, Usage: "shellfoundry create [Name]", Description: "Creates a new shell project. 
You can select a predefined template.", UsageText: "blah", ArgsUsage: "[Name] we recommend the name for the shells follows the CloudShell convention of [Vendor OS Type] for example: 'Acme AOS Switch' ", Flags: []cli.Flag{ cli.StringFlag{ Name: "template, t", Value: "base", Usage: "Select the shell template to use", }, cli.StringFlag{ Name: "cloudshell_version, cv", Value: "latest", Usage: "The minimal CloudShell version this shell should be compatible with", }, }, Action: func(c *cli.Context) { template := c.String("template") if len(template) == 0 { template = "base" } packageTempDir, err := downloadTemplate(template) if err != nil { fmt.Println("Error while downloading template: " + err.Error()) return } if len(c.Args()) < 0 { fmt.Println("Usage: shellfoundry create [shellname] ") return } directory := c.Args()[0] dataModelDir := filepath.Join(directory, "datamodel") distDir := filepath.Join(directory, "dist") srcDir := filepath.Join(directory, "src") scriptsDir := filepath.Join(directory, "scripts") err = CreateDirIfNotExists(dataModelDir) if err != nil { fmt.Println("Error while copying template: " + err.Error()) } err = CopyDir(packageTempDir, directory) if err != nil { fmt.Println("Error while copying template: " + err.Error()) } templateName := template + "Shell" err = CreateDirIfNotExists(scriptsDir) if err != nil { fmt.Println("Error while copying template: " + err.Error()) } err = CreateDirIfNotExists(distDir) if err != nil { fmt.Println("Error while copying template: " + err.Error()) } os.Rename(filepath.Join(srcDir, strings.ToLower(templateName)+"driver.py"), filepath.Join(srcDir, directory+"driver.py")) replaceString(filepath.Join(srcDir, "drivermetadata.xml"), fmt.Sprintf("<Driver Name=\"%s Driver\"", strings.Title(templateName)), fmt.Sprintf("<Driver Name=\"%s\"", strings.Title(directory+" driver"))) replaceString(filepath.Join(srcDir, "drivermetadata.xml"), fmt.Sprintf("MainClass=\"%sdriver.%sDriver\"", strings.ToLower(templateName), 
strings.Title(templateName)), fmt.Sprintf("MainClass=\"%s\"", directory+"driver."+strings.Title(directory+"Driver"))) replaceString(filepath.Join(srcDir, directory+"driver.py"), fmt.Sprintf("class %sDriver", strings.Title(templateName)), fmt.Sprintf("class %s", strings.Title(directory+"Driver"))) replaceString(filepath.Join(dataModelDir, "datamodel.xml"), fmt.Sprintf("<ResourceModel Name=\"%s\"", strings.Title(templateName)), fmt.Sprintf("<ResourceModel Name=\"%s\"", strings.Title(directory))) replaceString(filepath.Join(dataModelDir, "datamodel.xml"), fmt.Sprintf("<DriverDescriptor Name=\"%s\"", strings.Title(templateName+" driver")), fmt.Sprintf("<DriverDescriptor Name=\"%s\"", strings.Title(directory+" driver"))) replaceString(filepath.Join(dataModelDir, "datamodel.xml"), fmt.Sprintf("<DriverName>%s</DriverName>", strings.Title(templateName+" Driver")), fmt.Sprintf("<DriverName>%s</DriverName>", strings.Title(directory+" Driver"))) replaceString(filepath.Join(dataModelDir, "shellconfig.xml"), fmt.Sprintf("<ResourceTemplate Name=\"%s\" Model=\"%s\" Driver=\"%s\">", strings.Title(templateName), strings.Title(templateName), strings.Title(templateName+" driver")), fmt.Sprintf("<ResourceTemplate Name=\"%s\" Model=\"%s\" Driver=\"%s\">", strings.Title(directory), strings.Title(directory), strings.Title(directory+" driver"))) err = os.RemoveAll(packageTempDir) if err != nil { fmt.Println("Error deleting temp files: " + err.Error()) return } }, }, { Name: "package", Aliases: []string{"p"}, Usage: "shellfoundry package", UsageText: "blah", Action: func(c *cli.Context) { driverName := parseXML(filepath.Join("src", "drivermetadata.xml")) driverPath := filepath.Join(os.TempDir(), driverName) + ".zip" excludedExt := []string{".ds_store", ".gitignore"} errr := zipIt("src", driverPath, excludedExt) if errr != nil { fmt.Println("Error while packaging driver: " + errr.Error()) return } template := "package_template" packageTempDir, err := downloadTemplate(template) if err != nil 
{ fmt.Println("Error while downloading template: " + err.Error()) return } err = Copy(filepath.Join(packageTempDir, "Resource Drivers - Python", filepath.Base(driverPath)), driverPath) if err != nil { fmt.Println("Error while copying driver " + err.Error()) return } err = Copy(filepath.Join(packageTempDir, "DataModel", "datamodel.xml"), filepath.Join("DataModel", "datamodel.xml")) if err != nil { fmt.Println("Error while copying data model: " + err.Error()) return } err = Copy(filepath.Join(packageTempDir, "Configuration", "shellconfig.xml"), filepath.Join("DataModel", "shellconfig.xml")) if err != nil { fmt.Println("Error while copying configuration: " + err.Error()) return } currentDir, err := os.Getwd() if err != nil { fmt.Println("Error while getting current dir: " + err.Error()) return } //fmt.Println(packageTempDir) zipIt(packageTempDir, filepath.Join("dist", filepath.Base(currentDir)+".zip"), excludedExt) if err != nil { fmt.Println("Error creating package: " + err.Error()) return } err = os.RemoveAll(packageTempDir) if err != nil { fmt.Println("Error deleting temp files: " + err.Error()) return } fmt.Println("Package created successfully") // excludedExt := []string{".ds_store"} // temp_file, err := ioutil.TempFile(os.TempDir(), "shelltemp_") // if err != nil { // fmt.Println("Error while creating dir") // return // } // zipIt("src", temp_file.Name(), excludedExt) // // zipIt("template", "package.zip", excludedExt) }, }, { Name: "list", Aliases: []string{"l"}, Usage: "Gets the list of available templates", Action: func(c *cli.Context) { link, err := getTemplateInfoFile() if err != nil { fmt.Println("Error getting list of templates: " + err.Error()) return } yamlFile, error := downloadFromURL(link) if error != nil { fmt.Println("Can't download file: " + err.Error()) return } printFile(yamlFile) }, }, { Name: "publish", Aliases: []string{"pu"}, Usage: "Not yet implemented", Action: func(c *cli.Context) { }, }, { Name: "install", Aliases: []string{"i"}, Usage: 
"add to the local path", Action: func(c *cli.Context) { registry_windows.setPath() }, }, } app.Run(os.Args) } updated package main import ( "archive/tar" "archive/zip" "compress/gzip" "encoding/json" "encoding/xml" "fmt" "io" "io/ioutil" "log" "net/http" "os" "path/filepath" "strings" "github.com/codegangsta/cli" "github.com/jmoiron/jsonq" ) type Driver struct { Name string `xml:"Name,attr"` } func overwrite(mpath string) (*os.File, error) { f, err := os.OpenFile(mpath, os.O_RDWR|os.O_TRUNC, 0777) if err != nil { f, err = os.Create(mpath) if err != nil { return f, err } } return f, nil } func read(mpath string) (*os.File, error) { f, err := os.OpenFile(mpath, os.O_RDONLY, 0444) if err != nil { return f, err } return f, nil } func getTemplateInfoFile() (link string, err error) { response, err := http.Get("https://api.github.com/repos/QualiSystems/shell-templates/contents/templates.yaml") if err != nil { return "", err } defer response.Body.Close() resp := make(map[string]interface{}) body, _ := ioutil.ReadAll(response.Body) err = json.Unmarshal(body, &resp) if err != nil { return "", err } jq := jsonq.NewQuery(resp) download_url, err := jq.String("download_url") if err != nil { return "", err } return download_url, nil } func untarIt(mpath string, basepath string, root string) { fr, err := read(mpath) //fmt.Printf("reading %s\n", mpath) defer fr.Close() if err != nil { panic(err) } gr, err := gzip.NewReader(fr) defer gr.Close() if err != nil { panic(err) } tr := tar.NewReader(gr) var index = 0 var topDir = "" var firstDir = true for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { panic(err) } originalPath := hdr.Name //fmt.Printf("current fie %s \n", originalPath) path := filepath.Join(basepath, strings.Replace(hdr.Name, topDir, "", -1)) switch hdr.Typeflag { case tar.TypeDir: if firstDir == true { if filepath.Base(originalPath) == root { topDir = originalPath firstDir = false //fmt.Printf("Setting the top folder %s \n", 
topDir) } } else if strings.HasPrefix(originalPath, topDir) { if err := os.MkdirAll(path, os.FileMode(hdr.Mode)); err != nil { panic(err) } //fmt.Printf("creating dir %s \n", path) } case tar.TypeReg: if firstDir == false && strings.HasPrefix(originalPath, topDir) { ow, err := overwrite(path) defer ow.Close() if err != nil { panic(err) } if _, err := io.Copy(ow, tr); err != nil { panic(err) } //fmt.Printf("writing %s \n", path) } default: //fmt.Printf("Can't: %c, %s\n", hdr.Typeflag, path) } index++ } } func zipIt(source, target string, excludedExtensions []string) error { zipfile, err := os.Create(target) if err != nil { return err } defer zipfile.Close() archive := zip.NewWriter(zipfile) defer archive.Close() filepath.Walk(source, func(path string, info os.FileInfo, err error) error { if err != nil { return err } /* FileInfoHeader creates a partially-populated FileHeader from an os.FileInfo. Because os.FileInfo's Name method returns only the base name of the file it describes, it may be necessary to modify the Name field of the returned header to provide the full path name of the file. 
*/ header, err := zip.FileInfoHeader(info) if err != nil { return err } if info.IsDir() && path == source { return nil } filename := strings.TrimPrefix(path, source) filename = strings.TrimPrefix(filename, "/") // trims parent part of the path, for example: strings.TrimPrefix("template/file.txt", "template/") -> "file.txt" for index := 0; index < len(excludedExtensions); index++ { if strings.HasSuffix(strings.ToLower(filename), strings.ToLower(excludedExtensions[index])) { return nil } } if filename != "" { header.Name = filename } if info.IsDir() { //fmt.Println("dir: " + info.Name()) header.Name += "/" } else { header.Method = zip.Deflate } if strings.HasPrefix(header.Name, "\\") { header.Name = strings.Replace(header.Name, "\\", "", 1) } header.Name = strings.Replace(header.Name, "\\", "/", -1) fmt.Println("zipping: " + header.Name) /** CreateHeader adds a file to the zip file using the provided FileHeader for the file metadata. It returns a Writer to which the file contents should be written. The file's contents must be written to the io.Writer before the next call to Create, CreateHeader, or Close. The provided FileHeader fh must not be modified after a call to CreateHeader. */ //fmt.Println("header: " + header.Name) writer, err := archive.CreateHeader(header) if err != nil { return err } if info.IsDir() { return nil } //fmt.Println(path) file, err := os.Open(path) if err != nil { return err } defer file.Close() /** Copy copies from src to dst until either EOF is reached on src or an error occurs. It returns the number of bytes copied and the first error encountered while copying, if any. A successful Copy returns err == nil, not err == EOF. Because Copy is defined to read from src until EOF, it does not treat an EOF from Read as an error to be reported. If src implements the WriterTo interface, the copy is implemented by calling src.WriteTo(dst). Otherwise, if dst implements the ReaderFrom interface, the copy is implemented by calling dst.ReadFrom(src). 
*/ _, err = io.Copy(writer /*dst*/, file /*src*/) return err }) return err } func downloadFromURL(url string) (filename string, err error) { temp_file, err := ioutil.TempFile(os.TempDir(), "shelltemp_") //fmt.Println("Downloading", url, "to", temp_file.Name()) defer temp_file.Close() response, err := http.Get(url) if err != nil { return "", fmt.Errorf("Error while downloading", url, "-", err) } defer response.Body.Close() n, err := io.Copy(temp_file, response.Body) if err != nil || n == 0 { return "", fmt.Errorf("Error while downloading", url, "-", err) } return temp_file.Name(), nil } // Copies file source to destination dest. func CopyFile(source string, dest string) (err error) { sf, err := os.Open(source) if err != nil { return err } defer sf.Close() df, err := os.Create(dest) if err != nil { return err } defer df.Close() _, err = io.Copy(df, sf) if err == nil { si, err := os.Stat(source) if err != nil { err = os.Chmod(dest, si.Mode()) } } return } func CreateDirIfNotExists(path string) (err error) { // create dest dir if needed _, err = os.Open(path) if os.IsNotExist(err) { err = os.MkdirAll(path, 0777) if err != nil { return err } } return nil } // Recursively copies a directory tree, attempting to preserve permissions. // Source directory must exist, destination directory must *not* exist. 
func CopyDir(source string, dest string) (err error) { // get properties of source dir fi, err := os.Stat(source) if err != nil { return err } if !fi.IsDir() { return &CustomError{"Source is not a directory"} } // create dest dir if needed err = CreateDirIfNotExists(dest) if err != nil { return err } // ensure dest dir does not already exist entries, err := ioutil.ReadDir(source) for _, entry := range entries { sfp := source + "/" + entry.Name() dfp := dest + "/" + entry.Name() if entry.IsDir() { err = CopyDir(sfp, dfp) if err != nil { log.Println(err) } } else { // perform copy err = CopyFile(sfp, dfp) if err != nil { log.Println(err) } } } return } // A struct for returning custom error messages type CustomError struct { What string } // Returns the error message defined in What as a string func (e *CustomError) Error() string { return e.What } func Copy(dst, src string) error { in, err := os.Open(src) if err != nil { return err } defer in.Close() out, err := os.Create(dst) if err != nil { return err } defer out.Close() _, err = io.Copy(out, in) cerr := out.Close() if err != nil { return err } //fmt.Println("Copied DST:" + dst) return cerr } func parseXML(filename string) string { xmlFile, err := os.Open(filename) if err != nil { fmt.Println("Error opening file:", err) return "" } defer xmlFile.Close() b, _ := ioutil.ReadAll(xmlFile) var q Driver xml.Unmarshal(b, &q) return q.Name } func printFile(file string) { input, err := ioutil.ReadFile(file) if err != nil { log.Fatalln(err) } lines := strings.Split(string(input), "\n") for _, line := range lines { fmt.Println(line) } output := strings.Join(lines, "\n") err = ioutil.WriteFile(file, []byte(output), 0644) if err != nil { log.Fatalln(err) } } func replaceString(file string, match string, replace string) { input, err := ioutil.ReadFile(file) if err != nil { log.Fatalln(err) } lines := strings.Split(string(input), "\n") for i, line := range lines { if strings.Contains(line, match) { lines[i] = 
strings.Replace(line, match, replace, -1) } } output := strings.Join(lines, "\n") err = ioutil.WriteFile(file, []byte(output), 0644) if err != nil { log.Fatalln(err) } } func downloadTemplate(template string) (directory string, err error) { url := "https://api.github.com/repos/QualiSystems/shell-templates/tarball/" zipfile, err := downloadFromURL(url) if err != nil { fmt.Println("Error while downloading", url, "-", err) return "", err } packageTempDir := filepath.Join(os.TempDir(), "spool_"+filepath.Base(zipfile)) os.MkdirAll(packageTempDir, 0777) untarIt(zipfile, packageTempDir, template) return packageTempDir, nil } func main() { app := cli.NewApp() app.Version = "0.0.1" app.Name = "shellfoundry" app.Usage = "CloudShell package CLI build tool. Use 'shellfoundry help' for more options" app.Commands = []cli.Command{ { Name: "create", Aliases: []string{"c"}, Usage: "shellfoundry create [Name]", Description: "Creates a new shell project. You can select a predefined template.", UsageText: "blah", ArgsUsage: "[Name] we recommend the name for the shells follows the CloudShell convention of [Vendor OS Type] for example: 'Acme AOS Switch' ", Flags: []cli.Flag{ cli.StringFlag{ Name: "template, t", Value: "base", Usage: "Select the shell template to use", }, cli.StringFlag{ Name: "cloudshell_version, cv", Value: "latest", Usage: "The minimal CloudShell version this shell should be compatible with", }, }, Action: func(c *cli.Context) { template := c.String("template") if len(template) == 0 { template = "base" } packageTempDir, err := downloadTemplate(template) if err != nil { fmt.Println("Error while downloading template: " + err.Error()) return } if len(c.Args()) < 0 { fmt.Println("Usage: shellfoundry create [shellname] ") return } directory := c.Args()[0] dataModelDir := filepath.Join(directory, "datamodel") distDir := filepath.Join(directory, "dist") srcDir := filepath.Join(directory, "src") scriptsDir := filepath.Join(directory, "scripts") err = 
CreateDirIfNotExists(dataModelDir) if err != nil { fmt.Println("Error while copying template: " + err.Error()) } err = CopyDir(packageTempDir, directory) if err != nil { fmt.Println("Error while copying template: " + err.Error()) } templateName := template + "Shell" err = CreateDirIfNotExists(scriptsDir) if err != nil { fmt.Println("Error while copying template: " + err.Error()) } err = CreateDirIfNotExists(distDir) if err != nil { fmt.Println("Error while copying template: " + err.Error()) } os.Rename(filepath.Join(srcDir, strings.ToLower(templateName)+"driver.py"), filepath.Join(srcDir, directory+"driver.py")) replaceString(filepath.Join(srcDir, "drivermetadata.xml"), fmt.Sprintf("<Driver Name=\"%s Driver\"", strings.Title(templateName)), fmt.Sprintf("<Driver Name=\"%s\"", strings.Title(directory+" driver"))) replaceString(filepath.Join(srcDir, "drivermetadata.xml"), fmt.Sprintf("MainClass=\"%sdriver.%sDriver\"", strings.ToLower(templateName), strings.Title(templateName)), fmt.Sprintf("MainClass=\"%s\"", directory+"driver."+strings.Title(directory+"Driver"))) replaceString(filepath.Join(srcDir, directory+"driver.py"), fmt.Sprintf("class %sDriver", strings.Title(templateName)), fmt.Sprintf("class %s", strings.Title(directory+"Driver"))) replaceString(filepath.Join(dataModelDir, "datamodel.xml"), fmt.Sprintf("<ResourceModel Name=\"%s\"", strings.Title(templateName)), fmt.Sprintf("<ResourceModel Name=\"%s\"", strings.Title(directory))) replaceString(filepath.Join(dataModelDir, "datamodel.xml"), fmt.Sprintf("<DriverDescriptor Name=\"%s\"", strings.Title(templateName+" driver")), fmt.Sprintf("<DriverDescriptor Name=\"%s\"", strings.Title(directory+" driver"))) replaceString(filepath.Join(dataModelDir, "datamodel.xml"), fmt.Sprintf("<DriverName>%s</DriverName>", strings.Title(templateName+" Driver")), fmt.Sprintf("<DriverName>%s</DriverName>", strings.Title(directory+" Driver"))) replaceString(filepath.Join(dataModelDir, "datamodel.xml"), 
fmt.Sprintf("<ParentModelName>%s</ParentModelName>", strings.Title(templateName)), fmt.Sprintf("<ParentModelName>%s</ParentModelName>", strings.Title(directory))) replaceString(filepath.Join(dataModelDir, "shellconfig.xml"), fmt.Sprintf("<ResourceTemplate Name=\"%s\" Model=\"%s\" Driver=\"%s\">", strings.Title(templateName), strings.Title(templateName), strings.Title(templateName+" driver")), fmt.Sprintf("<ResourceTemplate Name=\"%s\" Model=\"%s\" Driver=\"%s\">", strings.Title(directory), strings.Title(directory), strings.Title(directory+" driver"))) err = os.RemoveAll(packageTempDir) if err != nil { fmt.Println("Error deleting temp files: " + err.Error()) return } }, }, { Name: "package", Aliases: []string{"p"}, Usage: "shellfoundry package", UsageText: "blah", Action: func(c *cli.Context) { driverName := parseXML(filepath.Join("src", "drivermetadata.xml")) driverPath := filepath.Join(os.TempDir(), driverName) + ".zip" excludedExt := []string{".ds_store", ".gitignore"} errr := zipIt("src", driverPath, excludedExt) if errr != nil { fmt.Println("Error while packaging driver: " + errr.Error()) return } template := "package_template" packageTempDir, err := downloadTemplate(template) if err != nil { fmt.Println("Error while downloading template: " + err.Error()) return } err = Copy(filepath.Join(packageTempDir, "Resource Drivers - Python", filepath.Base(driverPath)), driverPath) if err != nil { fmt.Println("Error while copying driver " + err.Error()) return } err = Copy(filepath.Join(packageTempDir, "DataModel", "datamodel.xml"), filepath.Join("DataModel", "datamodel.xml")) if err != nil { fmt.Println("Error while copying data model: " + err.Error()) return } err = Copy(filepath.Join(packageTempDir, "Configuration", "shellconfig.xml"), filepath.Join("DataModel", "shellconfig.xml")) if err != nil { fmt.Println("Error while copying configuration: " + err.Error()) return } currentDir, err := os.Getwd() if err != nil { fmt.Println("Error while getting current dir: " + 
err.Error()) return } //fmt.Println(packageTempDir) zipIt(packageTempDir, filepath.Join("dist", filepath.Base(currentDir)+".zip"), excludedExt) if err != nil { fmt.Println("Error creating package: " + err.Error()) return } err = os.RemoveAll(packageTempDir) if err != nil { fmt.Println("Error deleting temp files: " + err.Error()) return } fmt.Println("Package created successfully") // excludedExt := []string{".ds_store"} // temp_file, err := ioutil.TempFile(os.TempDir(), "shelltemp_") // if err != nil { // fmt.Println("Error while creating dir") // return // } // zipIt("src", temp_file.Name(), excludedExt) // // zipIt("template", "package.zip", excludedExt) }, }, { Name: "list", Aliases: []string{"l"}, Usage: "Gets the list of available templates", Action: func(c *cli.Context) { link, err := getTemplateInfoFile() if err != nil { fmt.Println("Error getting list of templates: " + err.Error()) return } yamlFile, error := downloadFromURL(link) if error != nil { fmt.Println("Can't download file: " + err.Error()) return } printFile(yamlFile) }, }, { Name: "publish", Aliases: []string{"pu"}, Usage: "Not yet implemented", Action: func(c *cli.Context) { }, }, } app.Run(os.Args) }
package jwthelper

import (
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"sync"

	"github.com/dgrijalva/jwt-go"
	"github.com/dgrijalva/jwt-go/request"
	"github.com/northbright/pathhelper"
)

// Key struct consists of algorithm, signing key and verifying key.
type Key struct {
	Method    jwt.SigningMethod // jwt.SigningMethod
	SignKey   interface{}       // Signing key. HMAC: []byte, RSA / RSAPSS: *crypto/rsa.PrivateKey, ECDSA: *crypto/ecdsa.PrivateKey.
	VerifyKey interface{}       // Verifying key. HMAC: []byte, RSA / RSAPSS: *crypto/rsa.PublicKey, ECDSA: *crypto/ecdsa.PublicKey.
}

// KeyManager manages the keys by using kid(key id).
type KeyManager struct {
	Keys         map[string]*Key // Key map. Key: kid(key id), Value: Key struct.
	sync.RWMutex                 // Guards concurrent access to Keys.
}

const (
	// AvailableAlgs lists the JWT "alg" values this package accepts.
	AvailableAlgs string = "Available algs: HS256,HS384,HS512,RS256,RS384,RS512,PS256,PS384,PS512,ES256,ES384,ES512"
)

var (
	// km is the package-internal key manager.
	km KeyManager = KeyManager{Keys: make(map[string]*Key)}
)

// ReadKey reads key bytes from the key file. A relative keyFile path is
// resolved against the current executable's directory.
func ReadKey(keyFile string) (key []byte, err error) {
	p := ""
	if p, err = pathhelper.GetAbsPath(keyFile); err != nil {
		return nil, err
	}
	buf := []byte{}
	if buf, err = ioutil.ReadFile(p); err != nil {
		return nil, err
	}
	return buf, nil
}

// SetKey sets the kid - Key pair.
//
//	Params:
//	    kid: Key id. It should be unique.
//	    key: Key struct.
func SetKey(kid string, key *Key) {
	km.Lock()
	km.Keys[kid] = key
	km.Unlock()
}

// GetKey returns the Key struct registered under the given kid.
func GetKey(kid string) (k *Key, err error) {
	km.RLock()
	k, ok := km.Keys[kid]
	km.RUnlock()
	if !ok {
		return nil, errors.New("No such key id.")
	}
	return k, nil
}

// DeleteKey deletes the specified entry from the key map.
func DeleteKey(kid string) (err error) {
	if _, err := GetKey(kid); err != nil {
		return err
	}
	km.Lock()
	delete(km.Keys, kid)
	km.Unlock()
	return nil
}

// SetKeyFromFile reads the key files and stores the unique kid - Key
// information pair.
//
//	Params:
//	    kid: Key id(unique).
//	    alg: JWT alg.
//	    signKeyFile: Signing key file.
//	    verifyKeyFile: Verifying key file.
//	Return:
//	    err: error.
//	Notes:
//	    1. Current available JWT "alg": HS256, HS384, HS512, RS256, RS384, RS512, PS256, PS384, PS512, ES256, ES384, ES512.
//	    2. HMAC using SHA-XXX is a symmetric key algorithm. It just reads signKeyFile as secret key (verifyKeyFile is ignored).
//	    3. How to generate keys for JWT algs:
//	       https://github.com/northbright/Notes/blob/master/jwt/generate_keys_for_jwt_alg.md
func SetKeyFromFile(kid, alg, signKeyFile, verifyKeyFile string) (err error) {
	key := &Key{}

	m := jwt.GetSigningMethod(alg)
	if m == nil {
		return errors.New(fmt.Sprintf("Incorrect alg: %s. %s", alg, AvailableAlgs))
	}
	key.Method = m

	switch alg {
	case "HS256", "HS384", "HS512":
		// Symmetric: the same secret signs and verifies.
		if key.SignKey, err = ReadKey(signKeyFile); err != nil {
			return err
		}
		key.VerifyKey = key.SignKey
	case "RS256", "RS384", "RS512", "PS256", "PS384", "PS512":
		buf := []byte{}
		if buf, err = ReadKey(signKeyFile); err != nil {
			return err
		}
		if key.SignKey, err = jwt.ParseRSAPrivateKeyFromPEM(buf); err != nil {
			return err
		}
		if buf, err = ReadKey(verifyKeyFile); err != nil {
			return err
		}
		if key.VerifyKey, err = jwt.ParseRSAPublicKeyFromPEM(buf); err != nil {
			return err
		}
	case "ES256", "ES384", "ES512":
		buf := []byte{}
		if buf, err = ReadKey(signKeyFile); err != nil {
			return err
		}
		if key.SignKey, err = jwt.ParseECPrivateKeyFromPEM(buf); err != nil {
			return err
		}
		if buf, err = ReadKey(verifyKeyFile); err != nil {
			return err
		}
		if key.VerifyKey, err = jwt.ParseECPublicKeyFromPEM(buf); err != nil {
			return err
		}
	default:
		return errors.New(fmt.Sprintf("Incorrect alg: %s. %s", alg, AvailableAlgs))
	}

	SetKey(kid, key)
	return nil
}

// CreateTokenString creates a new JWT token string.
//
//	Params:
//	    kid: Key id.
//	    claims: map[string]interface{} used as the token's claims.
//	Return:
//	    tokenString: newly created JWT token string.
//	    err: error.
func CreateTokenString(kid string, claims map[string]interface{}) (tokenString string, err error) {
	var k *Key
	if k, err = GetKey(kid); err != nil {
		return "", err
	}
	// jwt-go v3: Token.Claims is the jwt.Claims interface; wrap the map in
	// jwt.MapClaims instead of assigning it directly (v2 style).
	t := jwt.NewWithClaims(k.Method, jwt.MapClaims(claims))
	t.Header["kid"] = kid
	return t.SignedString(k.SignKey)
}

// keyFunc matches jwt-go's Keyfunc type:
//
//	type Keyfunc func(*Token) (interface{}, error)
//
// It looks up the verifying key by the token's "kid" header and rejects
// tokens whose signing method does not match the registered key's method.
func keyFunc(token *jwt.Token) (interface{}, error) {
	kid := ""
	if str, ok := token.Header["kid"].(string); !ok {
		msg := fmt.Sprintf("token.Header[\"kid\"]'s type is %T, but not string.", token.Header["kid"])
		return nil, errors.New(msg)
	} else {
		kid = str
	}

	key, err := GetKey(kid)
	if err != nil {
		return nil, err
	}
	// Check signing method to prevent algorithm-substitution attacks.
	if token.Method.Alg() != key.Method.Alg() {
		return nil, errors.New("Signing Method Error.")
	}
	return key.VerifyKey, nil
}

// Parse parses and validates the input token string.
//
//	Params:
//	    tokenString: input JWT token string.
//	Return:
//	    kid: Key id.
//	    claims: the token's claims as map[string]interface{}.
//	    valid: token is valid or not.
//	    err: error.
func Parse(tokenString string) (kid string, claims map[string]interface{}, valid bool, err error) {
	t, err := jwt.Parse(tokenString, keyFunc)
	if err != nil {
		return "", nil, false, err
	}
	// jwt-go v3: Claims is an interface; jwt.Parse decodes into MapClaims
	// by default.
	mapClaims, ok := t.Claims.(jwt.MapClaims)
	if !ok {
		return "", nil, false, errors.New("Claims are not a map.")
	}
	return t.Header["kid"].(string), map[string]interface{}(mapClaims), t.Valid, nil
}

// ParseFromRequest parses and validates the token carried in an http.Request.
// jwt-go v3 moved this functionality to the request subpackage; the
// OAuth2Extractor checks the Authorization header and form fields, matching
// v2's jwt.ParseFromRequest behavior.
//
//	Params:
//	    r: http.Request that may contain a JWT token.
//	Return:
//	    kid: Key id.
//	    claims: the token's claims as map[string]interface{}.
//	    valid: token is valid or not.
//	    err: error.
func ParseFromRequest(r *http.Request) (kid string, claims map[string]interface{}, valid bool, err error) {
	t, err := request.ParseFromRequest(r, request.OAuth2Extractor, keyFunc)
	if err != nil {
		return "", nil, false, err
	}
	mapClaims, ok := t.Claims.(jwt.MapClaims)
	if !ok {
		return "", nil, false, errors.New("Claims are not a map.")
	}
	return t.Header["kid"].(string), map[string]interface{}(mapClaims), t.Valid, nil
}
See https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md package jwthelper import ( "errors" "fmt" "io/ioutil" "net/http" "sync" "github.com/dgrijalva/jwt-go" "github.com/dgrijalva/jwt-go/request" "github.com/northbright/pathhelper" ) // Key struct consists of algorithm, signning key and verifying key. type Key struct { Method jwt.SigningMethod // jwt.SigningMethod SignKey interface{} // Signing key. HMAC: []byte, RSA / RSAPSS: *crypto/rsa.PrivateKey, ECDSA: *crypto/ecdsa.PrivateKey. VerifyKey interface{} // Verifying key. HMAC: []byte, RSA / RSAPSS: *crypto/rsa.PublicKey, ECDSA: *crypto/ecdsa.PublicKey. } // KeyManger manages the keys by using kid(key id). type KeyManager struct { Keys map[string]*Key // Key map. Key: kid(key id), Value: Key Struct sync.RWMutex // Access map concurrently. } const ( AvailableAlgs string = "Available algs: HS256,HS384,HS512,RS256,RS384,RS512,PS256,PS384,PS512,ES256,ES384,ES512" ) var ( km KeyManager = KeyManager{Keys: make(map[string]*Key)} // Internal key manager. ) // ReadKey() reads key bytes from the key file. func ReadKey(keyFile string) (key []byte, err error) { // Make Abs key file path with current executable path if KeyFilePath is relative. p := "" if p, err = pathhelper.GetAbsPath(keyFile); err != nil { return nil, err } buf := []byte{} if buf, err = ioutil.ReadFile(p); err != nil { return nil, err } return buf, nil } // SetKey() sets the kid - Key pair. // // Params: // kid: Key id. It should be unique. // key: Key struct. func SetKey(kid string, key *Key) { km.Lock() km.Keys[kid] = key km.Unlock() } // GetKey() return the key struct by given kid. func GetKey(kid string) (k *Key, err error) { km.RLock() k, ok := km.Keys[kid] km.RUnlock() if !ok { return nil, errors.New("No such key id.") } return k, nil } // DeleteKey() deletes the specified entry from the key map. 
func DeleteKey(kid string) (err error) { if _, err := GetKey(kid); err != nil { return err } km.Lock() delete(km.Keys, kid) km.Unlock() return nil } // SetKeyFromFile() reads the key files and stores the unique kid - Key information pair. // // Params: // kid: Key id(unique). // alg: JWT alg. // signKeyFile: Signing key file. // verifyKeyFile: Verifying key file. // Return: // err: error. // Notes: // 1. Current Available JWT "alg": HS256, HS384, HS512, RS256, RS384, RS512, PS256, PS384, PS512, ES256, ES384, ES512. // 2. HMAC using SHA-XXX is a symmetric key algorithm. It just read signKeyFile as secret key(verifyKeyFile is ignored). // 3. How to Generate Keys for JWT algs: // https://github.com/northbright/Notes/blob/master/jwt/generate_keys_for_jwt_alg.md func SetKeyFromFile(kid, alg, signKeyFile, verifyKeyFile string) (err error) { key := &Key{} m := jwt.GetSigningMethod(alg) if m == nil { msg := fmt.Sprintf("Incorrect alg: %s. %s", alg, AvailableAlgs) return errors.New(msg) } // Set Signing Method key.Method = m switch alg { case "HS256", "HS384", "HS512": if key.SignKey, err = ReadKey(signKeyFile); err != nil { return err } key.VerifyKey = key.SignKey case "RS256", "RS384", "RS512", "PS256", "PS384", "PS512": buf := []byte{} if buf, err = ReadKey(signKeyFile); err != nil { return err } if key.SignKey, err = jwt.ParseRSAPrivateKeyFromPEM(buf); err != nil { return err } if buf, err = ReadKey(verifyKeyFile); err != nil { return err } if key.VerifyKey, err = jwt.ParseRSAPublicKeyFromPEM(buf); err != nil { return err } case "ES256", "ES384", "ES512": buf := []byte{} if buf, err = ReadKey(signKeyFile); err != nil { return err } if key.SignKey, err = jwt.ParseECPrivateKeyFromPEM(buf); err != nil { return err } if buf, err = ReadKey(verifyKeyFile); err != nil { return err } if key.VerifyKey, err = jwt.ParseECPublicKeyFromPEM(buf); err != nil { return err } default: msg := fmt.Sprintf("Incorrect alg: %s. 
%s", alg, AvailableAlgs) return errors.New(msg) } SetKey(kid, key) return nil } // CreateTokenString() creates a new JWT token string. // // Params: // kid: Key id. // claims: map[string]interface{} to fill the jwt.Token[Claims]. // Return: // tokenString: new created JWT token string. // err: error. func CreateTokenString(kid string, claims map[string]interface{}) (tokenString string, err error) { var k *Key if k, err = GetKey(kid); err != nil { return "", err } t := jwt.NewWithClaims(k.Method, jwt.MapClaims(claims)) t.Header["kid"] = kid return t.SignedString(k.SignKey) } // jwt-go's KeyFunc type: // // type Keyfunc func(*Token) (interface{}, error) func keyFunc(token *jwt.Token) (interface{}, error) { kid := "" if str, ok := token.Header["kid"].(string); !ok { msg := fmt.Sprintf("token.Header[\"kid\"]'s type is %T, but not string.", token.Header["kid"]) return nil, errors.New(msg) } else { kid = str } key, err := GetKey(kid) if err != nil { return nil, err } // Check signing method if token.Method.Alg() != key.Method.Alg() { return nil, errors.New("Signing Method Error.") } return key.VerifyKey, nil } // Parse() parses and validates the input token string. // // Params: // tokenString: input JWT token string. // Return: // kid: Key id. // claims: map[string]interface{} to fill the jwt.Token[Claims]. // valid: token is valid or not. // err: error. func Parse(tokenString string) (kid string, claims map[string]interface{}, valid bool, err error) { t, err := jwt.Parse(tokenString, keyFunc) if err != nil { return "", nil, false, err } return t.Header["kid"].(string), t.Claims.(jwt.MapClaims), t.Valid, nil } // ParseFromRequest() parses and validates the input token string in an http.Request. It's a wrapper of jwt.ParseFromRequest(). // // Params: // r: http.Request may contain jwt token. // extractor: Interface for extracting a token from an HTTP request. // See https://godoc.org/github.com/dgrijalva/jwt-go/request#Extractor // Return: // kid: Key id. 
// claims: map[string]interface{} to fill the jwt.Token[Claims].
// valid: token is valid or not.
// err: error.
func ParseFromRequest(r *http.Request, e request.Extractor) (kid string, claims map[string]interface{}, valid bool, err error) {
	// Delegate token extraction, parsing and signature verification to the
	// jwt-go request helper; keyFunc looks up the verifying key via the
	// token's "kid" header.
	t, err := request.ParseFromRequest(r, e, keyFunc)
	if err != nil {
		return "", nil, false, err
	}
	// keyFunc has already rejected any token whose "kid" header is not a
	// string, so the Header assertion cannot panic here.
	// NOTE(review): the Claims assertion presumes request.ParseFromRequest
	// produced jwt.MapClaims (the library default) — confirm against the
	// jwt-go v3 API.
	return t.Header["kid"].(string), t.Claims.(jwt.MapClaims), t.Valid, nil
}
/* Copyright 2015 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package suite import ( "sort" "sync/atomic" "time" "github.com/gravitational/teleport" "github.com/gravitational/teleport/lib/backend" "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/services" "github.com/gokyle/hotp" "golang.org/x/crypto/ssh" . "gopkg.in/check.v1" ) // NewTestCA returns new test authority with a test key as a public and // signing key func NewTestCA(caType services.CertAuthType, domainName string) *services.CertAuthority { keyBytes := PEMBytes["rsa"] key, err := ssh.ParsePrivateKey(keyBytes) if err != nil { panic(err) } pubKey := key.PublicKey() return &services.CertAuthority{ Type: caType, DomainName: domainName, CheckingKeys: [][]byte{ssh.MarshalAuthorizedKey(pubKey)}, SigningKeys: [][]byte{keyBytes}, } } type ServicesTestSuite struct { CAS services.Trust LockS services.Lock PresenceS services.Presence ProvisioningS services.Provisioner WebS services.Identity ChangesC chan interface{} } func (s *ServicesTestSuite) collectChanges(c *C, expected int) []interface{} { changes := make([]interface{}, expected) for i := range changes { select { case changes[i] = <-s.ChangesC: // successfully collected changes case <-time.After(2 * time.Second): c.Fatalf("Timeout occured waiting for events") } } return changes } func (s *ServicesTestSuite) expectChanges(c *C, expected ...interface{}) { changes := s.collectChanges(c, len(expected)) for i, ch := range changes { 
c.Assert(ch, DeepEquals, expected[i]) } } func userSlicesEqual(c *C, a []services.User, b []services.User) { comment := Commentf("a: %#v b: %#v", a, b) c.Assert(len(a), Equals, len(b), comment) sort.Sort(services.Users(a)) sort.Sort(services.Users(b)) for i := range a { usersEqual(c, a[i], b[i]) } } func usersEqual(c *C, a services.User, b services.User) { comment := Commentf("a: %#v b: %#v", a, b) c.Assert(a.Equals(b), Equals, true, comment) } func (s *ServicesTestSuite) UsersCRUD(c *C) { u, err := s.WebS.GetUsers() c.Assert(err, IsNil) c.Assert(len(u), Equals, 0) c.Assert(s.WebS.UpsertPasswordHash("user1", []byte("hash")), IsNil) c.Assert(s.WebS.UpsertPasswordHash("user2", []byte("hash2")), IsNil) u, err = s.WebS.GetUsers() c.Assert(err, IsNil) userSlicesEqual(c, u, []services.User{ &services.TeleportUser{Name: "user1"}, &services.TeleportUser{Name: "user2"}}) out, err := s.WebS.GetUser("user1") c.Assert(err, IsNil) usersEqual(c, out, &services.TeleportUser{Name: "user1"}) user := &services.TeleportUser{Name: "user1", AllowedLogins: []string{"admin", "root"}} c.Assert(s.WebS.UpsertUser(user), IsNil) out, err = s.WebS.GetUser("user1") c.Assert(err, IsNil) usersEqual(c, out, user) user.AllowedLogins = nil c.Assert(s.WebS.UpsertUser(user), IsNil) out, err = s.WebS.GetUser("user1") c.Assert(err, IsNil) usersEqual(c, out, user) c.Assert(s.WebS.DeleteUser("user1"), IsNil) u, err = s.WebS.GetUsers() c.Assert(err, IsNil) userSlicesEqual(c, u, []services.User{&services.TeleportUser{Name: "user2"}}) err = s.WebS.DeleteUser("user1") c.Assert(trace.IsNotFound(err), Equals, true, Commentf("unexpected %T %#v", err, err)) // bad username err = s.WebS.UpsertUser(&services.TeleportUser{Name: ""}) c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("expected bad parameter error, got %T", err)) // bad allowed login err = s.WebS.UpsertUser(&services.TeleportUser{Name: "bob", AllowedLogins: []string{"oops typo!"}}) c.Assert(trace.IsBadParameter(err), Equals, true, 
Commentf("expected bad parameter error, got %T", err)) } func (s *ServicesTestSuite) CertAuthCRUD(c *C) { ca := NewTestCA(services.UserCA, "example.com") c.Assert(s.CAS.UpsertCertAuthority( *ca, backend.Forever), IsNil) out, err := s.CAS.GetCertAuthority(*ca.ID(), true) c.Assert(err, IsNil) c.Assert(out, DeepEquals, ca) cas, err := s.CAS.GetCertAuthorities(services.UserCA, false) c.Assert(err, IsNil) ca2 := *ca ca2.SigningKeys = nil c.Assert(cas[0], DeepEquals, &ca2) cas, err = s.CAS.GetCertAuthorities(services.UserCA, true) c.Assert(err, IsNil) c.Assert(cas[0], DeepEquals, ca) err = s.CAS.DeleteCertAuthority(*ca.ID()) c.Assert(err, IsNil) } func (s *ServicesTestSuite) ServerCRUD(c *C) { out, err := s.PresenceS.GetNodes() c.Assert(err, IsNil) c.Assert(len(out), Equals, 0) srv := services.Server{ID: "srv1", Addr: "localhost:2022"} c.Assert(s.PresenceS.UpsertNode(srv, 0), IsNil) out, err = s.PresenceS.GetNodes() c.Assert(err, IsNil) c.Assert(out, DeepEquals, []services.Server{srv}) out, err = s.PresenceS.GetProxies() c.Assert(err, IsNil) c.Assert(len(out), Equals, 0) proxy := services.Server{ID: "proxy1", Addr: "localhost:2023"} c.Assert(s.PresenceS.UpsertProxy(proxy, 0), IsNil) out, err = s.PresenceS.GetProxies() c.Assert(err, IsNil) c.Assert(out, DeepEquals, []services.Server{proxy}) out, err = s.PresenceS.GetAuthServers() c.Assert(err, IsNil) c.Assert(len(out), Equals, 0) auth := services.Server{ID: "auth1", Addr: "localhost:2025"} c.Assert(s.PresenceS.UpsertAuthServer(auth, 0), IsNil) out, err = s.PresenceS.GetAuthServers() c.Assert(err, IsNil) c.Assert(out, DeepEquals, []services.Server{auth}) } func (s *ServicesTestSuite) ReverseTunnelsCRUD(c *C) { out, err := s.PresenceS.GetReverseTunnels() c.Assert(err, IsNil) c.Assert(len(out), Equals, 0) tunnel := services.ReverseTunnel{DomainName: "example.com", DialAddrs: []string{"example.com:2023"}} c.Assert(s.PresenceS.UpsertReverseTunnel(tunnel, 0), IsNil) out, err = s.PresenceS.GetReverseTunnels() c.Assert(err, 
IsNil) c.Assert(out, DeepEquals, []services.ReverseTunnel{tunnel}) err = s.PresenceS.DeleteReverseTunnel(tunnel.DomainName) c.Assert(err, IsNil) out, err = s.PresenceS.GetReverseTunnels() c.Assert(err, IsNil) c.Assert(len(out), Equals, 0) err = s.PresenceS.UpsertReverseTunnel(services.ReverseTunnel{DomainName: " bad domain", DialAddrs: []string{"example.com:2023"}}, 0) c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("%#v", err)) err = s.PresenceS.UpsertReverseTunnel(services.ReverseTunnel{DomainName: "example.com", DialAddrs: []string{"bad address"}}, 0) c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("%#v", err)) err = s.PresenceS.UpsertReverseTunnel(services.ReverseTunnel{DomainName: "example.com"}, 0) c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("%#v", err)) } func (s *ServicesTestSuite) PasswordHashCRUD(c *C) { _, err := s.WebS.GetPasswordHash("user1") c.Assert(trace.IsNotFound(err), Equals, true, Commentf("%#v", err)) err = s.WebS.UpsertPasswordHash("user1", []byte("hello123")) c.Assert(err, IsNil) hash, err := s.WebS.GetPasswordHash("user1") c.Assert(err, IsNil) c.Assert(hash, DeepEquals, []byte("hello123")) err = s.WebS.UpsertPasswordHash("user1", []byte("hello321")) c.Assert(err, IsNil) hash, err = s.WebS.GetPasswordHash("user1") c.Assert(err, IsNil) c.Assert(hash, DeepEquals, []byte("hello321")) } func (s *ServicesTestSuite) WebSessionCRUD(c *C) { _, err := s.WebS.GetWebSession("user1", "sid1") c.Assert(err, FitsTypeOf, &teleport.NotFoundError{}) dt := time.Date(2015, 6, 5, 4, 3, 2, 1, time.UTC).UTC() ws := services.WebSession{ Pub: []byte("pub123"), Priv: []byte("priv123"), Expires: dt, } err = s.WebS.UpsertWebSession("user1", "sid1", ws, 0) c.Assert(err, IsNil) out, err := s.WebS.GetWebSession("user1", "sid1") c.Assert(err, IsNil) c.Assert(out, DeepEquals, &ws) ws1 := services.WebSession{Pub: []byte("pub321"), Priv: []byte("priv321"), Expires: dt} err = s.WebS.UpsertWebSession("user1", "sid1", ws1, 0) c.Assert(err, 
IsNil) out2, err := s.WebS.GetWebSession("user1", "sid1") c.Assert(err, IsNil) c.Assert(out2, DeepEquals, &ws1) c.Assert(s.WebS.DeleteWebSession("user1", "sid1"), IsNil) _, err = s.WebS.GetWebSession("user1", "sid1") c.Assert(trace.IsNotFound(err), Equals, true, Commentf("%#v", err)) } func (s *ServicesTestSuite) Locking(c *C) { tok1 := "token1" tok2 := "token2" err := s.LockS.ReleaseLock(tok1) c.Assert(trace.IsNotFound(err), Equals, true, Commentf("%#v", err)) c.Assert(s.LockS.AcquireLock(tok1, 30*time.Second), IsNil) x := int32(7) go func() { atomic.StoreInt32(&x, 9) c.Assert(s.LockS.ReleaseLock(tok1), IsNil) }() c.Assert(s.LockS.AcquireLock(tok1, 0), IsNil) atomic.AddInt32(&x, 9) c.Assert(atomic.LoadInt32(&x), Equals, int32(18)) c.Assert(s.LockS.ReleaseLock(tok1), IsNil) c.Assert(s.LockS.AcquireLock(tok1, 0), IsNil) atomic.StoreInt32(&x, 7) go func() { atomic.StoreInt32(&x, 9) c.Assert(s.LockS.ReleaseLock(tok1), IsNil) }() c.Assert(s.LockS.AcquireLock(tok1, 0), IsNil) atomic.AddInt32(&x, 9) c.Assert(atomic.LoadInt32(&x), Equals, int32(18)) c.Assert(s.LockS.ReleaseLock(tok1), IsNil) y := int32(0) go func() { c.Assert(s.LockS.AcquireLock(tok1, 0), IsNil) c.Assert(s.LockS.AcquireLock(tok2, 0), IsNil) c.Assert(s.LockS.ReleaseLock(tok1), IsNil) c.Assert(s.LockS.ReleaseLock(tok2), IsNil) atomic.StoreInt32(&y, 15) }() time.Sleep(1 * time.Second) c.Assert(atomic.LoadInt32(&y), Equals, int32(15)) err = s.LockS.ReleaseLock(tok1) c.Assert(trace.IsNotFound(err), Equals, true, Commentf("%#v", err)) } func (s *ServicesTestSuite) TokenCRUD(c *C) { _, err := s.ProvisioningS.GetToken("token") c.Assert(trace.IsNotFound(err), Equals, true, Commentf("%#v", err)) c.Assert(s.ProvisioningS.UpsertToken("token", "RoleExample", 0), IsNil) token, err := s.ProvisioningS.GetToken("token") c.Assert(token.Role, Equals, "RoleExample") c.Assert(token.TTL > 0 && token.TTL <= defaults.MaxProvisioningTokenTTL, Equals, true, Commentf("%v", token.TTL)) c.Assert(err, IsNil) 
c.Assert(s.ProvisioningS.DeleteToken("token"), IsNil) _, err = s.ProvisioningS.GetToken("token") c.Assert(trace.IsNotFound(err), Equals, true, Commentf("%#v", err)) outputToken, err := services.JoinTokenRole("token1", "Auth") c.Assert(err, IsNil) tok, role, err := services.SplitTokenRole(outputToken) c.Assert(err, IsNil) c.Assert(tok, Equals, "token1") c.Assert(role, Equals, "Auth") c.Assert(s.ProvisioningS.UpsertToken("token2", "RoleExample", 2*defaults.MaxProvisioningTokenTTL), IsNil) token, err = s.ProvisioningS.GetToken("token2") c.Assert(token.Role, Equals, "RoleExample") c.Assert(token.TTL > 0 && token.TTL <= defaults.MaxProvisioningTokenTTL, Equals, true, Commentf("%v", token.TTL)) c.Assert(err, IsNil) } func (s *ServicesTestSuite) PasswordCRUD(c *C) { pass := []byte("abc123") err := s.WebS.CheckPassword("user1", pass, "123456") c.Assert(err, NotNil) hotpURL, _, err := s.WebS.UpsertPassword("user1", pass) c.Assert(err, IsNil) otp, label, err := hotp.FromURL(hotpURL) c.Assert(err, IsNil) c.Assert(label, Equals, "user1") otp.Increment() token1 := otp.OTP() err = s.WebS.CheckPassword("user1", pass, "123456") c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("%T", err)) c.Assert(s.WebS.CheckPassword("user1", pass, token1), IsNil) err = s.WebS.CheckPassword("user1", pass, token1) c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("%T", err)) token2 := otp.OTP() err = s.WebS.CheckPassword("user1", []byte("abc123123"), token2) c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("%T", err)) err = s.WebS.CheckPassword("user1", pass, "123456") c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("%T", err)) c.Assert(s.WebS.CheckPassword("user1", pass, token2), IsNil) err = s.WebS.CheckPassword("user1", pass, token1) c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("%T", err)) _ = otp.OTP() _ = otp.OTP() _ = otp.OTP() token6 := otp.OTP() token7 := otp.OTP() err = s.WebS.CheckPassword("user1", pass, token7) 
c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("%T", err)) c.Assert(s.WebS.CheckPassword("user1", pass, token6), IsNil) err = s.WebS.CheckPassword("user1", pass, "123456") c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("%T", err)) c.Assert(s.WebS.CheckPassword("user1", pass, token7), IsNil) _ = otp.OTP() token9 := otp.OTP() c.Assert(s.WebS.CheckPassword("user1", pass, token9), IsNil) } func (s *ServicesTestSuite) PasswordGarbage(c *C) { garbage := [][]byte{ nil, make([]byte, defaults.MaxPasswordLength+1), make([]byte, defaults.MinPasswordLength-1), } for _, g := range garbage { err := s.WebS.CheckPassword("user1", g, "123456") c.Assert(err, NotNil) } } fix import error /* Copyright 2015 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package suite import ( "sort" "sync/atomic" "time" "github.com/gravitational/teleport" "github.com/gravitational/teleport/lib/backend" "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/services" "github.com/gokyle/hotp" "github.com/gravitational/trace" "golang.org/x/crypto/ssh" . 
"gopkg.in/check.v1" ) // NewTestCA returns new test authority with a test key as a public and // signing key func NewTestCA(caType services.CertAuthType, domainName string) *services.CertAuthority { keyBytes := PEMBytes["rsa"] key, err := ssh.ParsePrivateKey(keyBytes) if err != nil { panic(err) } pubKey := key.PublicKey() return &services.CertAuthority{ Type: caType, DomainName: domainName, CheckingKeys: [][]byte{ssh.MarshalAuthorizedKey(pubKey)}, SigningKeys: [][]byte{keyBytes}, } } type ServicesTestSuite struct { CAS services.Trust LockS services.Lock PresenceS services.Presence ProvisioningS services.Provisioner WebS services.Identity ChangesC chan interface{} } func (s *ServicesTestSuite) collectChanges(c *C, expected int) []interface{} { changes := make([]interface{}, expected) for i := range changes { select { case changes[i] = <-s.ChangesC: // successfully collected changes case <-time.After(2 * time.Second): c.Fatalf("Timeout occured waiting for events") } } return changes } func (s *ServicesTestSuite) expectChanges(c *C, expected ...interface{}) { changes := s.collectChanges(c, len(expected)) for i, ch := range changes { c.Assert(ch, DeepEquals, expected[i]) } } func userSlicesEqual(c *C, a []services.User, b []services.User) { comment := Commentf("a: %#v b: %#v", a, b) c.Assert(len(a), Equals, len(b), comment) sort.Sort(services.Users(a)) sort.Sort(services.Users(b)) for i := range a { usersEqual(c, a[i], b[i]) } } func usersEqual(c *C, a services.User, b services.User) { comment := Commentf("a: %#v b: %#v", a, b) c.Assert(a.Equals(b), Equals, true, comment) } func (s *ServicesTestSuite) UsersCRUD(c *C) { u, err := s.WebS.GetUsers() c.Assert(err, IsNil) c.Assert(len(u), Equals, 0) c.Assert(s.WebS.UpsertPasswordHash("user1", []byte("hash")), IsNil) c.Assert(s.WebS.UpsertPasswordHash("user2", []byte("hash2")), IsNil) u, err = s.WebS.GetUsers() c.Assert(err, IsNil) userSlicesEqual(c, u, []services.User{ &services.TeleportUser{Name: "user1"}, 
&services.TeleportUser{Name: "user2"}}) out, err := s.WebS.GetUser("user1") c.Assert(err, IsNil) usersEqual(c, out, &services.TeleportUser{Name: "user1"}) user := &services.TeleportUser{Name: "user1", AllowedLogins: []string{"admin", "root"}} c.Assert(s.WebS.UpsertUser(user), IsNil) out, err = s.WebS.GetUser("user1") c.Assert(err, IsNil) usersEqual(c, out, user) user.AllowedLogins = nil c.Assert(s.WebS.UpsertUser(user), IsNil) out, err = s.WebS.GetUser("user1") c.Assert(err, IsNil) usersEqual(c, out, user) c.Assert(s.WebS.DeleteUser("user1"), IsNil) u, err = s.WebS.GetUsers() c.Assert(err, IsNil) userSlicesEqual(c, u, []services.User{&services.TeleportUser{Name: "user2"}}) err = s.WebS.DeleteUser("user1") c.Assert(trace.IsNotFound(err), Equals, true, Commentf("unexpected %T %#v", err, err)) // bad username err = s.WebS.UpsertUser(&services.TeleportUser{Name: ""}) c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("expected bad parameter error, got %T", err)) // bad allowed login err = s.WebS.UpsertUser(&services.TeleportUser{Name: "bob", AllowedLogins: []string{"oops typo!"}}) c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("expected bad parameter error, got %T", err)) } func (s *ServicesTestSuite) CertAuthCRUD(c *C) { ca := NewTestCA(services.UserCA, "example.com") c.Assert(s.CAS.UpsertCertAuthority( *ca, backend.Forever), IsNil) out, err := s.CAS.GetCertAuthority(*ca.ID(), true) c.Assert(err, IsNil) c.Assert(out, DeepEquals, ca) cas, err := s.CAS.GetCertAuthorities(services.UserCA, false) c.Assert(err, IsNil) ca2 := *ca ca2.SigningKeys = nil c.Assert(cas[0], DeepEquals, &ca2) cas, err = s.CAS.GetCertAuthorities(services.UserCA, true) c.Assert(err, IsNil) c.Assert(cas[0], DeepEquals, ca) err = s.CAS.DeleteCertAuthority(*ca.ID()) c.Assert(err, IsNil) } func (s *ServicesTestSuite) ServerCRUD(c *C) { out, err := s.PresenceS.GetNodes() c.Assert(err, IsNil) c.Assert(len(out), Equals, 0) srv := services.Server{ID: "srv1", Addr: "localhost:2022"} 
c.Assert(s.PresenceS.UpsertNode(srv, 0), IsNil) out, err = s.PresenceS.GetNodes() c.Assert(err, IsNil) c.Assert(out, DeepEquals, []services.Server{srv}) out, err = s.PresenceS.GetProxies() c.Assert(err, IsNil) c.Assert(len(out), Equals, 0) proxy := services.Server{ID: "proxy1", Addr: "localhost:2023"} c.Assert(s.PresenceS.UpsertProxy(proxy, 0), IsNil) out, err = s.PresenceS.GetProxies() c.Assert(err, IsNil) c.Assert(out, DeepEquals, []services.Server{proxy}) out, err = s.PresenceS.GetAuthServers() c.Assert(err, IsNil) c.Assert(len(out), Equals, 0) auth := services.Server{ID: "auth1", Addr: "localhost:2025"} c.Assert(s.PresenceS.UpsertAuthServer(auth, 0), IsNil) out, err = s.PresenceS.GetAuthServers() c.Assert(err, IsNil) c.Assert(out, DeepEquals, []services.Server{auth}) } func (s *ServicesTestSuite) ReverseTunnelsCRUD(c *C) { out, err := s.PresenceS.GetReverseTunnels() c.Assert(err, IsNil) c.Assert(len(out), Equals, 0) tunnel := services.ReverseTunnel{DomainName: "example.com", DialAddrs: []string{"example.com:2023"}} c.Assert(s.PresenceS.UpsertReverseTunnel(tunnel, 0), IsNil) out, err = s.PresenceS.GetReverseTunnels() c.Assert(err, IsNil) c.Assert(out, DeepEquals, []services.ReverseTunnel{tunnel}) err = s.PresenceS.DeleteReverseTunnel(tunnel.DomainName) c.Assert(err, IsNil) out, err = s.PresenceS.GetReverseTunnels() c.Assert(err, IsNil) c.Assert(len(out), Equals, 0) err = s.PresenceS.UpsertReverseTunnel(services.ReverseTunnel{DomainName: " bad domain", DialAddrs: []string{"example.com:2023"}}, 0) c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("%#v", err)) err = s.PresenceS.UpsertReverseTunnel(services.ReverseTunnel{DomainName: "example.com", DialAddrs: []string{"bad address"}}, 0) c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("%#v", err)) err = s.PresenceS.UpsertReverseTunnel(services.ReverseTunnel{DomainName: "example.com"}, 0) c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("%#v", err)) } func (s *ServicesTestSuite) 
PasswordHashCRUD(c *C) { _, err := s.WebS.GetPasswordHash("user1") c.Assert(trace.IsNotFound(err), Equals, true, Commentf("%#v", err)) err = s.WebS.UpsertPasswordHash("user1", []byte("hello123")) c.Assert(err, IsNil) hash, err := s.WebS.GetPasswordHash("user1") c.Assert(err, IsNil) c.Assert(hash, DeepEquals, []byte("hello123")) err = s.WebS.UpsertPasswordHash("user1", []byte("hello321")) c.Assert(err, IsNil) hash, err = s.WebS.GetPasswordHash("user1") c.Assert(err, IsNil) c.Assert(hash, DeepEquals, []byte("hello321")) } func (s *ServicesTestSuite) WebSessionCRUD(c *C) { _, err := s.WebS.GetWebSession("user1", "sid1") c.Assert(err, FitsTypeOf, &teleport.NotFoundError{}) dt := time.Date(2015, 6, 5, 4, 3, 2, 1, time.UTC).UTC() ws := services.WebSession{ Pub: []byte("pub123"), Priv: []byte("priv123"), Expires: dt, } err = s.WebS.UpsertWebSession("user1", "sid1", ws, 0) c.Assert(err, IsNil) out, err := s.WebS.GetWebSession("user1", "sid1") c.Assert(err, IsNil) c.Assert(out, DeepEquals, &ws) ws1 := services.WebSession{Pub: []byte("pub321"), Priv: []byte("priv321"), Expires: dt} err = s.WebS.UpsertWebSession("user1", "sid1", ws1, 0) c.Assert(err, IsNil) out2, err := s.WebS.GetWebSession("user1", "sid1") c.Assert(err, IsNil) c.Assert(out2, DeepEquals, &ws1) c.Assert(s.WebS.DeleteWebSession("user1", "sid1"), IsNil) _, err = s.WebS.GetWebSession("user1", "sid1") c.Assert(trace.IsNotFound(err), Equals, true, Commentf("%#v", err)) } func (s *ServicesTestSuite) Locking(c *C) { tok1 := "token1" tok2 := "token2" err := s.LockS.ReleaseLock(tok1) c.Assert(trace.IsNotFound(err), Equals, true, Commentf("%#v", err)) c.Assert(s.LockS.AcquireLock(tok1, 30*time.Second), IsNil) x := int32(7) go func() { atomic.StoreInt32(&x, 9) c.Assert(s.LockS.ReleaseLock(tok1), IsNil) }() c.Assert(s.LockS.AcquireLock(tok1, 0), IsNil) atomic.AddInt32(&x, 9) c.Assert(atomic.LoadInt32(&x), Equals, int32(18)) c.Assert(s.LockS.ReleaseLock(tok1), IsNil) c.Assert(s.LockS.AcquireLock(tok1, 0), IsNil) 
atomic.StoreInt32(&x, 7) go func() { atomic.StoreInt32(&x, 9) c.Assert(s.LockS.ReleaseLock(tok1), IsNil) }() c.Assert(s.LockS.AcquireLock(tok1, 0), IsNil) atomic.AddInt32(&x, 9) c.Assert(atomic.LoadInt32(&x), Equals, int32(18)) c.Assert(s.LockS.ReleaseLock(tok1), IsNil) y := int32(0) go func() { c.Assert(s.LockS.AcquireLock(tok1, 0), IsNil) c.Assert(s.LockS.AcquireLock(tok2, 0), IsNil) c.Assert(s.LockS.ReleaseLock(tok1), IsNil) c.Assert(s.LockS.ReleaseLock(tok2), IsNil) atomic.StoreInt32(&y, 15) }() time.Sleep(1 * time.Second) c.Assert(atomic.LoadInt32(&y), Equals, int32(15)) err = s.LockS.ReleaseLock(tok1) c.Assert(trace.IsNotFound(err), Equals, true, Commentf("%#v", err)) } func (s *ServicesTestSuite) TokenCRUD(c *C) { _, err := s.ProvisioningS.GetToken("token") c.Assert(trace.IsNotFound(err), Equals, true, Commentf("%#v", err)) c.Assert(s.ProvisioningS.UpsertToken("token", "RoleExample", 0), IsNil) token, err := s.ProvisioningS.GetToken("token") c.Assert(token.Role, Equals, "RoleExample") c.Assert(token.TTL > 0 && token.TTL <= defaults.MaxProvisioningTokenTTL, Equals, true, Commentf("%v", token.TTL)) c.Assert(err, IsNil) c.Assert(s.ProvisioningS.DeleteToken("token"), IsNil) _, err = s.ProvisioningS.GetToken("token") c.Assert(trace.IsNotFound(err), Equals, true, Commentf("%#v", err)) outputToken, err := services.JoinTokenRole("token1", "Auth") c.Assert(err, IsNil) tok, role, err := services.SplitTokenRole(outputToken) c.Assert(err, IsNil) c.Assert(tok, Equals, "token1") c.Assert(role, Equals, "Auth") c.Assert(s.ProvisioningS.UpsertToken("token2", "RoleExample", 2*defaults.MaxProvisioningTokenTTL), IsNil) token, err = s.ProvisioningS.GetToken("token2") c.Assert(token.Role, Equals, "RoleExample") c.Assert(token.TTL > 0 && token.TTL <= defaults.MaxProvisioningTokenTTL, Equals, true, Commentf("%v", token.TTL)) c.Assert(err, IsNil) } func (s *ServicesTestSuite) PasswordCRUD(c *C) { pass := []byte("abc123") err := s.WebS.CheckPassword("user1", pass, "123456") 
c.Assert(err, NotNil) hotpURL, _, err := s.WebS.UpsertPassword("user1", pass) c.Assert(err, IsNil) otp, label, err := hotp.FromURL(hotpURL) c.Assert(err, IsNil) c.Assert(label, Equals, "user1") otp.Increment() token1 := otp.OTP() err = s.WebS.CheckPassword("user1", pass, "123456") c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("%T", err)) c.Assert(s.WebS.CheckPassword("user1", pass, token1), IsNil) err = s.WebS.CheckPassword("user1", pass, token1) c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("%T", err)) token2 := otp.OTP() err = s.WebS.CheckPassword("user1", []byte("abc123123"), token2) c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("%T", err)) err = s.WebS.CheckPassword("user1", pass, "123456") c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("%T", err)) c.Assert(s.WebS.CheckPassword("user1", pass, token2), IsNil) err = s.WebS.CheckPassword("user1", pass, token1) c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("%T", err)) _ = otp.OTP() _ = otp.OTP() _ = otp.OTP() token6 := otp.OTP() token7 := otp.OTP() err = s.WebS.CheckPassword("user1", pass, token7) c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("%T", err)) c.Assert(s.WebS.CheckPassword("user1", pass, token6), IsNil) err = s.WebS.CheckPassword("user1", pass, "123456") c.Assert(trace.IsBadParameter(err), Equals, true, Commentf("%T", err)) c.Assert(s.WebS.CheckPassword("user1", pass, token7), IsNil) _ = otp.OTP() token9 := otp.OTP() c.Assert(s.WebS.CheckPassword("user1", pass, token9), IsNil) } func (s *ServicesTestSuite) PasswordGarbage(c *C) { garbage := [][]byte{ nil, make([]byte, defaults.MaxPasswordLength+1), make([]byte, defaults.MinPasswordLength-1), } for _, g := range garbage { err := s.WebS.CheckPassword("user1", g, "123456") c.Assert(err, NotNil) } }
// Copyright 2016 The go-github AUTHORS. All rights reserved. // // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package github import ( "context" "fmt" ) // ProjectsService provides access to the projects functions in the // GitHub API. // // GitHub API docs: https://developer.github.com/v3/projects/ type ProjectsService service // Project represents a GitHub Project. type Project struct { ID *int `json:"id,omitempty"` URL *string `json:"url,omitempty"` OwnerURL *string `json:"owner_url,omitempty"` Name *string `json:"name,omitempty"` Body *string `json:"body,omitempty"` Number *int `json:"number,omitempty"` CreatedAt *Timestamp `json:"created_at,omitempty"` UpdatedAt *Timestamp `json:"updated_at,omitempty"` // The User object that generated the project. Creator *User `json:"creator,omitempty"` } func (p Project) String() string { return Stringify(p) } // GetProject gets a GitHub Project for a repo. // // GitHub API docs: https://developer.github.com/v3/projects/#get-a-project func (s *ProjectsService) GetProject(ctx context.Context, id int) (*Project, *Response, error) { u := fmt.Sprintf("projects/%v", id) req, err := s.client.NewRequest("GET", u, nil) if err != nil { return nil, nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) project := &Project{} resp, err := s.client.Do(ctx, req, project) if err != nil { return nil, resp, err } return project, resp, nil } // ProjectOptions specifies the parameters to the // RepositoriesService.CreateProject and // ProjectsService.UpdateProject methods. type ProjectOptions struct { // The name of the project. (Required for creation; optional for update.) Name string `json:"name,omitempty"` // The body of the project. (Optional.) Body string `json:"body,omitempty"` } // UpdateProject updates a repository project. 
// // GitHub API docs: https://developer.github.com/v3/projects/#update-a-project func (s *ProjectsService) UpdateProject(ctx context.Context, id int, opt *ProjectOptions) (*Project, *Response, error) { u := fmt.Sprintf("projects/%v", id) req, err := s.client.NewRequest("PATCH", u, opt) if err != nil { return nil, nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) project := &Project{} resp, err := s.client.Do(ctx, req, project) if err != nil { return nil, resp, err } return project, resp, nil } // DeleteProject deletes a GitHub Project from a repository. // // GitHub API docs: https://developer.github.com/v3/projects/#delete-a-project func (s *ProjectsService) DeleteProject(ctx context.Context, id int) (*Response, error) { u := fmt.Sprintf("projects/%v", id) req, err := s.client.NewRequest("DELETE", u, nil) if err != nil { return nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) return s.client.Do(ctx, req, nil) } // ProjectColumn represents a column of a GitHub Project. // // GitHub API docs: https://developer.github.com/v3/repos/projects/ type ProjectColumn struct { ID *int `json:"id,omitempty"` Name *string `json:"name,omitempty"` ProjectURL *string `json:"project_url,omitempty"` CreatedAt *Timestamp `json:"created_at,omitempty"` UpdatedAt *Timestamp `json:"updated_at,omitempty"` } // ListProjectColumns lists the columns of a GitHub Project for a repo. 
// // GitHub API docs: https://developer.github.com/v3/projects/columns/#list-project-columns func (s *ProjectsService) ListProjectColumns(ctx context.Context, projectID int, opt *ListOptions) ([]*ProjectColumn, *Response, error) { u := fmt.Sprintf("projects/%v/columns", projectID) u, err := addOptions(u, opt) if err != nil { return nil, nil, err } req, err := s.client.NewRequest("GET", u, nil) if err != nil { return nil, nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) columns := []*ProjectColumn{} resp, err := s.client.Do(ctx, req, &columns) if err != nil { return nil, resp, err } return columns, resp, nil } // GetProjectColumn gets a column of a GitHub Project for a repo. // // GitHub API docs: https://developer.github.com/v3/projects/columns/#get-a-project-column func (s *ProjectsService) GetProjectColumn(ctx context.Context, id int) (*ProjectColumn, *Response, error) { u := fmt.Sprintf("projects/columns/%v", id) req, err := s.client.NewRequest("GET", u, nil) if err != nil { return nil, nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) column := &ProjectColumn{} resp, err := s.client.Do(ctx, req, column) if err != nil { return nil, resp, err } return column, resp, nil } // ProjectColumnOptions specifies the parameters to the // ProjectsService.CreateProjectColumn and // ProjectsService.UpdateProjectColumn methods. type ProjectColumnOptions struct { // The name of the project column. (Required for creation and update.) Name string `json:"name"` } // CreateProjectColumn creates a column for the specified (by number) project. 
// // GitHub API docs: https://developer.github.com/v3/projects/columns/#create-a-project-column func (s *ProjectsService) CreateProjectColumn(ctx context.Context, projectID int, opt *ProjectColumnOptions) (*ProjectColumn, *Response, error) { u := fmt.Sprintf("projects/%v/columns", projectID) req, err := s.client.NewRequest("POST", u, opt) if err != nil { return nil, nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) column := &ProjectColumn{} resp, err := s.client.Do(ctx, req, column) if err != nil { return nil, resp, err } return column, resp, nil } // UpdateProjectColumn updates a column of a GitHub Project. // // GitHub API docs: https://developer.github.com/v3/projects/columns/#update-a-project-column func (s *ProjectsService) UpdateProjectColumn(ctx context.Context, columnID int, opt *ProjectColumnOptions) (*ProjectColumn, *Response, error) { u := fmt.Sprintf("projects/columns/%v", columnID) req, err := s.client.NewRequest("PATCH", u, opt) if err != nil { return nil, nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) column := &ProjectColumn{} resp, err := s.client.Do(ctx, req, column) if err != nil { return nil, resp, err } return column, resp, nil } // DeleteProjectColumn deletes a column from a GitHub Project. // // GitHub API docs: https://developer.github.com/v3/projects/columns/#delete-a-project-column func (s *ProjectsService) DeleteProjectColumn(ctx context.Context, columnID int) (*Response, error) { u := fmt.Sprintf("projects/columns/%v", columnID) req, err := s.client.NewRequest("DELETE", u, nil) if err != nil { return nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) return s.client.Do(ctx, req, nil) } // ProjectColumnMoveOptions specifies the parameters to the // ProjectsService.MoveProjectColumn method. 
type ProjectColumnMoveOptions struct {
	// Position indicates where the column should land. Accepted values are
	// "first", "last", or "after:<column-id>", where <column-id> identifies
	// another column in the same project. (Required.)
	Position string `json:"position"`
}

// MoveProjectColumn moves a column within a GitHub Project.
//
// GitHub API docs: https://developer.github.com/v3/projects/columns/#move-a-project-column
func (s *ProjectsService) MoveProjectColumn(ctx context.Context, columnID int, opt *ProjectColumnMoveOptions) (*Response, error) {
	endpoint := fmt.Sprintf("projects/columns/%v/moves", columnID)

	request, err := s.client.NewRequest("POST", endpoint, opt)
	if err != nil {
		return nil, err
	}

	// TODO: remove custom Accept header when this API fully launches.
	request.Header.Set("Accept", mediaTypeProjectsPreview)

	// The moves endpoint returns no body of interest, so nothing is decoded.
	return s.client.Do(ctx, request, nil)
}

// ProjectCard represents a card in a column of a GitHub Project.
//
// GitHub API docs: https://developer.github.com/v3/projects/cards/#get-a-project-card
type ProjectCard struct {
	URL        *string    `json:"url,omitempty"`
	ColumnURL  *string    `json:"column_url,omitempty"`
	ContentURL *string    `json:"content_url,omitempty"`
	ID         *int       `json:"id,omitempty"`
	Note       *string    `json:"note,omitempty"`
	Creator    *User      `json:"creator,omitempty"`
	CreatedAt  *Timestamp `json:"created_at,omitempty"`
	UpdatedAt  *Timestamp `json:"updated_at,omitempty"`

	// The following fields are only populated by Webhook events.
	ColumnID *int `json:"column_id,omitempty"`
}

// ListProjectCards lists the cards in a column of a GitHub Project.
//
// GitHub API docs: https://developer.github.com/v3/projects/cards/#list-project-cards
func (s *ProjectsService) ListProjectCards(ctx context.Context, columnID int, opt *ListOptions) ([]*ProjectCard, *Response, error) {
	u := fmt.Sprintf("projects/columns/%v/cards", columnID)
	u, err := addOptions(u, opt)
	if err != nil {
		return nil, nil, err
	}

	req, err := s.client.NewRequest("GET", u, nil)
	if err != nil {
		return nil, nil, err
	}

	// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeProjectsPreview) cards := []*ProjectCard{} resp, err := s.client.Do(ctx, req, &cards) if err != nil { return nil, resp, err } return cards, resp, nil } // GetProjectCard gets a card in a column of a GitHub Project. // // GitHub API docs: https://developer.github.com/v3/projects/cards/#get-a-project-card func (s *ProjectsService) GetProjectCard(ctx context.Context, columnID int) (*ProjectCard, *Response, error) { u := fmt.Sprintf("projects/columns/cards/%v", columnID) req, err := s.client.NewRequest("GET", u, nil) if err != nil { return nil, nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) card := &ProjectCard{} resp, err := s.client.Do(ctx, req, card) if err != nil { return nil, resp, err } return card, resp, nil } // ProjectCardOptions specifies the parameters to the // ProjectsService.CreateProjectCard and // ProjectsService.UpdateProjectCard methods. type ProjectCardOptions struct { // The note of the card. Note and ContentID are mutually exclusive. Note string `json:"note,omitempty"` // The ID (not Number) of the Issue or Pull Request to associate with this card. // Note and ContentID are mutually exclusive. ContentID int `json:"content_id,omitempty"` // The type of content to associate with this card. Possible values are: "Issue", "PullRequest". ContentType string `json:"content_type,omitempty"` } // CreateProjectCard creates a card in the specified column of a GitHub Project. // // GitHub API docs: https://developer.github.com/v3/projects/cards/#create-a-project-card func (s *ProjectsService) CreateProjectCard(ctx context.Context, columnID int, opt *ProjectCardOptions) (*ProjectCard, *Response, error) { u := fmt.Sprintf("projects/columns/%v/cards", columnID) req, err := s.client.NewRequest("POST", u, opt) if err != nil { return nil, nil, err } // TODO: remove custom Accept header when this API fully launches. 
req.Header.Set("Accept", mediaTypeProjectsPreview) card := &ProjectCard{} resp, err := s.client.Do(ctx, req, card) if err != nil { return nil, resp, err } return card, resp, nil } // UpdateProjectCard updates a card of a GitHub Project. // // GitHub API docs: https://developer.github.com/v3/projects/cards/#update-a-project-card func (s *ProjectsService) UpdateProjectCard(ctx context.Context, cardID int, opt *ProjectCardOptions) (*ProjectCard, *Response, error) { u := fmt.Sprintf("projects/columns/cards/%v", cardID) req, err := s.client.NewRequest("PATCH", u, opt) if err != nil { return nil, nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) card := &ProjectCard{} resp, err := s.client.Do(ctx, req, card) if err != nil { return nil, resp, err } return card, resp, nil } // DeleteProjectCard deletes a card from a GitHub Project. // // GitHub API docs: https://developer.github.com/v3/projects/cards/#delete-a-project-card func (s *ProjectsService) DeleteProjectCard(ctx context.Context, cardID int) (*Response, error) { u := fmt.Sprintf("projects/columns/cards/%v", cardID) req, err := s.client.NewRequest("DELETE", u, nil) if err != nil { return nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) return s.client.Do(ctx, req, nil) } // ProjectCardMoveOptions specifies the parameters to the // ProjectsService.MoveProjectCard method. type ProjectCardMoveOptions struct { // Position can be one of "top", "bottom", or "after:<card-id>", where // <card-id> is the ID of a card in the same project. Position string `json:"position"` // ColumnID is the ID of a column in the same project. Note that ColumnID // is required when using Position "after:<card-id>" when that card is in // another column; otherwise it is optional. ColumnID int `json:"column_id,omitempty"` } // MoveProjectCard moves a card within a GitHub Project. 
// // GitHub API docs: https://developer.github.com/v3/projects/cards/#move-a-project-card func (s *ProjectsService) MoveProjectCard(ctx context.Context, cardID int, opt *ProjectCardMoveOptions) (*Response, error) { u := fmt.Sprintf("projects/columns/cards/%v/moves", cardID) req, err := s.client.NewRequest("POST", u, opt) if err != nil { return nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) return s.client.Do(ctx, req, nil) } Removed Inconsistency in documentation for ProjectCardOptions(#696) (#700) Inconsistent documentation was found in file github/projects.go Edited the doc line to match corresponding Github docs. Fixes #696. // Copyright 2016 The go-github AUTHORS. All rights reserved. // // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package github import ( "context" "fmt" ) // ProjectsService provides access to the projects functions in the // GitHub API. // // GitHub API docs: https://developer.github.com/v3/projects/ type ProjectsService service // Project represents a GitHub Project. type Project struct { ID *int `json:"id,omitempty"` URL *string `json:"url,omitempty"` OwnerURL *string `json:"owner_url,omitempty"` Name *string `json:"name,omitempty"` Body *string `json:"body,omitempty"` Number *int `json:"number,omitempty"` CreatedAt *Timestamp `json:"created_at,omitempty"` UpdatedAt *Timestamp `json:"updated_at,omitempty"` // The User object that generated the project. Creator *User `json:"creator,omitempty"` } func (p Project) String() string { return Stringify(p) } // GetProject gets a GitHub Project for a repo. 
// // GitHub API docs: https://developer.github.com/v3/projects/#get-a-project func (s *ProjectsService) GetProject(ctx context.Context, id int) (*Project, *Response, error) { u := fmt.Sprintf("projects/%v", id) req, err := s.client.NewRequest("GET", u, nil) if err != nil { return nil, nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) project := &Project{} resp, err := s.client.Do(ctx, req, project) if err != nil { return nil, resp, err } return project, resp, nil } // ProjectOptions specifies the parameters to the // RepositoriesService.CreateProject and // ProjectsService.UpdateProject methods. type ProjectOptions struct { // The name of the project. (Required for creation; optional for update.) Name string `json:"name,omitempty"` // The body of the project. (Optional.) Body string `json:"body,omitempty"` } // UpdateProject updates a repository project. // // GitHub API docs: https://developer.github.com/v3/projects/#update-a-project func (s *ProjectsService) UpdateProject(ctx context.Context, id int, opt *ProjectOptions) (*Project, *Response, error) { u := fmt.Sprintf("projects/%v", id) req, err := s.client.NewRequest("PATCH", u, opt) if err != nil { return nil, nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) project := &Project{} resp, err := s.client.Do(ctx, req, project) if err != nil { return nil, resp, err } return project, resp, nil } // DeleteProject deletes a GitHub Project from a repository. // // GitHub API docs: https://developer.github.com/v3/projects/#delete-a-project func (s *ProjectsService) DeleteProject(ctx context.Context, id int) (*Response, error) { u := fmt.Sprintf("projects/%v", id) req, err := s.client.NewRequest("DELETE", u, nil) if err != nil { return nil, err } // TODO: remove custom Accept header when this API fully launches. 
req.Header.Set("Accept", mediaTypeProjectsPreview) return s.client.Do(ctx, req, nil) } // ProjectColumn represents a column of a GitHub Project. // // GitHub API docs: https://developer.github.com/v3/repos/projects/ type ProjectColumn struct { ID *int `json:"id,omitempty"` Name *string `json:"name,omitempty"` ProjectURL *string `json:"project_url,omitempty"` CreatedAt *Timestamp `json:"created_at,omitempty"` UpdatedAt *Timestamp `json:"updated_at,omitempty"` } // ListProjectColumns lists the columns of a GitHub Project for a repo. // // GitHub API docs: https://developer.github.com/v3/projects/columns/#list-project-columns func (s *ProjectsService) ListProjectColumns(ctx context.Context, projectID int, opt *ListOptions) ([]*ProjectColumn, *Response, error) { u := fmt.Sprintf("projects/%v/columns", projectID) u, err := addOptions(u, opt) if err != nil { return nil, nil, err } req, err := s.client.NewRequest("GET", u, nil) if err != nil { return nil, nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) columns := []*ProjectColumn{} resp, err := s.client.Do(ctx, req, &columns) if err != nil { return nil, resp, err } return columns, resp, nil } // GetProjectColumn gets a column of a GitHub Project for a repo. // // GitHub API docs: https://developer.github.com/v3/projects/columns/#get-a-project-column func (s *ProjectsService) GetProjectColumn(ctx context.Context, id int) (*ProjectColumn, *Response, error) { u := fmt.Sprintf("projects/columns/%v", id) req, err := s.client.NewRequest("GET", u, nil) if err != nil { return nil, nil, err } // TODO: remove custom Accept header when this API fully launches. 
req.Header.Set("Accept", mediaTypeProjectsPreview) column := &ProjectColumn{} resp, err := s.client.Do(ctx, req, column) if err != nil { return nil, resp, err } return column, resp, nil } // ProjectColumnOptions specifies the parameters to the // ProjectsService.CreateProjectColumn and // ProjectsService.UpdateProjectColumn methods. type ProjectColumnOptions struct { // The name of the project column. (Required for creation and update.) Name string `json:"name"` } // CreateProjectColumn creates a column for the specified (by number) project. // // GitHub API docs: https://developer.github.com/v3/projects/columns/#create-a-project-column func (s *ProjectsService) CreateProjectColumn(ctx context.Context, projectID int, opt *ProjectColumnOptions) (*ProjectColumn, *Response, error) { u := fmt.Sprintf("projects/%v/columns", projectID) req, err := s.client.NewRequest("POST", u, opt) if err != nil { return nil, nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) column := &ProjectColumn{} resp, err := s.client.Do(ctx, req, column) if err != nil { return nil, resp, err } return column, resp, nil } // UpdateProjectColumn updates a column of a GitHub Project. // // GitHub API docs: https://developer.github.com/v3/projects/columns/#update-a-project-column func (s *ProjectsService) UpdateProjectColumn(ctx context.Context, columnID int, opt *ProjectColumnOptions) (*ProjectColumn, *Response, error) { u := fmt.Sprintf("projects/columns/%v", columnID) req, err := s.client.NewRequest("PATCH", u, opt) if err != nil { return nil, nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) column := &ProjectColumn{} resp, err := s.client.Do(ctx, req, column) if err != nil { return nil, resp, err } return column, resp, nil } // DeleteProjectColumn deletes a column from a GitHub Project. 
// // GitHub API docs: https://developer.github.com/v3/projects/columns/#delete-a-project-column func (s *ProjectsService) DeleteProjectColumn(ctx context.Context, columnID int) (*Response, error) { u := fmt.Sprintf("projects/columns/%v", columnID) req, err := s.client.NewRequest("DELETE", u, nil) if err != nil { return nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) return s.client.Do(ctx, req, nil) } // ProjectColumnMoveOptions specifies the parameters to the // ProjectsService.MoveProjectColumn method. type ProjectColumnMoveOptions struct { // Position can be one of "first", "last", or "after:<column-id>", where // <column-id> is the ID of a column in the same project. (Required.) Position string `json:"position"` } // MoveProjectColumn moves a column within a GitHub Project. // // GitHub API docs: https://developer.github.com/v3/projects/columns/#move-a-project-column func (s *ProjectsService) MoveProjectColumn(ctx context.Context, columnID int, opt *ProjectColumnMoveOptions) (*Response, error) { u := fmt.Sprintf("projects/columns/%v/moves", columnID) req, err := s.client.NewRequest("POST", u, opt) if err != nil { return nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) return s.client.Do(ctx, req, nil) } // ProjectCard represents a card in a column of a GitHub Project. // // GitHub API docs: https://developer.github.com/v3/projects/cards/#get-a-project-card type ProjectCard struct { URL *string `json:"url,omitempty"` ColumnURL *string `json:"column_url,omitempty"` ContentURL *string `json:"content_url,omitempty"` ID *int `json:"id,omitempty"` Note *string `json:"note,omitempty"` Creator *User `json:"creator,omitempty"` CreatedAt *Timestamp `json:"created_at,omitempty"` UpdatedAt *Timestamp `json:"updated_at,omitempty"` // The following fields are only populated by Webhook events. 
ColumnID *int `json:"column_id,omitempty"` } // ListProjectCards lists the cards in a column of a GitHub Project. // // GitHub API docs: https://developer.github.com/v3/projects/cards/#list-project-cards func (s *ProjectsService) ListProjectCards(ctx context.Context, columnID int, opt *ListOptions) ([]*ProjectCard, *Response, error) { u := fmt.Sprintf("projects/columns/%v/cards", columnID) u, err := addOptions(u, opt) if err != nil { return nil, nil, err } req, err := s.client.NewRequest("GET", u, nil) if err != nil { return nil, nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) cards := []*ProjectCard{} resp, err := s.client.Do(ctx, req, &cards) if err != nil { return nil, resp, err } return cards, resp, nil } // GetProjectCard gets a card in a column of a GitHub Project. // // GitHub API docs: https://developer.github.com/v3/projects/cards/#get-a-project-card func (s *ProjectsService) GetProjectCard(ctx context.Context, columnID int) (*ProjectCard, *Response, error) { u := fmt.Sprintf("projects/columns/cards/%v", columnID) req, err := s.client.NewRequest("GET", u, nil) if err != nil { return nil, nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) card := &ProjectCard{} resp, err := s.client.Do(ctx, req, card) if err != nil { return nil, resp, err } return card, resp, nil } // ProjectCardOptions specifies the parameters to the // ProjectsService.CreateProjectCard and // ProjectsService.UpdateProjectCard methods. type ProjectCardOptions struct { // The note of the card. Note and ContentID are mutually exclusive. Note string `json:"note,omitempty"` // The ID (not Number) of the Issue to associate with this card. // Note and ContentID are mutually exclusive. ContentID int `json:"content_id,omitempty"` // The type of content to associate with this card. Possible values are: "Issue". 
ContentType string `json:"content_type,omitempty"` } // CreateProjectCard creates a card in the specified column of a GitHub Project. // // GitHub API docs: https://developer.github.com/v3/projects/cards/#create-a-project-card func (s *ProjectsService) CreateProjectCard(ctx context.Context, columnID int, opt *ProjectCardOptions) (*ProjectCard, *Response, error) { u := fmt.Sprintf("projects/columns/%v/cards", columnID) req, err := s.client.NewRequest("POST", u, opt) if err != nil { return nil, nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) card := &ProjectCard{} resp, err := s.client.Do(ctx, req, card) if err != nil { return nil, resp, err } return card, resp, nil } // UpdateProjectCard updates a card of a GitHub Project. // // GitHub API docs: https://developer.github.com/v3/projects/cards/#update-a-project-card func (s *ProjectsService) UpdateProjectCard(ctx context.Context, cardID int, opt *ProjectCardOptions) (*ProjectCard, *Response, error) { u := fmt.Sprintf("projects/columns/cards/%v", cardID) req, err := s.client.NewRequest("PATCH", u, opt) if err != nil { return nil, nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) card := &ProjectCard{} resp, err := s.client.Do(ctx, req, card) if err != nil { return nil, resp, err } return card, resp, nil } // DeleteProjectCard deletes a card from a GitHub Project. // // GitHub API docs: https://developer.github.com/v3/projects/cards/#delete-a-project-card func (s *ProjectsService) DeleteProjectCard(ctx context.Context, cardID int) (*Response, error) { u := fmt.Sprintf("projects/columns/cards/%v", cardID) req, err := s.client.NewRequest("DELETE", u, nil) if err != nil { return nil, err } // TODO: remove custom Accept header when this API fully launches. 
req.Header.Set("Accept", mediaTypeProjectsPreview) return s.client.Do(ctx, req, nil) } // ProjectCardMoveOptions specifies the parameters to the // ProjectsService.MoveProjectCard method. type ProjectCardMoveOptions struct { // Position can be one of "top", "bottom", or "after:<card-id>", where // <card-id> is the ID of a card in the same project. Position string `json:"position"` // ColumnID is the ID of a column in the same project. Note that ColumnID // is required when using Position "after:<card-id>" when that card is in // another column; otherwise it is optional. ColumnID int `json:"column_id,omitempty"` } // MoveProjectCard moves a card within a GitHub Project. // // GitHub API docs: https://developer.github.com/v3/projects/cards/#move-a-project-card func (s *ProjectsService) MoveProjectCard(ctx context.Context, cardID int, opt *ProjectCardMoveOptions) (*Response, error) { u := fmt.Sprintf("projects/columns/cards/%v/moves", cardID) req, err := s.client.NewRequest("POST", u, opt) if err != nil { return nil, err } // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeProjectsPreview) return s.client.Do(ctx, req, nil) }
/*- * Copyright (c) 2016,2017, F5 Networks, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package appmanager import ( "encoding/json" "fmt" "io/ioutil" "reflect" "sort" "strconv" "strings" "sync" "time" log "github.com/F5Networks/k8s-bigip-ctlr/pkg/vlogger" "github.com/F5Networks/k8s-bigip-ctlr/pkg/writer" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes" "k8s.io/client-go/pkg/api/v1" "k8s.io/client-go/pkg/apis/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" watch "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes/scheme" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" rest "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" routeapi "github.com/openshift/origin/pkg/route/api" ) const DefaultConfigMapLabel = "f5type in (virtual-server)" const vsBindAddrAnnotation = "status.virtual-server.f5.com/ip" const ingressSslRedirect = "ingress.kubernetes.io/ssl-redirect" const ingressAllowHttp = "ingress.kubernetes.io/allow-http" const ingHealthMonitorAnnotation = "virtual-server.f5.com/health" type ResourceMap map[int32][]*ResourceConfig type Manager struct { resources *Resources customProfiles CustomProfileStore irulesMap IRulesMap intDgMap InternalDataGroupMap kubeClient kubernetes.Interface restClientv1 rest.Interface restClientv1beta1 
rest.Interface routeClientV1 rest.Interface configWriter writer.Writer initialState bool // Use internal node IPs useNodeInternal bool // Running in nodeport (or cluster) mode isNodePort bool // Mutex to control access to node data // FIXME: Simple synchronization for now, it remains to be determined if we'll // need something more complicated (channels, etc?) oldNodesMutex sync.Mutex // Nodes from previous iteration of node polling oldNodes []string // Mutex for all informers (for informer CRUD) informersMutex sync.Mutex // Mutex for irulesMap irulesMutex sync.Mutex // Mutex for intDgMap intDgMutex sync.Mutex // App informer support vsQueue workqueue.RateLimitingInterface appInformers map[string]*appInformer // Namespace informer support (namespace labels) nsQueue workqueue.RateLimitingInterface nsInformer cache.SharedIndexInformer // Event recorder broadcaster record.EventBroadcaster eventRecorder record.EventRecorder eventSource v1.EventSource // Route configurations routeConfig RouteConfig // Currently configured node label selector nodeLabelSelector string } // Struct to allow NewManager to receive all or only specific parameters. 
// Params groups the inputs to NewManager.
type Params struct {
	KubeClient        kubernetes.Interface
	restClient        rest.Interface // package local for unit testing only
	RouteClientV1     rest.Interface
	ConfigWriter      writer.Writer
	UseNodeInternal   bool
	IsNodePort        bool
	RouteConfig       RouteConfig
	InitialState      bool                 // Unit testing only
	EventRecorder     record.EventRecorder // Unit testing only
	NodeLabelSelector string
}

// Configuration options for Routes in OpenShift
type RouteConfig struct {
	RouteVSAddr string
	RouteLabel  string
}

// Create and return a new app manager that meets the Manager interface
func NewManager(params *Params) *Manager {
	vsQueue := workqueue.NewNamedRateLimitingQueue(
		workqueue.DefaultControllerRateLimiter(), "virtual-server-controller")
	nsQueue := workqueue.NewNamedRateLimitingQueue(
		workqueue.DefaultControllerRateLimiter(), "namespace-controller")
	manager := Manager{
		resources:         NewResources(),
		customProfiles:    NewCustomProfiles(),
		irulesMap:         make(IRulesMap),
		intDgMap:          make(InternalDataGroupMap),
		kubeClient:        params.KubeClient,
		restClientv1:      params.restClient,
		restClientv1beta1: params.restClient,
		routeClientV1:     params.RouteClientV1,
		configWriter:      params.ConfigWriter,
		useNodeInternal:   params.UseNodeInternal,
		isNodePort:        params.IsNodePort,
		initialState:      params.InitialState,
		eventRecorder:     params.EventRecorder,
		routeConfig:       params.RouteConfig,
		nodeLabelSelector: params.NodeLabelSelector,
		vsQueue:           vsQueue,
		nsQueue:           nsQueue,
		appInformers:      make(map[string]*appInformer),
	}
	if nil != manager.kubeClient && nil == manager.restClientv1 {
		// This is the normal production case, but need the checks for unit tests.
		manager.restClientv1 = manager.kubeClient.Core().RESTClient()
	}
	if nil != manager.kubeClient && nil == manager.restClientv1beta1 {
		// This is the normal production case, but need the checks for unit tests.
		manager.restClientv1beta1 = manager.kubeClient.Extensions().RESTClient()
	}
	manager.eventSource = v1.EventSource{Component: "k8s-bigip-ctlr"}
	manager.broadcaster = record.NewBroadcaster()
	if nil == manager.eventRecorder {
		manager.eventRecorder = manager.broadcaster.NewRecorder(scheme.Scheme, manager.eventSource)
	}

	return &manager
}

// loadDefaultCert creates (once per namespace) a server SSL profile from the
// pod's default service CA certificate, for reencrypt routes that have not
// set their own destination CA certificate. The bool result reports whether
// a new profile was created by this call.
func (appMgr *Manager) loadDefaultCert(
	namespace,
	serverName,
	vsName string,
) (*ProfileRef, bool) {
	// OpenShift will put the default server SSL cert on each pod. We create a
	// server SSL profile for it and associate it to any reencrypt routes that
	// have not explicitly set a certificate.
	profileName := "openshift_route_cluster_default-server-ssl"
	profile := ProfileRef{
		Name:      profileName,
		Partition: DEFAULT_PARTITION,
		Context:   customProfileServer,
	}
	appMgr.customProfiles.Lock()
	defer appMgr.customProfiles.Unlock()
	skey := secretKey{Name: profileName, Namespace: namespace}
	_, found := appMgr.customProfiles.profs[skey]
	if !found {
		path := "/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt"
		data, err := ioutil.ReadFile(path)
		if nil != err {
			log.Errorf("Unable to load default cluster certificate '%v': %v",
				path, err)
			return nil, false
		}
		appMgr.customProfiles.profs[skey] =
			NewCustomProfile(
				profile,
				string(data),
				"", // no key
				serverName,
				vsName,
				appMgr.customProfiles,
			)
	}
	return &profile, !found
}

// addIRule registers an iRule in the manager's iRule map.
func (appMgr *Manager) addIRule(name, partition, rule string) {
	appMgr.irulesMutex.Lock()
	defer appMgr.irulesMutex.Unlock()

	key := nameRef{
		Name:      name,
		Partition: partition,
	}
	appMgr.irulesMap[key] = NewIRule(name, partition, rule)
}

// addInternalDataGroup registers an empty internal data group.
func (appMgr *Manager) addInternalDataGroup(name, partition string) {
	appMgr.intDgMutex.Lock()
	defer appMgr.intDgMutex.Unlock()

	key := nameRef{
		Name:      name,
		Partition: partition,
	}
	appMgr.intDgMap[key] = NewInternalDataGroup(name, partition)
}

// watchingAllNamespacesLocked reports whether the "" (all-namespaces)
// informer entry is registered. Callers must hold informersMutex.
func (appMgr *Manager) watchingAllNamespacesLocked() bool {
	if 0 == len(appMgr.appInformers) {
		// Not watching any namespaces.
		return false
	}
	_, watchingAll := appMgr.appInformers[""]
	return watchingAll
}

// AddNamespace starts watching resources in the given namespace ("" means
// all namespaces).
func (appMgr *Manager) AddNamespace(
	namespace string,
	cfgMapSelector labels.Selector,
	resyncPeriod time.Duration,
) error {
	appMgr.informersMutex.Lock()
	defer appMgr.informersMutex.Unlock()
	_, err := appMgr.addNamespaceLocked(namespace, cfgMapSelector, resyncPeriod)
	return err
}

// addNamespaceLocked registers informers for a namespace. Watching all
// namespaces ("") and watching specific ones are mutually exclusive.
// Callers must hold informersMutex.
func (appMgr *Manager) addNamespaceLocked(
	namespace string,
	cfgMapSelector labels.Selector,
	resyncPeriod time.Duration,
) (*appInformer, error) {
	if appMgr.watchingAllNamespacesLocked() {
		return nil, fmt.Errorf(
			"Cannot add additional namespaces when already watching all.")
	}
	if len(appMgr.appInformers) > 0 && "" == namespace {
		return nil, fmt.Errorf(
			"Cannot watch all namespaces when already watching specific ones.")
	}
	var appInf *appInformer
	if appInf, found := appMgr.appInformers[namespace]; found {
		return appInf, nil
	}
	appInf = appMgr.newAppInformer(namespace, cfgMapSelector, resyncPeriod)
	appMgr.appInformers[namespace] = appInf
	return appInf, nil
}

// removeNamespace stops tracking a namespace's informer set.
func (appMgr *Manager) removeNamespace(namespace string) error {
	appMgr.informersMutex.Lock()
	defer appMgr.informersMutex.Unlock()
	err := appMgr.removeNamespaceLocked(namespace)
	return err
}

// removeNamespaceLocked deletes the informer map entry (it does not stop
// the informers). Callers must hold informersMutex.
func (appMgr *Manager) removeNamespaceLocked(namespace string) error {
	if _, found := appMgr.appInformers[namespace]; !found {
		return fmt.Errorf("No informers exist for namespace %v\n", namespace)
	}
	delete(appMgr.appInformers, namespace)
	return nil
}

// AddNamespaceLabelInformer watches namespaces matching labelSelector and
// adds/removes per-namespace informers as namespaces come and go. Mutually
// exclusive with AddNamespace.
func (appMgr *Manager) AddNamespaceLabelInformer(
	labelSelector labels.Selector,
	resyncPeriod time.Duration,
) error {
	appMgr.informersMutex.Lock()
	defer appMgr.informersMutex.Unlock()
	if nil != appMgr.nsInformer {
		return fmt.Errorf("Already have a namespace label informer added.")
	}
	if 0 != len(appMgr.appInformers) {
		return fmt.Errorf("Cannot set a namespace label informer when informers " +
			"have been setup for one or more namespaces.")
	}
	appMgr.nsInformer = cache.NewSharedIndexInformer(
		newListWatchWithLabelSelector(
appMgr.restClientv1, "namespaces", "", labelSelector, ), &v1.Namespace{}, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, ) appMgr.nsInformer.AddEventHandlerWithResyncPeriod( &cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { appMgr.enqueueNamespace(obj) }, UpdateFunc: func(old, cur interface{}) { appMgr.enqueueNamespace(cur) }, DeleteFunc: func(obj interface{}) { appMgr.enqueueNamespace(obj) }, }, resyncPeriod, ) return nil } func (appMgr *Manager) enqueueNamespace(obj interface{}) { ns := obj.(*v1.Namespace) appMgr.nsQueue.Add(ns.ObjectMeta.Name) } func (appMgr *Manager) namespaceWorker() { for appMgr.processNextNamespace() { } } func (appMgr *Manager) processNextNamespace() bool { key, quit := appMgr.nsQueue.Get() if quit { return false } defer appMgr.nsQueue.Done(key) err := appMgr.syncNamespace(key.(string)) if err == nil { appMgr.nsQueue.Forget(key) return true } utilruntime.HandleError(fmt.Errorf("Sync %v failed with %v", key, err)) appMgr.nsQueue.AddRateLimited(key) return true } func (appMgr *Manager) syncNamespace(nsName string) error { startTime := time.Now() defer func() { endTime := time.Now() log.Debugf("Finished syncing namespace %+v (%v)", nsName, endTime.Sub(startTime)) }() _, exists, err := appMgr.nsInformer.GetIndexer().GetByKey(nsName) if nil != err { log.Warningf("Error looking up namespace '%v': %v\n", nsName, err) return err } appMgr.informersMutex.Lock() defer appMgr.informersMutex.Unlock() appInf, found := appMgr.getNamespaceInformerLocked(nsName) if exists && found { return nil } if exists { // exists but not found in informers map, add cfgMapSelector, err := labels.Parse(DefaultConfigMapLabel) if err != nil { return fmt.Errorf("Failed to parse Label Selector string: %v", err) } appInf, err = appMgr.addNamespaceLocked(nsName, cfgMapSelector, 0) if err != nil { return fmt.Errorf("Failed to add informers for namespace %v: %v", nsName, err) } appInf.start() appInf.waitForCacheSync() } else { 
// does not exist but found in informers map, delete // Clean up all resources that reference a removed namespace appInf.stopInformers() appMgr.removeNamespaceLocked(nsName) appMgr.resources.Lock() defer appMgr.resources.Unlock() rsDeleted := 0 appMgr.resources.ForEach(func(key serviceKey, cfg *ResourceConfig) { if key.Namespace == nsName { if appMgr.resources.Delete(key, "") { rsDeleted += 1 } } }) if rsDeleted > 0 { appMgr.outputConfigLocked() } } return nil } func (appMgr *Manager) GetWatchedNamespaces() []string { appMgr.informersMutex.Lock() defer appMgr.informersMutex.Unlock() var namespaces []string for k, _ := range appMgr.appInformers { namespaces = append(namespaces, k) } return namespaces } func (appMgr *Manager) GetNamespaceLabelInformer() cache.SharedIndexInformer { return appMgr.nsInformer } type serviceQueueKey struct { Namespace string ServiceName string } type appInformer struct { namespace string cfgMapInformer cache.SharedIndexInformer svcInformer cache.SharedIndexInformer endptInformer cache.SharedIndexInformer ingInformer cache.SharedIndexInformer routeInformer cache.SharedIndexInformer stopCh chan struct{} } func (appMgr *Manager) newAppInformer( namespace string, cfgMapSelector labels.Selector, resyncPeriod time.Duration, ) *appInformer { appInf := appInformer{ namespace: namespace, stopCh: make(chan struct{}), cfgMapInformer: cache.NewSharedIndexInformer( newListWatchWithLabelSelector( appMgr.restClientv1, "configmaps", namespace, cfgMapSelector, ), &v1.ConfigMap{}, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, ), svcInformer: cache.NewSharedIndexInformer( newListWatchWithLabelSelector( appMgr.restClientv1, "services", namespace, labels.Everything(), ), &v1.Service{}, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, ), endptInformer: cache.NewSharedIndexInformer( newListWatchWithLabelSelector( appMgr.restClientv1, "endpoints", namespace, labels.Everything(), ), 
			&v1.Endpoints{},
			resyncPeriod,
			cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		),
		ingInformer: cache.NewSharedIndexInformer(
			newListWatchWithLabelSelector(
				appMgr.restClientv1beta1,
				"ingresses",
				namespace,
				labels.Everything(),
			),
			&v1beta1.Ingress{},
			resyncPeriod,
			cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		),
	}
	// Routes are only watched when a route client was configured (OpenShift).
	if nil != appMgr.routeClientV1 {
		var label labels.Selector
		var err error
		if len(appMgr.routeConfig.RouteLabel) == 0 {
			label = labels.Everything()
		} else {
			label, err = labels.Parse(appMgr.routeConfig.RouteLabel)
			if err != nil {
				log.Errorf("Failed to parse Label Selector string: %v", err)
			}
		}
		appInf.routeInformer = cache.NewSharedIndexInformer(
			newListWatchWithLabelSelector(
				appMgr.routeClientV1,
				"routes",
				namespace,
				label,
			),
			&routeapi.Route{},
			resyncPeriod,
			cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		)
	}

	// Enqueue the affected service key(s) on any add/update/delete of a
	// watched resource.
	appInf.cfgMapInformer.AddEventHandlerWithResyncPeriod(
		&cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { appMgr.enqueueConfigMap(obj) },
			UpdateFunc: func(old, cur interface{}) { appMgr.enqueueConfigMap(cur) },
			DeleteFunc: func(obj interface{}) { appMgr.enqueueConfigMap(obj) },
		},
		resyncPeriod,
	)

	appInf.svcInformer.AddEventHandlerWithResyncPeriod(
		&cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { appMgr.enqueueService(obj) },
			UpdateFunc: func(old, cur interface{}) { appMgr.enqueueService(cur) },
			DeleteFunc: func(obj interface{}) { appMgr.enqueueService(obj) },
		},
		resyncPeriod,
	)

	appInf.endptInformer.AddEventHandlerWithResyncPeriod(
		&cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { appMgr.enqueueEndpoints(obj) },
			UpdateFunc: func(old, cur interface{}) { appMgr.enqueueEndpoints(cur) },
			DeleteFunc: func(obj interface{}) { appMgr.enqueueEndpoints(obj) },
		},
		resyncPeriod,
	)

	appInf.ingInformer.AddEventHandlerWithResyncPeriod(
		&cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { appMgr.enqueueIngress(obj) },
			UpdateFunc: func(old, cur interface{}) { appMgr.enqueueIngress(cur) },
			DeleteFunc: func(obj interface{}) { appMgr.enqueueIngress(obj) },
		},
		resyncPeriod,
	)

	if nil != appMgr.routeClientV1 {
		appInf.routeInformer.AddEventHandlerWithResyncPeriod(
			&cache.ResourceEventHandlerFuncs{
				AddFunc:    func(obj interface{}) { appMgr.enqueueRoute(obj) },
				UpdateFunc: func(old, cur interface{}) { appMgr.enqueueRoute(cur) },
				DeleteFunc: func(obj interface{}) { appMgr.enqueueRoute(obj) },
			},
			resyncPeriod,
		)
	}

	return &appInf
}

// newListWatchWithLabelSelector builds a ListerWatcher for the given
// resource and namespace, restricted by labelSelector.
func newListWatchWithLabelSelector(
	c cache.Getter,
	resource string,
	namespace string,
	labelSelector labels.Selector,
) cache.ListerWatcher {
	listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
		return c.Get().
			Namespace(namespace).
			Resource(resource).
			VersionedParams(&options, metav1.ParameterCodec).
			LabelsSelectorParam(labelSelector).
			Do().
			Get()
	}
	watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
		return c.Get().
			Prefix("watch").
			Namespace(namespace).
			Resource(resource).
			VersionedParams(&options, metav1.ParameterCodec).
			LabelsSelectorParam(labelSelector).
			Watch()
	}
	return &cache.ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
}

// enqueueConfigMap queues the service keys affected by a ConfigMap change.
func (appMgr *Manager) enqueueConfigMap(obj interface{}) {
	if ok, keys := appMgr.checkValidConfigMap(obj); ok {
		for _, key := range keys {
			appMgr.vsQueue.Add(*key)
		}
	}
}

// enqueueService queues the service keys affected by a Service change.
func (appMgr *Manager) enqueueService(obj interface{}) {
	if ok, keys := appMgr.checkValidService(obj); ok {
		for _, key := range keys {
			appMgr.vsQueue.Add(*key)
		}
	}
}

// enqueueEndpoints queues the service keys affected by an Endpoints change.
func (appMgr *Manager) enqueueEndpoints(obj interface{}) {
	if ok, keys := appMgr.checkValidEndpoints(obj); ok {
		for _, key := range keys {
			appMgr.vsQueue.Add(*key)
		}
	}
}

// enqueueIngress queues the service keys affected by an Ingress change.
func (appMgr *Manager) enqueueIngress(obj interface{}) {
	if ok, keys := appMgr.checkValidIngress(obj); ok {
		for _, key := range keys {
			appMgr.vsQueue.Add(*key)
		}
	}
}

// enqueueRoute queues the service key affected by a Route change.
func (appMgr *Manager) enqueueRoute(obj interface{}) {
	if ok, key := appMgr.checkValidRoute(obj); ok {
		appMgr.vsQueue.Add(*key)
	}
}

// getNamespaceInformer looks up the informer set covering ns.
func (appMgr *Manager) getNamespaceInformer(
	ns string,
) (*appInformer, bool) {
	appMgr.informersMutex.Lock()
	defer appMgr.informersMutex.Unlock()
	appInf, found := appMgr.getNamespaceInformerLocked(ns)
	return appInf, found
}

// getNamespaceInformerLocked is getNamespaceInformer without locking; when
// watching all namespaces, the "" informer covers every ns.
func (appMgr *Manager) getNamespaceInformerLocked(
	ns string,
) (*appInformer, bool) {
	toFind := ns
	if appMgr.watchingAllNamespacesLocked() {
		toFind = ""
	}
	appInf, found := appMgr.appInformers[toFind]
	return appInf, found
}

// start runs every informer in the set until stopCh is closed.
func (appInf *appInformer) start() {
	go appInf.cfgMapInformer.Run(appInf.stopCh)
	go appInf.svcInformer.Run(appInf.stopCh)
	go appInf.endptInformer.Run(appInf.stopCh)
	go appInf.ingInformer.Run(appInf.stopCh)
	if nil != appInf.routeInformer {
		go appInf.routeInformer.Run(appInf.stopCh)
	}
}

// waitForCacheSync blocks until all informer caches in the set have synced
// (including the route informer when present).
func (appInf *appInformer) waitForCacheSync() {
	if nil != appInf.routeInformer {
		cache.WaitForCacheSync(
			appInf.stopCh,
			appInf.cfgMapInformer.HasSynced,
			appInf.svcInformer.HasSynced,
			appInf.endptInformer.HasSynced,
			appInf.ingInformer.HasSynced,
			appInf.routeInformer.HasSynced,
		)
	} else {
		cache.WaitForCacheSync(
			appInf.stopCh,
			appInf.cfgMapInformer.HasSynced,
			appInf.svcInformer.HasSynced,
			appInf.endptInformer.HasSynced,
			appInf.ingInformer.HasSynced,
		)
	}
}

// stopInformers signals every informer in the set to stop.
func (appInf *appInformer) stopInformers() {
	close(appInf.stopCh)
}

// IsNodePort reports whether the controller runs in nodeport mode.
func (appMgr *Manager) IsNodePort() bool {
	return appMgr.isNodePort
}

// UseNodeInternal reports whether internal node IPs are used.
func (appMgr *Manager) UseNodeInternal() bool {
	return appMgr.useNodeInternal
}

// ConfigWriter returns the writer used to emit BIG-IP configuration.
func (appMgr *Manager) ConfigWriter() writer.Writer {
	return appMgr.configWriter
}

// Run starts the manager's processing loop in the background.
func (appMgr *Manager) Run(stopCh <-chan struct{}) {
	go appMgr.runImpl(stopCh)
}

// runImpl installs the built-in iRules/data groups, starts the informers
// and workers, and blocks until stopCh closes.
func (appMgr *Manager) runImpl(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer appMgr.vsQueue.ShutDown()
	defer appMgr.nsQueue.ShutDown()

	appMgr.addIRule(
		httpRedirectIRuleName, DEFAULT_PARTITION, httpRedirectIRule(DEFAULT_HTTPS_PORT))
	if nil != appMgr.routeClientV1 {
		appMgr.addIRule(
			sslPassthroughIRuleName, DEFAULT_PARTITION, sslPassthroughIRule())
		appMgr.addInternalDataGroup(passthroughHostsDgName, DEFAULT_PARTITION)
		appMgr.addInternalDataGroup(reencryptHostsDgName, DEFAULT_PARTITION)
	}

	if nil != appMgr.nsInformer {
		// Using one worker for namespace label changes.
		appMgr.startAndSyncNamespaceInformer(stopCh)
		go wait.Until(appMgr.namespaceWorker, time.Second, stopCh)
	}

	appMgr.startAndSyncAppInformers()

	// Using only one virtual server worker currently.
	go wait.Until(appMgr.virtualServerWorker, time.Second, stopCh)

	<-stopCh
	appMgr.stopAppInformers()
}

// startAndSyncNamespaceInformer runs the namespace label informer and waits
// for its cache to sync.
func (appMgr *Manager) startAndSyncNamespaceInformer(stopCh <-chan struct{}) {
	appMgr.informersMutex.Lock()
	defer appMgr.informersMutex.Unlock()
	go appMgr.nsInformer.Run(stopCh)
	cache.WaitForCacheSync(stopCh, appMgr.nsInformer.HasSynced)
}

// startAndSyncAppInformers starts all per-namespace informers and waits for
// their caches to sync.
func (appMgr *Manager) startAndSyncAppInformers() {
	appMgr.informersMutex.Lock()
	defer appMgr.informersMutex.Unlock()
	appMgr.startAppInformersLocked()
	appMgr.waitForCacheSyncLocked()
}

// startAppInformersLocked starts informers; callers hold informersMutex.
func (appMgr *Manager) startAppInformersLocked() {
	for _, appInf := range appMgr.appInformers {
		appInf.start()
	}
}

// waitForCacheSync waits for every namespace's informer caches to sync.
func (appMgr *Manager) waitForCacheSync() {
	appMgr.informersMutex.Lock()
	defer appMgr.informersMutex.Unlock()
	appMgr.waitForCacheSyncLocked()
}

// waitForCacheSyncLocked is waitForCacheSync without locking.
func (appMgr *Manager) waitForCacheSyncLocked() {
	for _, appInf := range appMgr.appInformers {
		appInf.waitForCacheSync()
	}
}

// stopAppInformers stops every namespace's informers.
func (appMgr *Manager) stopAppInformers() {
	appMgr.informersMutex.Lock()
	defer appMgr.informersMutex.Unlock()
	for _, appInf := range appMgr.appInformers {
		appInf.stopInformers()
	}
}

// virtualServerWorker drains the virtual-server queue until shutdown.
func (appMgr *Manager) virtualServerWorker() {
	for appMgr.processNextVirtualServer() {
	}
}

// processNextVirtualServer handles one queued service key; returns false
// only when the queue has been shut down. Failed syncs are re-queued with
// rate limiting.
func (appMgr *Manager) processNextVirtualServer() bool {
	key, quit := appMgr.vsQueue.Get()
	if quit {
		// The controller is shutting down.
		return false
	}
	defer appMgr.vsQueue.Done(key)

	err := appMgr.syncVirtualServer(key.(serviceQueueKey))
	if err == nil {
		appMgr.vsQueue.Forget(key)
		return true
	}

	utilruntime.HandleError(fmt.Errorf("Sync %v failed with %v", key, err))
	appMgr.vsQueue.AddRateLimited(key)

	return true
}

// vsSyncStats tallies the work done during one syncVirtualServer pass.
type vsSyncStats struct {
	vsFound   int
	vsUpdated int
	vsDeleted int
	cpUpdated int
	dgUpdated int
}

// syncVirtualServer reconciles all resources (ConfigMaps, Ingresses and,
// on OpenShift, Routes) tied to one service, then writes out the config if
// anything changed.
func (appMgr *Manager) syncVirtualServer(sKey serviceQueueKey) error {
	startTime := time.Now()
	defer func() {
		endTime := time.Now()
		log.Debugf("Finished syncing virtual servers %+v (%v)",
			sKey, endTime.Sub(startTime))
	}()

	// Get the informers for the namespace. This will tell us if we care about
	// this item.
	appInf, haveNamespace := appMgr.getNamespaceInformer(sKey.Namespace)
	if !haveNamespace {
		// This shouldn't happen as the namespace is checked for every item before
		// it is added to the queue, but issue a warning if it does.
		log.Warningf(
			"Received an update for an item from an un-watched namespace %v",
			sKey.Namespace)
		return nil
	}

	// Lookup the service
	svcKey := sKey.Namespace + "/" + sKey.ServiceName
	obj, svcFound, err := appInf.svcInformer.GetIndexer().GetByKey(svcKey)
	if nil != err {
		// Returning non-nil err will re-queue this item with rate-limiting.
		log.Warningf("Error looking up service '%v': %v\n", svcKey, err)
		return err
	}

	// Use a map to allow ports in the service to be looked up quickly while
	// looping through the ConfigMaps. The value is not currently used.
	svcPortMap := make(map[int32]bool)
	var svc *v1.Service
	if svcFound {
		svc = obj.(*v1.Service)
		for _, portSpec := range svc.Spec.Ports {
			svcPortMap[portSpec.Port] = false
		}
	}

	// rsMap stores all resources currently in Resources matching sKey, indexed by port
	rsMap := appMgr.getResourcesForKey(sKey)

	var stats vsSyncStats
	err = appMgr.syncConfigMaps(&stats, sKey, rsMap, svcPortMap, svc, appInf)
	if nil != err {
		return err
	}

	err = appMgr.syncIngresses(&stats, sKey, rsMap, svcPortMap, svc, appInf)
	if nil != err {
		return err
	}
	if nil != appInf.routeInformer {
		err = appMgr.syncRoutes(&stats, sKey, rsMap, svcPortMap, svc, appInf)
		if nil != err {
			return err
		}
	}

	if len(rsMap) > 0 {
		// We get here when there are ports defined in the service that don't
		// have a corresponding config map.
stats.vsDeleted = appMgr.deleteUnusedResources(sKey, rsMap) appMgr.deleteUnusedRoutes(sKey.Namespace) } log.Debugf("Updated %v of %v virtual server configs, deleted %v", stats.vsUpdated, stats.vsFound, stats.vsDeleted) // delete any custom profiles that are no longer referenced appMgr.deleteUnusedProfiles(sKey.Namespace) if stats.vsUpdated > 0 || stats.vsDeleted > 0 || stats.cpUpdated > 0 || stats.dgUpdated > 0 { appMgr.outputConfig() } else if appMgr.vsQueue.Len() == 0 && appMgr.nsQueue.Len() == 0 { appMgr.resources.Lock() defer appMgr.resources.Unlock() if !appMgr.initialState { appMgr.outputConfigLocked() } } return nil } func (appMgr *Manager) syncConfigMaps( stats *vsSyncStats, sKey serviceQueueKey, rsMap ResourceMap, svcPortMap map[int32]bool, svc *v1.Service, appInf *appInformer, ) error { cfgMapsByIndex, err := appInf.cfgMapInformer.GetIndexer().ByIndex( "namespace", sKey.Namespace) if nil != err { log.Warningf("Unable to list config maps for namespace '%v': %v", sKey.Namespace, err) return err } for _, obj := range cfgMapsByIndex { // We need to look at all config maps in the store, parse the data blob, // and see if it belongs to the service that has changed. cm := obj.(*v1.ConfigMap) if cm.ObjectMeta.Namespace != sKey.Namespace { continue } rsCfg, err := parseConfigMap(cm) if nil != err { // Ignore this config map for the time being. When the user updates it // so that it is valid it will be requeued. fmt.Errorf("Error parsing ConfigMap %v_%v", cm.ObjectMeta.Namespace, cm.ObjectMeta.Name) continue } // Check if SSLProfile(s) are contained in Secrets for _, profile := range rsCfg.Virtual.GetFrontendSslProfileNames() { // Check if profile is contained in a Secret secret, err := appMgr.kubeClient.Core().Secrets(cm.ObjectMeta.Namespace). 
Get(profile, metav1.GetOptions{}) if err != nil { // No secret, so we assume the profile is a BIG-IP default log.Infof("Couldn't find Secret with name '%s', parsing secretName as path.", profile) continue } err, updated := appMgr.handleSslProfile(rsCfg, secret, cm.ObjectMeta.Namespace) if err != nil { log.Warningf("%v", err) continue } if updated { stats.cpUpdated += 1 } // Replace the current stored sslProfile with a correctly formatted // profile (since this profile is just a secret name) rsCfg.Virtual.RemoveFrontendSslProfileName(profile) secretName := formatIngressSslProfileName( rsCfg.Virtual.Partition + "/" + profile) rsCfg.Virtual.AddFrontendSslProfileName(secretName) } rsName := rsCfg.Virtual.VirtualServerName if ok, found, updated := appMgr.handleConfigForType( rsCfg, sKey, rsMap, rsName, svcPortMap, svc, appInf, ""); !ok { stats.vsUpdated += updated continue } else { stats.vsFound += found stats.vsUpdated += updated } // Set a status annotation to contain the virtualAddress bindAddr if rsCfg.Virtual.IApp == "" && rsCfg.Virtual.VirtualAddress != nil && rsCfg.Virtual.VirtualAddress.BindAddr != "" { appMgr.setBindAddrAnnotation(cm, sKey, rsCfg) } } return nil } func (appMgr *Manager) syncIngresses( stats *vsSyncStats, sKey serviceQueueKey, rsMap ResourceMap, svcPortMap map[int32]bool, svc *v1.Service, appInf *appInformer, ) error { ingByIndex, err := appInf.ingInformer.GetIndexer().ByIndex( "namespace", sKey.Namespace) if nil != err { log.Warningf("Unable to list ingresses for namespace '%v': %v", sKey.Namespace, err) return err } for _, obj := range ingByIndex { // We need to look at all ingresses in the store, parse the data blob, // and see if it belongs to the service that has changed. 
		ing := obj.(*v1beta1.Ingress)
		if ing.ObjectMeta.Namespace != sKey.Namespace {
			continue
		}
		// An Ingress may produce an HTTP and/or an HTTPS virtual server.
		for _, portStruct := range appMgr.virtualPorts(ing) {
			rsCfg := createRSConfigFromIngress(ing, sKey.Namespace,
				appInf.svcInformer.GetIndexer(), portStruct)
			if rsCfg == nil {
				// Currently, an error is returned only if the Ingress is one we
				// do not care about
				continue
			}

			// Handle TLS configuration
			updated := appMgr.handleIngressTls(rsCfg, ing)
			if updated {
				stats.cpUpdated += 1
			}

			// Handle Ingress health monitors
			rsName := rsCfg.Virtual.VirtualServerName
			hmStr, found := ing.ObjectMeta.Annotations[ingHealthMonitorAnnotation]
			if found {
				var monitors IngressHealthMonitors
				err := json.Unmarshal([]byte(hmStr), &monitors)
				if err != nil {
					msg := fmt.Sprintf(
						"Unable to parse health monitor JSON array '%v': %v", hmStr, err)
					log.Errorf("%s", msg)
					appMgr.recordIngressEvent(ing, "InvalidData", msg, rsName)
				} else {
					if nil != ing.Spec.Backend {
						appMgr.handleSingleServiceHealthMonitors(
							rsName, rsCfg, ing, monitors)
					} else {
						appMgr.handleMultiServiceHealthMonitors(
							rsName, rsCfg, ing, monitors)
					}
				}
				rsCfg.SortMonitors()
			}

			// make sure all policies across configs for this Ingress match each other
			appMgr.resources.Lock()
			cfgs, keys := appMgr.resources.GetAllWithName(rsCfg.Virtual.VirtualServerName)
			for i, cfg := range cfgs {
				for _, policy := range rsCfg.Policies {
					if policy.Name == rsCfg.Virtual.VirtualServerName {
						cfg.SetPolicy(policy)
					}
				}
				appMgr.resources.Assign(keys[i], rsCfg.Virtual.VirtualServerName, cfg)
			}
			appMgr.resources.Unlock()

			if ok, found, updated := appMgr.handleConfigForType(
				rsCfg, sKey, rsMap, rsName, svcPortMap, svc, appInf, ""); !ok {
				stats.vsUpdated += updated
				continue
			} else {
				// A multi-service Ingress only counts as updated once all of
				// its pools have been processed.
				if updated > 0 && !appMgr.processAllMultiSvc(len(rsCfg.Pools),
					rsCfg.Virtual.VirtualServerName) {
					updated -= 1
				}
				stats.vsFound += found
				stats.vsUpdated += updated
				if updated > 0 {
					msg := fmt.Sprintf(
						"Created a ResourceConfig '%v' for the Ingress.",
						rsCfg.Virtual.VirtualServerName)
					appMgr.recordIngressEvent(ing, "ResourceConfigured", msg, "")
				}
			}
			// Set the Ingress Status IP address
			appMgr.setIngressStatus(ing, rsCfg)
		}
	}
	return nil
}

// syncRoutes folds every OpenShift Route in the namespace that references
// sKey's service into rsMap/stats, and rebuilds the passthrough/reencrypt
// internal data groups from all current routes.
func (appMgr *Manager) syncRoutes(
	stats *vsSyncStats,
	sKey serviceQueueKey,
	rsMap ResourceMap,
	svcPortMap map[int32]bool,
	svc *v1.Service,
	appInf *appInformer,
) error {
	routeByIndex, err := appInf.getOrderedRoutes(sKey.Namespace)
	if nil != err {
		log.Warningf("Unable to list routes for namespace '%v': %v",
			sKey.Namespace, err)
		return err
	}

	// Rebuild all internal data groups for routes as we process each
	dgMap := make(InternalDataGroupMap)
	for _, route := range routeByIndex {
		// We need to look at all routes in the store, parse the data blob,
		// and see if it belongs to the service that has changed.
		if nil != route.Spec.TLS {
			// The information stored in the internal data groups can span multiple
			// namespaces, so we need to keep them updated with all current routes
			// regardless of anything that happens below.
			switch route.Spec.TLS.Termination {
			case routeapi.TLSTerminationPassthrough:
				updateDataGroupForPassthroughRoute(route, DEFAULT_PARTITION, dgMap)
			case routeapi.TLSTerminationReencrypt:
				updateDataGroupForReencryptRoute(route, DEFAULT_PARTITION, dgMap)
			}
		}
		if route.ObjectMeta.Namespace != sKey.Namespace {
			continue
		}
		// Each route produces an HTTP and an HTTPS virtual server.
		pStructs := []portStruct{{protocol: "http", port: DEFAULT_HTTP_PORT},
			{protocol: "https", port: DEFAULT_HTTPS_PORT}}
		for _, ps := range pStructs {
			rsCfg, err := createRSConfigFromRoute(route,
				*appMgr.resources, appMgr.routeConfig, ps)
			if err != nil {
				// We return err if there was an error creating a rule
				log.Warningf("%v", err)
				continue
			}

			rsName := rsCfg.Virtual.VirtualServerName
			if ok, found, updated := appMgr.handleConfigForType(
				&rsCfg, sKey, rsMap, rsName, svcPortMap,
				svc, appInf, route.Spec.To.Name); !ok {
				stats.vsUpdated += updated
				continue
			} else {
				stats.vsFound += found
				stats.vsUpdated += updated
			}

			// We store this same config on every route that has the same protocol, but it is only
			// written to the BIG-IP once. This block guarantees that all of these configs
			// are in the same state.
			appMgr.resources.Lock()
			cfgs, keys := appMgr.resources.GetAllWithName(rsCfg.Virtual.VirtualServerName)
			for i, cfg := range cfgs {
				if cfg.Virtual.Partition == rsCfg.Virtual.Partition &&
					!reflect.DeepEqual(cfg, rsCfg) {
					cfg = &rsCfg
					appMgr.resources.Assign(keys[i], cfg.Virtual.VirtualServerName, cfg)
				}
			}
			appMgr.resources.Unlock()

			// TLS Cert/Key
			if nil != route.Spec.TLS &&
				rsCfg.Virtual.VirtualAddress.Port == DEFAULT_HTTPS_PORT {
				switch route.Spec.TLS.Termination {
				case routeapi.TLSTerminationEdge:
					appMgr.setClientSslProfile(stats, sKey, &rsCfg, route)
				case routeapi.TLSTerminationReencrypt:
					appMgr.setClientSslProfile(stats, sKey, &rsCfg, route)
					appMgr.setServerSslProfile(stats, sKey, &rsCfg, route)
				}
			}
		}
	}

	// Update internal data groups for routes if changed
	appMgr.updateRouteDataGroups(stats, dgMap)

	return nil
}

// setClientSslProfile attaches a client SSL profile for an edge/reencrypt
// route, creating a custom profile when the route carries its own cert/key
// and falling back to Common/clientssl otherwise.
func (appMgr *Manager) setClientSslProfile(
	stats *vsSyncStats,
	sKey serviceQueueKey,
	rsCfg *ResourceConfig,
	route *routeapi.Route,
) {
	profileName := "Common/clientssl"
	if "" != route.Spec.TLS.Certificate && "" != route.Spec.TLS.Key {
		profile := ProfileRef{
			Name:      route.ObjectMeta.Name + "-https-cert",
			Partition: rsCfg.Virtual.Partition,
			Context:   customProfileClient,
		}
		cp := NewCustomProfile(
			profile,
			route.Spec.TLS.Certificate,
			route.Spec.TLS.Key,
			route.Spec.Host,
			rsCfg.Virtual.VirtualServerName,
			appMgr.customProfiles,
		)
		skey := secretKey{
			Name:         cp.Name,
			Namespace:    sKey.Namespace,
			ResourceName: rsCfg.Virtual.VirtualServerName,
		}
		appMgr.customProfiles.Lock()
		defer appMgr.customProfiles.Unlock()
		if prof, ok := appMgr.customProfiles.profs[skey]; ok {
			if !reflect.DeepEqual(prof, cp) {
				stats.cpUpdated += 1
			}
		}
		appMgr.customProfiles.profs[skey] = cp
		profileName = fmt.Sprintf("%s/%s", cp.Partition, cp.Name)
	}
	rsCfg.Virtual.AddFrontendSslProfileName(profileName)
}

// setServerSslProfile attaches a server SSL profile for a reencrypt route,
// built from the route's destination CA certificate or, when absent, the
// cluster default certificate.
func (appMgr *Manager) setServerSslProfile(
	stats *vsSyncStats,
	sKey serviceQueueKey,
	rsCfg *ResourceConfig,
	route *routeapi.Route,
) {
	if "" != route.Spec.TLS.DestinationCACertificate {
		// Create new SSL server profile with the provided CA Certificate.
		ruleName := formatRouteRuleName(route)
		profile := ProfileRef{
			Name:      ruleName + "-server-ssl",
			Partition: rsCfg.Virtual.Partition,
			Context:   customProfileServer,
		}
		cp := NewCustomProfile(
			profile,
			route.Spec.TLS.DestinationCACertificate,
			"", // no key
			route.Spec.Host,
			rsCfg.Virtual.VirtualServerName,
			appMgr.customProfiles,
		)
		skey := secretKey{
			Name:         cp.Name,
			Namespace:    sKey.Namespace,
			ResourceName: rsCfg.Virtual.VirtualServerName,
		}
		appMgr.customProfiles.Lock()
		defer appMgr.customProfiles.Unlock()
		if prof, ok := appMgr.customProfiles.profs[skey]; ok {
			if !reflect.DeepEqual(prof, cp) {
				stats.cpUpdated += 1
			}
		}
		appMgr.customProfiles.profs[skey] = cp
		rsCfg.Virtual.AddOrUpdateProfile(profile)
	} else {
		profile, added := appMgr.loadDefaultCert(
			sKey.Namespace,
			route.Spec.Host,
			rsCfg.Virtual.VirtualServerName,
		)
		if nil != profile {
			rsCfg.Virtual.AddOrUpdateProfile(*profile)
		}
		if added {
			stats.cpUpdated += 1
		}
	}
}

// getBooleanAnnotation parses annotation key as a bool, falling back to
// defaultValue when the key is absent or malformed.
func getBooleanAnnotation(
	annotations map[string]string,
	key string,
	defaultValue bool,
) bool {
	val, found := annotations[key]
	if !found {
		return defaultValue
	}
	bVal, err := strconv.ParseBool(val)
	if nil != err {
		log.Errorf("Unable to parse boolean value '%v': %v", val, err)
		return defaultValue
	}
	return bVal
}

// secretKey identifies a custom profile by name, namespace, and owning
// resource.
type secretKey struct {
	Name         string
	Namespace    string
	ResourceName string
}

// Return value is whether or not a custom profile was updated
func (appMgr *Manager) handleIngressTls(
	rsCfg *ResourceConfig,
	ing *v1beta1.Ingress,
) bool {
	if 0 == len(ing.Spec.TLS) {
		// Nothing to do if no TLS section
		return false
	}
	if nil == rsCfg.Virtual.VirtualAddress ||
		rsCfg.Virtual.VirtualAddress.BindAddr == "" {
		// Nothing to do for pool-only mode
		return false
	}

	var httpsPort int32
	if port, ok :=
		ing.ObjectMeta.Annotations["virtual-server.f5.com/https-port"]; ok == true {
		p, _ := strconv.ParseInt(port, 10, 32)
httpsPort = int32(p) } else { httpsPort = DEFAULT_HTTPS_PORT } // If we are processing the HTTPS server, // then we don't need a redirect policy, only profiles if rsCfg.Virtual.VirtualAddress.Port == httpsPort { var cpUpdated, updateState bool for _, tls := range ing.Spec.TLS { // Check if profile is contained in a Secret secret, err := appMgr.kubeClient.Core().Secrets(ing.ObjectMeta.Namespace). Get(tls.SecretName, metav1.GetOptions{}) if err != nil { // No secret, so we assume the profile is a BIG-IP default log.Infof("Couldn't find Secret with name '%s': %s. Parsing secretName as path.", tls.SecretName, err) secretName := formatIngressSslProfileName(tls.SecretName) rsCfg.Virtual.AddFrontendSslProfileName(secretName) continue } err, cpUpdated = appMgr.handleSslProfile(rsCfg, secret, ing.ObjectMeta.Namespace) if err != nil { log.Warningf("%v", err) continue } updateState = updateState || cpUpdated secretName := formatIngressSslProfileName( rsCfg.Virtual.Partition + "/" + tls.SecretName) rsCfg.Virtual.AddFrontendSslProfileName(secretName) } return cpUpdated } // sslRedirect defaults to true, allowHttp defaults to false. 
sslRedirect := getBooleanAnnotation(ing.ObjectMeta.Annotations,
	ingressSslRedirect, true)
	allowHttp := getBooleanAnnotation(ing.ObjectMeta.Annotations,
		ingressAllowHttp, false)
	// -----------------------------------------------------------------
	// | State | sslRedirect | allowHttp | Description                 |
	// -----------------------------------------------------------------
	// |   1   |      F      |     F     | Just HTTPS, nothing on HTTP |
	// -----------------------------------------------------------------
	// |   2   |      T      |     F     | HTTP redirects to HTTPS     |
	// -----------------------------------------------------------------
	// |   2   |      T      |     T     | Honor sslRedirect == true   |
	// -----------------------------------------------------------------
	// |   3   |      F      |     T     | Both HTTP and HTTPS         |
	// -----------------------------------------------------------------
	var rule *Rule
	var policyName string
	if sslRedirect {
		// State 2, set HTTP redirect iRule
		log.Debugf("TLS: Applying HTTP redirect iRule.")
		ruleName := fmt.Sprintf("/%s/%s", DEFAULT_PARTITION, httpRedirectIRuleName)
		if httpsPort != DEFAULT_HTTPS_PORT {
			// A non-default HTTPS port gets its own port-specific iRule.
			ruleName = fmt.Sprintf("%s_%d", ruleName, httpsPort)
			appMgr.addIRule(ruleName, DEFAULT_PARTITION, httpRedirectIRule(httpsPort))
		}
		rsCfg.Virtual.AddIRule(ruleName)
	} else if allowHttp {
		// State 3, do not apply any policy
		log.Debugf("TLS: Not applying any policies.")
	}
	// NOTE(review): rule and policyName are never assigned above, so this
	// branch appears to be unreachable dead code — confirm before relying
	// on it.
	if nil != rule && "" != policyName {
		policy := rsCfg.FindPolicy("forwarding")
		if nil == policy {
			policy = createPolicy(Rules{rule}, policyName, rsCfg.Virtual.Partition)
		} else {
			rule.Ordinal = len(policy.Rules)
			policy.Rules = append(policy.Rules, rule)
		}
		rsCfg.SetPolicy(*policy)
	}
	return false
}

// handleSslProfile builds a client-ssl custom profile from the tls.crt and
// tls.key fields of a Kubernetes Secret and stores it keyed by secret name,
// namespace, and virtual server. Returns (error, profileChanged), where
// profileChanged is true only when a previously stored profile differed.
func (appMgr *Manager) handleSslProfile(
	rsCfg *ResourceConfig,
	secret *v1.Secret,
	namespace string) (error, bool) {
	if _, ok := secret.Data["tls.crt"]; !ok {
		err := fmt.Errorf("Invalid Secret '%v': 'tls.crt' field not specified.",
			secret.ObjectMeta.Name)
		return err, false
	}
	if _, ok := secret.Data["tls.key"]; !ok {
		err := fmt.Errorf("Invalid Secret '%v': 'tls.key' field not specified.",
			secret.ObjectMeta.Name)
		return err, false
	}
	cp := CustomProfile{
		Name:      secret.ObjectMeta.Name,
		Partition: rsCfg.Virtual.Partition,
		Context:   customProfileClient,
		Cert:      string(secret.Data["tls.crt"]),
		Key:       string(secret.Data["tls.key"]),
	}
	skey := secretKey{
		Name:         cp.Name,
		Namespace:    namespace,
		ResourceName: rsCfg.Virtual.VirtualServerName,
	}
	appMgr.customProfiles.Lock()
	defer appMgr.customProfiles.Unlock()
	if prof, ok := appMgr.customProfiles.profs[skey]; ok {
		if !reflect.DeepEqual(prof, cp) {
			// Existing profile changed; overwrite and report an update.
			appMgr.customProfiles.profs[skey] = cp
			return nil, true
		} else {
			return nil, false
		}
	}
	// First sighting of this profile; storing it is not counted as an update.
	appMgr.customProfiles.profs[skey] = cp
	return nil, false
}

// portStruct pairs a protocol name ("http"/"https") with its port number.
type portStruct struct {
	protocol string
	port     int32
}

// Return the required ports for Ingress VS (depending on sslRedirect/allowHttp vals)
func (appMgr *Manager) virtualPorts(ing *v1beta1.Ingress) []portStruct {
	var httpPort int32
	var httpsPort int32
	// Port annotations override the compiled-in defaults.
	if port, ok := ing.ObjectMeta.Annotations["virtual-server.f5.com/http-port"]; ok == true {
		p, _ := strconv.ParseInt(port, 10, 32)
		httpPort = int32(p)
	} else {
		httpPort = DEFAULT_HTTP_PORT
	}
	if port, ok := ing.ObjectMeta.Annotations["virtual-server.f5.com/https-port"]; ok == true {
		p, _ := strconv.ParseInt(port, 10, 32)
		httpsPort = int32(p)
	} else {
		httpsPort = DEFAULT_HTTPS_PORT
	}
	// sslRedirect defaults to true, allowHttp defaults to false.
sslRedirect := getBooleanAnnotation(ing.ObjectMeta.Annotations,
	ingressSslRedirect, true)
	allowHttp := getBooleanAnnotation(ing.ObjectMeta.Annotations,
		ingressAllowHttp, false)
	http := portStruct{
		protocol: "http",
		port:     httpPort,
	}
	https := portStruct{
		protocol: "https",
		port:     httpsPort,
	}
	var ports []portStruct
	if len(ing.Spec.TLS) > 0 {
		if sslRedirect || allowHttp {
			// States 2,3; both HTTP and HTTPS
			// 2 virtual servers needed
			ports = append(ports, http)
			ports = append(ports, https)
		} else {
			// State 1; HTTPS only
			ports = append(ports, https)
		}
	} else {
		// HTTP only, no TLS
		ports = append(ports, http)
	}
	return ports
}

// Common handling function for ConfigMaps, Ingresses, and Routes
//
// Reconciles the pool for sKey's service within rsCfg and returns
// (serviceMatched, vsFound, vsUpdated).
func (appMgr *Manager) handleConfigForType(
	rsCfg *ResourceConfig,
	sKey serviceQueueKey,
	rsMap ResourceMap,
	rsName string,
	svcPortMap map[int32]bool,
	svc *v1.Service,
	appInf *appInformer,
	currRouteSvc string, // Only used for Routes
) (bool, int, int) {
	vsFound := 0
	vsUpdated := 0

	// Locate the pool in this config that backs the queued service.
	var pool Pool
	found := false
	plIdx := 0
	for i, pl := range rsCfg.Pools {
		if pl.ServiceName == sKey.ServiceName {
			found = true
			pool = pl
			plIdx = i
			break
		}
	}
	if !found {
		// If the current cfg has no pool for this service,
		// remove any pools associated with the service,
		// across all stored keys for the resource
		appMgr.resources.Lock()
		defer appMgr.resources.Unlock()
		cfgs, keys := appMgr.resources.GetAllWithName(rsName)
		for i, cfg := range cfgs {
			// NOTE(review): this shifts-and-truncates cfg.Pools while ranging
			// over it; verify a single matching pool per cfg is the invariant.
			for j, pool := range cfg.Pools {
				if pool.ServiceName == sKey.ServiceName {
					copy(cfg.Pools[j:], cfg.Pools[j+1:])
					cfg.Pools[len(cfg.Pools)-1] = Pool{}
					cfg.Pools = cfg.Pools[:len(cfg.Pools)-1]
				}
			}
			appMgr.resources.Assign(keys[i], rsName, cfg)
		}
		return false, vsFound, vsUpdated
	}
	svcKey := serviceKey{
		Namespace:   sKey.Namespace,
		ServiceName: pool.ServiceName,
		ServicePort: pool.ServicePort,
	}

	// Match, remove from rsMap so we don't delete it at the end.
	// In the case of Routes: If the svc of the currently processed route doesn't match
	// the svc in our serviceKey, then we don't want to delete it from the map (all routes
	// with the same protocol have the same VS name, so we don't want to ignore a route that
	// was actually deleted).
	cfgList := rsMap[pool.ServicePort]
	if currRouteSvc == "" || currRouteSvc == sKey.ServiceName {
		if len(cfgList) == 1 {
			delete(rsMap, pool.ServicePort)
		} else if len(cfgList) > 1 {
			for index, val := range cfgList {
				if val.Virtual.VirtualServerName == rsName {
					cfgList = append(cfgList[:index], cfgList[index+1:]...)
				}
			}
			rsMap[pool.ServicePort] = cfgList
		}
	}

	// The service port the pool references no longer exists on the service.
	if _, ok := svcPortMap[pool.ServicePort]; !ok {
		log.Debugf("Process Service delete - name: %v namespace: %v",
			pool.ServiceName, svcKey.Namespace)
		log.Infof("Port '%v' for service '%v' was not found.",
			pool.ServicePort, pool.ServiceName)
		if appMgr.deactivateVirtualServer(svcKey, rsName, rsCfg, plIdx) {
			vsUpdated += 1
		}
	}

	if nil == svc {
		// The service is gone, de-activate it in the config.
		log.Infof("Service '%v' has not been found.", pool.ServiceName)
		if appMgr.deactivateVirtualServer(svcKey, rsName, rsCfg, plIdx) {
			vsUpdated += 1
		}
		// If this is an Ingress resource, add an event that the service wasn't found
		if strings.HasSuffix(rsName, "ingress") {
			msg := fmt.Sprintf("Service '%v' has not been found.",
				pool.ServiceName)
			appMgr.recordIngressEvent(nil, "ServiceNotFound", msg, rsName)
		}
		return false, vsFound, vsUpdated
	}

	// Update pool members.
	vsFound += 1
	correctBackend := true
	var reason string
	var msg string
	if appMgr.IsNodePort() {
		correctBackend, reason, msg =
			appMgr.updatePoolMembersForNodePort(svc, svcKey, rsCfg, plIdx)
	} else {
		correctBackend, reason, msg =
			appMgr.updatePoolMembersForCluster(svc, svcKey, rsCfg, appInf, plIdx)
	}

	// This will only update the config if the vs actually changed.
	if appMgr.saveVirtualServer(svcKey, rsName, rsCfg) {
		vsUpdated += 1
		// If this is an Ingress resource, add an event if there was a backend error
		if !correctBackend {
			if strings.HasSuffix(rsCfg.Virtual.VirtualServerName, "ingress") {
				appMgr.recordIngressEvent(nil, reason, msg,
					rsCfg.Virtual.VirtualServerName)
			}
		}
	}

	return true, vsFound, vsUpdated
}

// updatePoolMembersForNodePort fills the indexed pool with node-level
// endpoints for a NodePort service. Returns (backendOK, reason, message).
func (appMgr *Manager) updatePoolMembersForNodePort(
	svc *v1.Service,
	svcKey serviceKey,
	rsCfg *ResourceConfig,
	index int,
) (bool, string, string) {
	if svc.Spec.Type == v1.ServiceTypeNodePort {
		for _, portSpec := range svc.Spec.Ports {
			if portSpec.Port == svcKey.ServicePort {
				log.Debugf("Service backend matched %+v: using node port %v",
					svcKey, portSpec.NodePort)
				rsCfg.MetaData.Active = true
				rsCfg.MetaData.NodePort = portSpec.NodePort
				rsCfg.Pools[index].Members =
					appMgr.getEndpointsForNodePort(portSpec.NodePort)
			}
		}
		return true, "", ""
	} else {
		msg := fmt.Sprintf("Requested service backend '%+v' not of NodePort type",
			svcKey.ServiceName)
		log.Debug(msg)
		return false, "IncorrectBackendServiceType", msg
	}
}

// updatePoolMembersForCluster fills the indexed pool with pod endpoints
// from the namespace's Endpoints informer cache (cluster mode).
// Returns (backendOK, reason, message).
func (appMgr *Manager) updatePoolMembersForCluster(
	svc *v1.Service,
	sKey serviceKey,
	rsCfg *ResourceConfig,
	appInf *appInformer,
	index int,
) (bool, string, string) {
	svcKey := sKey.Namespace + "/" + sKey.ServiceName
	item, found, _ := appInf.endptInformer.GetStore().GetByKey(svcKey)
	if !found {
		msg := fmt.Sprintf("Endpoints for service '%v' not found!", svcKey)
		log.Debug(msg)
		return false, "EndpointsNotFound", msg
	}
	eps, _ := item.(*v1.Endpoints)
	for _, portSpec := range svc.Spec.Ports {
		if portSpec.Port == sKey.ServicePort {
			ipPorts := getEndpointsForService(portSpec.Name, eps)
			log.Debugf("Found endpoints for backend %+v: %v", sKey, ipPorts)
			rsCfg.MetaData.Active = true
			rsCfg.Pools[index].Members = ipPorts
		}
	}
	return true, "", ""
}

// deactivateVirtualServer marks the config inactive and clears the indexed
// pool's members, storing the result. Returns whether the stored config
// was changed.
func (appMgr *Manager) deactivateVirtualServer(
	sKey serviceKey,
	rsName string,
	rsCfg *ResourceConfig,
	index int,
) bool {
	updateConfig := false
	appMgr.resources.Lock()
	defer
appMgr.resources.Unlock()
	if rs, ok := appMgr.resources.Get(sKey, rsName); ok {
		rsCfg.MetaData.Active = false
		rsCfg.Pools[index].Members = nil
		// Only report a change if the stored config actually differs.
		if !reflect.DeepEqual(rs, rsCfg) {
			log.Debugf("Service delete matching backend %v %v deactivating config",
				sKey, rsName)
			updateConfig = true
		}
	} else {
		// We have a config map but not a server. Put in the virtual server from
		// the config map.
		updateConfig = true
	}
	if updateConfig {
		appMgr.resources.Assign(sKey, rsName, rsCfg)
	}
	return updateConfig
}

// saveVirtualServer stores newRsCfg under (sKey, rsName) and returns true
// when this changed the stored state (i.e. a config write is needed).
func (appMgr *Manager) saveVirtualServer(
	sKey serviceKey,
	rsName string,
	newRsCfg *ResourceConfig,
) bool {
	appMgr.resources.Lock()
	defer appMgr.resources.Unlock()
	if oldRsCfg, ok := appMgr.resources.Get(sKey, rsName); ok {
		if reflect.DeepEqual(oldRsCfg, newRsCfg) {
			// not changed, don't trigger a config write
			return false
		}
		log.Warningf("Overwriting existing entry for backend %+v", sKey)
	}
	appMgr.resources.Assign(sKey, rsName, newRsCfg)
	return true
}

// getResourcesForKey collects all stored configs for sKey's service,
// grouped by service port.
func (appMgr *Manager) getResourcesForKey(sKey serviceQueueKey) ResourceMap {
	// Return a copy of what is stored in resources
	appMgr.resources.Lock()
	defer appMgr.resources.Unlock()
	rsMap := make(ResourceMap)
	appMgr.resources.ForEach(func(key serviceKey, cfg *ResourceConfig) {
		if key.Namespace == sKey.Namespace &&
			key.ServiceName == sKey.ServiceName {
			rsMap[key.ServicePort] = append(rsMap[key.ServicePort], cfg)
		}
	})
	return rsMap
}

// processAllMultiSvc reports whether every pool of a multi-service resource
// already has a stored key/cfg; updates are deferred until they all do.
func (appMgr *Manager) processAllMultiSvc(numPools int, rsName string) bool {
	// If multi-service and we haven't yet configured keys/cfgs for each service,
	// then we don't want to update
	appMgr.resources.Lock()
	defer appMgr.resources.Unlock()
	_, keys := appMgr.resources.GetAllWithName(rsName)
	if len(keys) != numPools {
		return false
	}
	return true
}

// deleteUnusedResources removes every stored config left in rsMap
// (i.e. those not matched during reconciliation) and returns the count.
func (appMgr *Manager) deleteUnusedResources(
	sKey serviceQueueKey,
	rsMap ResourceMap) int {
	rsDeleted := 0
	appMgr.resources.Lock()
	defer appMgr.resources.Unlock()
	for port, cfgList := range rsMap {
		tmpKey := serviceKey{
			Namespace:   sKey.Namespace,
			ServiceName: sKey.ServiceName,
			ServicePort: port,
		}
		for _, cfg := range cfgList {
			rsName := cfg.Virtual.VirtualServerName
			if appMgr.resources.Delete(tmpKey, rsName) {
				rsDeleted += 1
			}
		}
	}
	return rsDeleted
}

// If a route is deleted, loop through other route configs and delete pools/rules/profiles
// for the deleted route.
func (appMgr *Manager) deleteUnusedRoutes(namespace string) {
	appMgr.resources.Lock()
	defer appMgr.resources.Unlock()
	var routeName string
	appMgr.resources.ForEach(func(key serviceKey, cfg *ResourceConfig) {
		if cfg.MetaData.ResourceType == "route" {
			// NOTE(review): cfg.Pools is shrunk while being ranged over;
			// verify at most one stale pool per pass is the invariant.
			for i, pool := range cfg.Pools {
				sKey := serviceKey{
					ServiceName: pool.ServiceName,
					ServicePort: pool.ServicePort,
					Namespace:   key.Namespace,
				}
				if _, ok := appMgr.resources.Get(
					sKey, cfg.Virtual.VirtualServerName); !ok {
					poolName := fmt.Sprintf("/%s/%s", cfg.Virtual.Partition, pool.Name)
					// Delete rule
					for _, pol := range cfg.Policies {
						if len(pol.Rules) == 1 {
							// Last rule in the policy: drop the whole policy.
							nr := nameRef{
								Name:      pol.Name,
								Partition: pol.Partition,
							}
							cfg.RemovePolicy(nr)
							continue
						}
						for i, rule := range pol.Rules {
							if len(rule.Actions) > 0 && rule.Actions[0].Pool == poolName {
								// Rule names encode the route name as the 4th
								// '_'-separated field; remember it for the
								// profile cleanup below.
								routeName = strings.Split(rule.Name, "_")[3]
								if i >= len(pol.Rules)-1 {
									pol.Rules = pol.Rules[:len(pol.Rules)-1]
								} else {
									copy(pol.Rules[i:], pol.Rules[i+1:])
									pol.Rules[len(pol.Rules)-1] = &Rule{}
									pol.Rules = pol.Rules[:len(pol.Rules)-1]
								}
								cfg.SetPolicy(pol)
							}
						}
					}
					// Delete pool
					if i >= len(cfg.Pools)-1 {
						cfg.Pools = cfg.Pools[:len(cfg.Pools)-1]
					} else {
						copy(cfg.Pools[i:], cfg.Pools[i+1:])
						cfg.Pools[len(cfg.Pools)-1] = Pool{}
						cfg.Pools = cfg.Pools[:len(cfg.Pools)-1]
					}
					// Delete profile
					if routeName != "" {
						profileName := fmt.Sprintf("%s/%s-https-cert",
							cfg.Virtual.Partition, routeName)
						cfg.Virtual.RemoveFrontendSslProfileName(profileName)
					}
				}
			}
			appMgr.resources.Assign(key, cfg.Virtual.VirtualServerName, cfg)
		}
	})
}

// deleteUnusedProfiles drops custom profiles in the namespace that no
// stored virtual server references any longer.
func (appMgr *Manager) deleteUnusedProfiles(namespace string) {
	var found bool
	appMgr.customProfiles.Lock()
	defer appMgr.customProfiles.Unlock()
	for key, profile
:= range appMgr.customProfiles.profs {
		found = false
		appMgr.resources.ForEach(func(k serviceKey, rsCfg *ResourceConfig) {
			if key.ResourceName == rsCfg.Virtual.VirtualServerName &&
				key.Namespace == namespace {
				if rsCfg.Virtual.ReferencesProfile(profile) {
					found = true
				}
			}
		})
		// No virtual server references this profile any more; remove it.
		if !found {
			delete(appMgr.customProfiles.profs, key)
		}
	}
}

// setBindAddrAnnotation writes the virtual server's bind address onto the
// ConfigMap's status annotation, updating the object only when needed.
func (appMgr *Manager) setBindAddrAnnotation(
	cm *v1.ConfigMap,
	sKey serviceQueueKey,
	rsCfg *ResourceConfig,
) {
	var doUpdate bool
	if cm.ObjectMeta.Annotations == nil {
		cm.ObjectMeta.Annotations = make(map[string]string)
		doUpdate = true
	} else if cm.ObjectMeta.Annotations[vsBindAddrAnnotation] !=
		rsCfg.Virtual.VirtualAddress.BindAddr {
		doUpdate = true
	}
	if doUpdate {
		cm.ObjectMeta.Annotations[vsBindAddrAnnotation] =
			rsCfg.Virtual.VirtualAddress.BindAddr
		_, err := appMgr.kubeClient.CoreV1().ConfigMaps(sKey.Namespace).Update(cm)
		if nil != err {
			log.Warningf("Error when creating status IP annotation: %s", err)
		} else {
			log.Debugf("Updating ConfigMap %+v annotation - %v: %v",
				sKey, vsBindAddrAnnotation, rsCfg.Virtual.VirtualAddress.BindAddr)
		}
	}
}

// setIngressStatus publishes the virtual IP into the Ingress's
// status.loadBalancer field.
func (appMgr *Manager) setIngressStatus(
	ing *v1beta1.Ingress,
	rsCfg *ResourceConfig,
) {
	// Set the ingress status to include the virtual IP
	lbIngress := v1.LoadBalancerIngress{IP: rsCfg.Virtual.VirtualAddress.BindAddr}
	if len(ing.Status.LoadBalancer.Ingress) == 0 {
		ing.Status.LoadBalancer.Ingress = append(ing.Status.LoadBalancer.Ingress, lbIngress)
	} else if ing.Status.LoadBalancer.Ingress[0].IP !=
		rsCfg.Virtual.VirtualAddress.BindAddr {
		ing.Status.LoadBalancer.Ingress[0] = lbIngress
	}
	_, updateErr := appMgr.kubeClient.ExtensionsV1beta1().
		Ingresses(ing.ObjectMeta.Namespace).UpdateStatus(ing)
	if nil != updateErr {
		// Multi-service causes the controller to try to update the status multiple times
		// at once. Ignore this error.
if strings.Contains(updateErr.Error(), "object has been modified") {
			return
		}
		warning := fmt.Sprintf(
			"Error when setting Ingress status IP for virtual server %v: %v",
			rsCfg.Virtual.VirtualServerName, updateErr)
		log.Warning(warning)
		appMgr.recordIngressEvent(ing, "StatusIPError", warning, "")
	}
}

// This function expects either an Ingress resource or the name of a VS for an Ingress
func (appMgr *Manager) recordIngressEvent(ing *v1beta1.Ingress,
	reason,
	message,
	rsName string) {
	var namespace string
	var name string
	if ing != nil {
		namespace = ing.ObjectMeta.Namespace
	} else {
		// VS names for Ingresses have the form "<namespace>_<name>-ingress";
		// recover both pieces from rsName.
		namespace = strings.Split(rsName, "_")[0]
		name = rsName[len(namespace)+1 : len(rsName)-len("-ingress")]
	}
	appMgr.broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{
		Interface: appMgr.kubeClient.Core().Events(namespace)})

	// If we aren't given an Ingress resource, we use the name to find it
	var err error
	if ing == nil {
		ing, err = appMgr.kubeClient.Extensions().Ingresses(namespace).
			Get(name, metav1.GetOptions{})
		if nil != err {
			log.Warningf("Could not find Ingress resource '%v'.", name)
			return
		}
	}

	// Create the event
	appMgr.eventRecorder.Event(ing, v1.EventTypeNormal, reason, message)
}

// getEndpointsForService returns the pool members for the named endpoint
// port across all subsets; nil when eps is nil or no port matches.
func getEndpointsForService(
	portName string,
	eps *v1.Endpoints,
) []Member {
	var members []Member

	if eps == nil {
		return members
	}

	for _, subset := range eps.Subsets {
		for _, p := range subset.Ports {
			if portName == p.Name {
				for _, addr := range subset.Addresses {
					member := Member{
						Address: addr.IP,
						Port:    p.Port,
						Session: "user-enabled",
					}
					members = append(members, member)
				}
			}
		}
	}
	return members
}

// getEndpointsForNodePort builds one pool member per cached node address,
// all on the given node port.
func (appMgr *Manager) getEndpointsForNodePort(
	nodePort int32,
) []Member {
	nodes := appMgr.getNodesFromCache()
	var members []Member
	for _, v := range nodes {
		member := Member{
			Address: v,
			Port:    nodePort,
			Session: "user-enabled",
		}
		members = append(members, member)
	}

	return members
}

// handleConfigMapParseFailure logs the parse error and, if a virtual
// server exists for the invalid ConfigMap, deletes it (returning true).
func handleConfigMapParseFailure(
	appMgr *Manager,
	cm *v1.ConfigMap,
	cfg *ResourceConfig,
	err error,
) bool {
	log.Warningf("Could not get config for ConfigMap: %v - %v",
		cm.ObjectMeta.Name, err)
	// If virtual server exists for invalid configmap, delete it
	var serviceName string
	var servicePort int32
	if nil != cfg {
		if len(cfg.Pools) == 0 {
			serviceName = ""
			servicePort = 0
		} else {
			serviceName = cfg.Pools[0].ServiceName
			servicePort = cfg.Pools[0].ServicePort
		}
		sKey := serviceKey{serviceName, servicePort, cm.ObjectMeta.Namespace}
		rsName := formatConfigMapVSName(cm)
		if _, ok := appMgr.resources.Get(sKey, rsName); ok {
			appMgr.resources.Lock()
			defer appMgr.resources.Unlock()
			appMgr.resources.Delete(sKey, rsName)
			delete(cm.ObjectMeta.Annotations, vsBindAddrAnnotation)
			appMgr.kubeClient.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(cm)
			log.Warningf("Deleted virtual server associated with ConfigMap: %v",
				cm.ObjectMeta.Name)
			return true
		}
	}
	return false
}

// Check for a change in Node state
func (appMgr *Manager) ProcessNodeUpdate(
	obj interface{}, err error,
) {
	if nil != err {
		log.Warningf("Unable to get list of nodes, err=%+v", err)
		return
	}

	newNodes, err := appMgr.getNodeAddresses(obj)
	if nil != err {
		log.Warningf("Unable to get list of nodes, err=%+v", err)
		return
	}
	// Sorted so DeepEqual below compares membership, not ordering.
	sort.Strings(newNodes)

	appMgr.resources.Lock()
	defer appMgr.resources.Unlock()
	appMgr.oldNodesMutex.Lock()
	defer appMgr.oldNodesMutex.Unlock()

	// Only check for updates once we are in our initial state
	if appMgr.initialState {
		// Compare last set of nodes with new one
		if !reflect.DeepEqual(newNodes, appMgr.oldNodes) {
			log.Infof("ProcessNodeUpdate: Change in Node state detected")
			appMgr.resources.ForEach(func(key serviceKey, cfg *ResourceConfig) {
				var members []Member
				for _, node := range newNodes {
					member := Member{
						Address: node,
						Port:    cfg.MetaData.NodePort,
						Session: "user-enabled",
					}
					members = append(members, member)
				}
				cfg.Pools[0].Members = members
			})
			// Output the Big-IP config
			appMgr.outputConfigLocked()

			// Update node cache
			appMgr.oldNodes = newNodes
		}
	} else {
		// Initialize appMgr nodes on our first pass through
appMgr.oldNodes = newNodes
	}
}

// Return a copy of the node cache
func (appMgr *Manager) getNodesFromCache() []string {
	appMgr.oldNodesMutex.Lock()
	defer appMgr.oldNodesMutex.Unlock()
	nodes := make([]string, len(appMgr.oldNodes))
	copy(nodes, appMgr.oldNodes)

	return nodes
}

// Get a list of Node addresses
func (appMgr *Manager) getNodeAddresses(
	obj interface{},
) ([]string, error) {
	nodes, ok := obj.([]v1.Node)
	if false == ok {
		return nil,
			fmt.Errorf("poll update unexpected type, interface is not []v1.Node")
	}

	addrs := []string{}

	// Internal vs. external addresses depends on controller configuration.
	var addrType v1.NodeAddressType
	if appMgr.UseNodeInternal() {
		addrType = v1.NodeInternalIP
	} else {
		addrType = v1.NodeExternalIP
	}

	isUnSchedulable := func(node v1.Node) bool {
		for _, t := range node.Spec.Taints {
			if v1.TaintEffectNoSchedule == t.Effect {
				return true
			}
		}
		return node.Spec.Unschedulable
	}

	for _, node := range nodes {
		if 0 == len(appMgr.nodeLabelSelector) && isUnSchedulable(node) {
			// Skip unschedulable nodes only when there isn't a node
			// selector
			continue
		} else {
			nodeAddrs := node.Status.Addresses
			for _, addr := range nodeAddrs {
				if addr.Type == addrType {
					addrs = append(addrs, addr.Address)
				}
			}
		}
	}

	return addrs, nil
}
Change Secret SSL profile log to be DEBUG Problem: The log informing users that the SSL profile name provided couldn't be found as a Secret and instead is used as a regular path was misleading. Users may think there was an error if a normal path is provided, even though there wasn't. Solution: Add this log into DEBUG rather than INFO, and clear up the message.
/*-
 * Copyright (c) 2016,2017, F5 Networks, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package appmanager

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	log "github.com/F5Networks/k8s-bigip-ctlr/pkg/vlogger"
	"github.com/F5Networks/k8s-bigip-ctlr/pkg/writer"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/pkg/apis/extensions/v1beta1"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	watch "k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes/scheme"
	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	rest "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"

	routeapi "github.com/openshift/origin/pkg/route/api"
)

// Label selector used for ConfigMap informers by default.
const DefaultConfigMapLabel = "f5type in (virtual-server)"

// Annotation used to publish a virtual server's bind address.
const vsBindAddrAnnotation = "status.virtual-server.f5.com/ip"

// Ingress annotations honored by the controller.
const ingressSslRedirect = "ingress.kubernetes.io/ssl-redirect"
const ingressAllowHttp = "ingress.kubernetes.io/allow-http"
const ingHealthMonitorAnnotation = "virtual-server.f5.com/health"

// ResourceMap groups resource configs by service port.
type ResourceMap map[int32][]*ResourceConfig

// Manager is the controller's central state: stored configs, informers,
// work queues, and clients for Kubernetes/OpenShift and the BIG-IP writer.
type Manager struct {
	resources         *Resources
	customProfiles    CustomProfileStore
	irulesMap         IRulesMap
	intDgMap          InternalDataGroupMap
	kubeClient        kubernetes.Interface
	restClientv1      rest.Interface
	restClientv1beta1 rest.Interface
	routeClientV1     rest.Interface
	configWriter      writer.Writer
	initialState      bool
	// Use internal node IPs
	useNodeInternal bool
	// Running in nodeport (or cluster) mode
	isNodePort bool
	// Mutex to control access to node data
	// FIXME: Simple synchronization for now, it remains to be determined if we'll
	// need something more complicated (channels, etc?)
	oldNodesMutex sync.Mutex
	// Nodes from previous iteration of node polling
	oldNodes []string
	// Mutex for all informers (for informer CRUD)
	informersMutex sync.Mutex
	// Mutex for irulesMap
	irulesMutex sync.Mutex
	// Mutex for intDgMap
	intDgMutex sync.Mutex
	// App informer support
	vsQueue      workqueue.RateLimitingInterface
	appInformers map[string]*appInformer
	// Namespace informer support (namespace labels)
	nsQueue    workqueue.RateLimitingInterface
	nsInformer cache.SharedIndexInformer
	// Event recorder
	broadcaster   record.EventBroadcaster
	eventRecorder record.EventRecorder
	eventSource   v1.EventSource
	// Route configurations
	routeConfig RouteConfig
	// Currently configured node label selector
	nodeLabelSelector string
}

// Struct to allow NewManager to receive all or only specific parameters.
type Params struct {
	KubeClient    kubernetes.Interface
	restClient    rest.Interface // package local for unit testing only
	RouteClientV1 rest.Interface
	ConfigWriter  writer.Writer
	UseNodeInternal bool
	IsNodePort      bool
	RouteConfig     RouteConfig
	InitialState    bool                 // Unit testing only
	EventRecorder   record.EventRecorder // Unit testing only
	NodeLabelSelector string
}

// Configuration options for Routes in OpenShift
type RouteConfig struct {
	RouteVSAddr string
	RouteLabel  string
}

// Create and return a new app manager that meets the Manager interface
func NewManager(params *Params) *Manager {
	vsQueue := workqueue.NewNamedRateLimitingQueue(
		workqueue.DefaultControllerRateLimiter(), "virtual-server-controller")
	nsQueue := workqueue.NewNamedRateLimitingQueue(
		workqueue.DefaultControllerRateLimiter(), "namespace-controller")
	manager := Manager{
		resources:         NewResources(),
		customProfiles:    NewCustomProfiles(),
		irulesMap:         make(IRulesMap),
		intDgMap:          make(InternalDataGroupMap),
		kubeClient:        params.KubeClient,
		restClientv1:      params.restClient,
		restClientv1beta1: params.restClient,
		routeClientV1:     params.RouteClientV1,
		configWriter:      params.ConfigWriter,
		useNodeInternal:   params.UseNodeInternal,
		isNodePort:        params.IsNodePort,
		initialState:      params.InitialState,
		eventRecorder:     params.EventRecorder,
		routeConfig:       params.RouteConfig,
		nodeLabelSelector: params.NodeLabelSelector,
		vsQueue:           vsQueue,
		nsQueue:           nsQueue,
		appInformers:      make(map[string]*appInformer),
	}
	if nil != manager.kubeClient && nil == manager.restClientv1 {
		// This is the normal production case, but need the checks for unit tests.
		manager.restClientv1 = manager.kubeClient.Core().RESTClient()
	}
	if nil != manager.kubeClient && nil == manager.restClientv1beta1 {
		// This is the normal production case, but need the checks for unit tests.
		manager.restClientv1beta1 = manager.kubeClient.Extensions().RESTClient()
	}
	manager.eventSource = v1.EventSource{Component: "k8s-bigip-ctlr"}
	manager.broadcaster = record.NewBroadcaster()
	if nil == manager.eventRecorder {
		manager.eventRecorder = manager.broadcaster.NewRecorder(scheme.Scheme, manager.eventSource)
	}

	return &manager
}

// loadDefaultCert builds (once per namespace) a server-ssl profile from the
// cluster default service CA cert mounted into the pod. Returns the profile
// reference and whether it was newly added.
func (appMgr *Manager) loadDefaultCert(
	namespace,
	serverName,
	vsName string,
) (*ProfileRef, bool) {
	// OpenShift will put the default server SSL cert on each pod. We create a
	// server SSL profile for it and associate it to any reencrypt routes that
	// have not explicitly set a certificate.
	profileName := "openshift_route_cluster_default-server-ssl"
	profile := ProfileRef{
		Name:      profileName,
		Partition: DEFAULT_PARTITION,
		Context:   customProfileServer,
	}
	appMgr.customProfiles.Lock()
	defer appMgr.customProfiles.Unlock()
	skey := secretKey{Name: profileName, Namespace: namespace}
	_, found := appMgr.customProfiles.profs[skey]
	if !found {
		path := "/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt"
		data, err := ioutil.ReadFile(path)
		if nil != err {
			log.Errorf("Unable to load default cluster certificate '%v': %v",
				path, err)
			return nil, false
		}
		appMgr.customProfiles.profs[skey] =
			NewCustomProfile(
				profile,
				string(data),
				"", // no key
				serverName,
				vsName,
				appMgr.customProfiles,
			)
	}
	return &profile, !found
}

// addIRule registers an iRule under (name, partition).
func (appMgr *Manager) addIRule(name, partition, rule string) {
	appMgr.irulesMutex.Lock()
	defer appMgr.irulesMutex.Unlock()

	key := nameRef{
		Name:      name,
		Partition: partition,
	}
	appMgr.irulesMap[key] = NewIRule(name, partition, rule)
}

// addInternalDataGroup registers an internal data group under (name, partition).
func (appMgr *Manager) addInternalDataGroup(name, partition string) {
	appMgr.intDgMutex.Lock()
	defer appMgr.intDgMutex.Unlock()

	key := nameRef{
		Name:      name,
		Partition: partition,
	}
	appMgr.intDgMap[key] = NewInternalDataGroup(name, partition)
}

// watchingAllNamespacesLocked reports whether the single "" informer
// (watch-everything) is registered. Caller must hold informersMutex.
func (appMgr *Manager) watchingAllNamespacesLocked() bool {
	if 0 == len(appMgr.appInformers) {
		// Not watching any namespaces.
return false
	}
	// Watch-all mode is represented by an informer stored under the empty key.
	_, watchingAll := appMgr.appInformers[""]
	return watchingAll
}

// AddNamespace registers informers for a namespace (thread-safe wrapper
// around addNamespaceLocked).
func (appMgr *Manager) AddNamespace(
	namespace string,
	cfgMapSelector labels.Selector,
	resyncPeriod time.Duration,
) error {
	appMgr.informersMutex.Lock()
	defer appMgr.informersMutex.Unlock()
	_, err := appMgr.addNamespaceLocked(namespace, cfgMapSelector, resyncPeriod)
	return err
}

// addNamespaceLocked creates (or returns the existing) informer set for a
// namespace. "" means watch all namespaces; mixing the two modes is an
// error. Caller must hold informersMutex.
func (appMgr *Manager) addNamespaceLocked(
	namespace string,
	cfgMapSelector labels.Selector,
	resyncPeriod time.Duration,
) (*appInformer, error) {
	if appMgr.watchingAllNamespacesLocked() {
		return nil, fmt.Errorf(
			"Cannot add additional namespaces when already watching all.")
	}
	if len(appMgr.appInformers) > 0 && "" == namespace {
		return nil, fmt.Errorf(
			"Cannot watch all namespaces when already watching specific ones.")
	}
	// FIX: the original pre-declared 'var appInf *appInformer' and then
	// shadowed it with ':=' inside the if-statement (a go vet -shadow
	// finding). Return the existing informer directly and create the new
	// one without the redundant outer declaration; behavior is unchanged.
	if appInf, found := appMgr.appInformers[namespace]; found {
		return appInf, nil
	}
	appInf := appMgr.newAppInformer(namespace, cfgMapSelector, resyncPeriod)
	appMgr.appInformers[namespace] = appInf
	return appInf, nil
}

// removeNamespace deregisters a namespace's informers (thread-safe wrapper).
func (appMgr *Manager) removeNamespace(namespace string) error {
	appMgr.informersMutex.Lock()
	defer appMgr.informersMutex.Unlock()
	err := appMgr.removeNamespaceLocked(namespace)
	return err
}

// removeNamespaceLocked deletes the namespace's informer entry.
// Caller must hold informersMutex.
func (appMgr *Manager) removeNamespaceLocked(namespace string) error {
	if _, found := appMgr.appInformers[namespace]; !found {
		return fmt.Errorf("No informers exist for namespace %v\n", namespace)
	}
	delete(appMgr.appInformers, namespace)
	return nil
}

// AddNamespaceLabelInformer installs a label-selector-driven namespace
// informer; mutually exclusive with per-namespace informers.
func (appMgr *Manager) AddNamespaceLabelInformer(
	labelSelector labels.Selector,
	resyncPeriod time.Duration,
) error {
	appMgr.informersMutex.Lock()
	defer appMgr.informersMutex.Unlock()
	if nil != appMgr.nsInformer {
		return fmt.Errorf("Already have a namespace label informer added.")
	}
	if 0 != len(appMgr.appInformers) {
		return fmt.Errorf("Cannot set a namespace label informer when informers " +
			"have been setup for one or more namespaces.")
	}
	appMgr.nsInformer = cache.NewSharedIndexInformer(
		newListWatchWithLabelSelector(
appMgr.restClientv1, "namespaces", "", labelSelector, ), &v1.Namespace{}, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, ) appMgr.nsInformer.AddEventHandlerWithResyncPeriod( &cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { appMgr.enqueueNamespace(obj) }, UpdateFunc: func(old, cur interface{}) { appMgr.enqueueNamespace(cur) }, DeleteFunc: func(obj interface{}) { appMgr.enqueueNamespace(obj) }, }, resyncPeriod, ) return nil } func (appMgr *Manager) enqueueNamespace(obj interface{}) { ns := obj.(*v1.Namespace) appMgr.nsQueue.Add(ns.ObjectMeta.Name) } func (appMgr *Manager) namespaceWorker() { for appMgr.processNextNamespace() { } } func (appMgr *Manager) processNextNamespace() bool { key, quit := appMgr.nsQueue.Get() if quit { return false } defer appMgr.nsQueue.Done(key) err := appMgr.syncNamespace(key.(string)) if err == nil { appMgr.nsQueue.Forget(key) return true } utilruntime.HandleError(fmt.Errorf("Sync %v failed with %v", key, err)) appMgr.nsQueue.AddRateLimited(key) return true } func (appMgr *Manager) syncNamespace(nsName string) error { startTime := time.Now() defer func() { endTime := time.Now() log.Debugf("Finished syncing namespace %+v (%v)", nsName, endTime.Sub(startTime)) }() _, exists, err := appMgr.nsInformer.GetIndexer().GetByKey(nsName) if nil != err { log.Warningf("Error looking up namespace '%v': %v\n", nsName, err) return err } appMgr.informersMutex.Lock() defer appMgr.informersMutex.Unlock() appInf, found := appMgr.getNamespaceInformerLocked(nsName) if exists && found { return nil } if exists { // exists but not found in informers map, add cfgMapSelector, err := labels.Parse(DefaultConfigMapLabel) if err != nil { return fmt.Errorf("Failed to parse Label Selector string: %v", err) } appInf, err = appMgr.addNamespaceLocked(nsName, cfgMapSelector, 0) if err != nil { return fmt.Errorf("Failed to add informers for namespace %v: %v", nsName, err) } appInf.start() appInf.waitForCacheSync() } else { 
		// does not exist but found in informers map, delete
		// Clean up all resources that reference a removed namespace
		appInf.stopInformers()
		appMgr.removeNamespaceLocked(nsName)
		appMgr.resources.Lock()
		defer appMgr.resources.Unlock()
		rsDeleted := 0
		appMgr.resources.ForEach(func(key serviceKey, cfg *ResourceConfig) {
			if key.Namespace == nsName {
				if appMgr.resources.Delete(key, "") {
					rsDeleted += 1
				}
			}
		})
		// Only push a new config to the BIG-IP if something was removed.
		if rsDeleted > 0 {
			appMgr.outputConfigLocked()
		}
	}
	return nil
}

// GetWatchedNamespaces returns the namespaces informers currently exist for
// ("" means all namespaces).
func (appMgr *Manager) GetWatchedNamespaces() []string {
	appMgr.informersMutex.Lock()
	defer appMgr.informersMutex.Unlock()
	var namespaces []string
	for k, _ := range appMgr.appInformers {
		namespaces = append(namespaces, k)
	}
	return namespaces
}

// GetNamespaceLabelInformer exposes the namespace label informer (nil when
// not configured).
func (appMgr *Manager) GetNamespaceLabelInformer() cache.SharedIndexInformer {
	return appMgr.nsInformer
}

// serviceQueueKey identifies a service whose virtual-server configs need to
// be re-synced.
type serviceQueueKey struct {
	Namespace   string
	ServiceName string
}

// appInformer bundles the per-namespace informers plus the stop channel
// shared by all of them.
type appInformer struct {
	namespace      string
	cfgMapInformer cache.SharedIndexInformer
	svcInformer    cache.SharedIndexInformer
	endptInformer  cache.SharedIndexInformer
	ingInformer    cache.SharedIndexInformer
	routeInformer  cache.SharedIndexInformer // nil unless a route client exists
	stopCh         chan struct{}
}

// newAppInformer builds the informer set for one namespace: ConfigMaps are
// filtered by cfgMapSelector; Services, Endpoints, and Ingresses are watched
// unfiltered; Routes are added only when a route client is configured.
func (appMgr *Manager) newAppInformer(
	namespace string,
	cfgMapSelector labels.Selector,
	resyncPeriod time.Duration,
) *appInformer {
	appInf := appInformer{
		namespace: namespace,
		stopCh:    make(chan struct{}),
		cfgMapInformer: cache.NewSharedIndexInformer(
			newListWatchWithLabelSelector(
				appMgr.restClientv1,
				"configmaps",
				namespace,
				cfgMapSelector,
			),
			&v1.ConfigMap{},
			resyncPeriod,
			cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		),
		svcInformer: cache.NewSharedIndexInformer(
			newListWatchWithLabelSelector(
				appMgr.restClientv1,
				"services",
				namespace,
				labels.Everything(),
			),
			&v1.Service{},
			resyncPeriod,
			cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		),
		endptInformer: cache.NewSharedIndexInformer(
			newListWatchWithLabelSelector(
				appMgr.restClientv1,
				"endpoints",
				namespace,
				labels.Everything(),
			),
			&v1.Endpoints{},
			resyncPeriod,
			cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		),
		ingInformer: cache.NewSharedIndexInformer(
			newListWatchWithLabelSelector(
				appMgr.restClientv1beta1,
				"ingresses",
				namespace,
				labels.Everything(),
			),
			&v1beta1.Ingress{},
			resyncPeriod,
			cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		),
	}
	if nil != appMgr.routeClientV1 {
		// Routes may optionally be filtered by a user-supplied label selector.
		var label labels.Selector
		var err error
		if len(appMgr.routeConfig.RouteLabel) == 0 {
			label = labels.Everything()
		} else {
			label, err = labels.Parse(appMgr.routeConfig.RouteLabel)
			if err != nil {
				// NOTE(review): on a parse failure 'label' stays nil and is still
				// passed to the list/watch below; only logged, not returned.
				log.Errorf("Failed to parse Label Selector string: %v", err)
			}
		}
		appInf.routeInformer = cache.NewSharedIndexInformer(
			newListWatchWithLabelSelector(
				appMgr.routeClientV1,
				"routes",
				namespace,
				label,
			),
			&routeapi.Route{},
			resyncPeriod,
			cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		)
	}
	// Fan every resource event into the virtual-server work queue via the
	// corresponding enqueue helper (which validates the object first).
	appInf.cfgMapInformer.AddEventHandlerWithResyncPeriod(
		&cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { appMgr.enqueueConfigMap(obj) },
			UpdateFunc: func(old, cur interface{}) { appMgr.enqueueConfigMap(cur) },
			DeleteFunc: func(obj interface{}) { appMgr.enqueueConfigMap(obj) },
		},
		resyncPeriod,
	)
	appInf.svcInformer.AddEventHandlerWithResyncPeriod(
		&cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { appMgr.enqueueService(obj) },
			UpdateFunc: func(old, cur interface{}) { appMgr.enqueueService(cur) },
			DeleteFunc: func(obj interface{}) { appMgr.enqueueService(obj) },
		},
		resyncPeriod,
	)
	appInf.endptInformer.AddEventHandlerWithResyncPeriod(
		&cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { appMgr.enqueueEndpoints(obj) },
			UpdateFunc: func(old, cur interface{}) { appMgr.enqueueEndpoints(cur) },
			DeleteFunc: func(obj interface{}) { appMgr.enqueueEndpoints(obj) },
		},
		resyncPeriod,
	)
	appInf.ingInformer.AddEventHandlerWithResyncPeriod(
		&cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { appMgr.enqueueIngress(obj) },
			UpdateFunc: func(old, cur interface{}) { appMgr.enqueueIngress(cur) },
			DeleteFunc: func(obj interface{}) { appMgr.enqueueIngress(obj) },
		},
		resyncPeriod,
	)
	if nil != appMgr.routeClientV1 {
		appInf.routeInformer.AddEventHandlerWithResyncPeriod(
			&cache.ResourceEventHandlerFuncs{
				AddFunc:    func(obj interface{}) { appMgr.enqueueRoute(obj) },
				UpdateFunc: func(old, cur interface{}) { appMgr.enqueueRoute(cur) },
				DeleteFunc: func(obj interface{}) { appMgr.enqueueRoute(obj) },
			},
			resyncPeriod,
		)
	}
	return &appInf
}

// newListWatchWithLabelSelector builds a cache.ListerWatcher for the given
// resource and namespace, restricted to objects matching labelSelector.
func newListWatchWithLabelSelector(
	c cache.Getter,
	resource string,
	namespace string,
	labelSelector labels.Selector,
) cache.ListerWatcher {
	listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
		return c.Get().
			Namespace(namespace).
			Resource(resource).
			VersionedParams(&options, metav1.ParameterCodec).
			LabelsSelectorParam(labelSelector).
			Do().
			Get()
	}
	watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
		return c.Get().
			Prefix("watch").
			Namespace(namespace).
			Resource(resource).
			VersionedParams(&options, metav1.ParameterCodec).
			LabelsSelectorParam(labelSelector).
			Watch()
	}
	return &cache.ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
}

// enqueueConfigMap queues a sync for every service key referenced by a valid
// ConfigMap; invalid objects are dropped by the check helper.
func (appMgr *Manager) enqueueConfigMap(obj interface{}) {
	if ok, keys := appMgr.checkValidConfigMap(obj); ok {
		for _, key := range keys {
			appMgr.vsQueue.Add(*key)
		}
	}
}

// enqueueService queues a sync for every key referenced by a valid Service.
func (appMgr *Manager) enqueueService(obj interface{}) {
	if ok, keys := appMgr.checkValidService(obj); ok {
		for _, key := range keys {
			appMgr.vsQueue.Add(*key)
		}
	}
}

// enqueueEndpoints queues a sync for every key referenced by valid Endpoints.
func (appMgr *Manager) enqueueEndpoints(obj interface{}) {
	if ok, keys := appMgr.checkValidEndpoints(obj); ok {
		for _, key := range keys {
			appMgr.vsQueue.Add(*key)
		}
	}
}

// enqueueIngress queues a sync for every key referenced by a valid Ingress.
func (appMgr *Manager) enqueueIngress(obj interface{}) {
	if ok, keys := appMgr.checkValidIngress(obj); ok {
		for _, key := range keys {
			appMgr.vsQueue.Add(*key)
		}
	}
}

// enqueueRoute queues a sync for the single key of a valid Route.
func (appMgr *Manager) enqueueRoute(obj interface{}) {
	if ok, key := appMgr.checkValidRoute(obj); ok {
		appMgr.vsQueue.Add(*key)
	}
}

// getNamespaceInformer is the locking wrapper around
// getNamespaceInformerLocked.
func (appMgr *Manager) getNamespaceInformer(
	ns string,
) (*appInformer, bool) {
	appMgr.informersMutex.Lock()
	defer appMgr.informersMutex.Unlock()
	appInf, found := appMgr.getNamespaceInformerLocked(ns)
	return appInf, found
}

// getNamespaceInformerLocked looks up the informer for ns; when watching all
// namespaces the single informer lives under the "" key, regardless of the
// namespace asked for. Caller must hold informersMutex.
func (appMgr *Manager) getNamespaceInformerLocked(
	ns string,
) (*appInformer, bool) {
	toFind := ns
	if appMgr.watchingAllNamespacesLocked() {
		toFind = ""
	}
	appInf, found := appMgr.appInformers[toFind]
	return appInf, found
}

// start runs each informer in its own goroutine until stopCh is closed.
func (appInf *appInformer) start() {
	go appInf.cfgMapInformer.Run(appInf.stopCh)
	go appInf.svcInformer.Run(appInf.stopCh)
	go appInf.endptInformer.Run(appInf.stopCh)
	go appInf.ingInformer.Run(appInf.stopCh)
	if nil != appInf.routeInformer {
		go appInf.routeInformer.Run(appInf.stopCh)
	}
}

// waitForCacheSync blocks until all of this namespace's informer caches have
// completed their initial sync (including routes when present).
func (appInf *appInformer) waitForCacheSync() {
	if nil != appInf.routeInformer {
		cache.WaitForCacheSync(
			appInf.stopCh,
			appInf.cfgMapInformer.HasSynced,
			appInf.svcInformer.HasSynced,
			appInf.endptInformer.HasSynced,
			appInf.ingInformer.HasSynced,
			appInf.routeInformer.HasSynced,
		)
	} else {
		cache.WaitForCacheSync(
			appInf.stopCh,
			appInf.cfgMapInformer.HasSynced,
			appInf.svcInformer.HasSynced,
			appInf.endptInformer.HasSynced,
			appInf.ingInformer.HasSynced,
		)
	}
}

// stopInformers signals every informer in this set to stop by closing the
// shared stop channel.
func (appInf *appInformer) stopInformers() {
	close(appInf.stopCh)
}

// IsNodePort reports whether the manager runs in NodePort mode.
func (appMgr *Manager) IsNodePort() bool {
	return appMgr.isNodePort
}

// UseNodeInternal reports whether internal node addresses are used.
func (appMgr *Manager) UseNodeInternal() bool {
	return appMgr.useNodeInternal
}

// ConfigWriter exposes the writer used to output BIG-IP configs.
func (appMgr *Manager) ConfigWriter() writer.Writer {
	return appMgr.configWriter
}

// Run starts the manager's main loop in a background goroutine.
func (appMgr *Manager) Run(stopCh <-chan struct{}) {
	go appMgr.runImpl(stopCh)
}

// runImpl installs the built-in iRules/data groups, starts all informers and
// workers, then blocks until stopCh is closed.
func (appMgr *Manager) runImpl(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	// Shutting the queues down terminates the worker loops.
	defer appMgr.vsQueue.ShutDown()
	defer appMgr.nsQueue.ShutDown()
	appMgr.addIRule(httpRedirectIRuleName, DEFAULT_PARTITION,
		httpRedirectIRule(DEFAULT_HTTPS_PORT))
	if nil != appMgr.routeClientV1 {
		// Route support needs the passthrough iRule and its data groups.
		appMgr.addIRule(
			sslPassthroughIRuleName, DEFAULT_PARTITION, sslPassthroughIRule())
		appMgr.addInternalDataGroup(passthroughHostsDgName, DEFAULT_PARTITION)
		appMgr.addInternalDataGroup(reencryptHostsDgName, DEFAULT_PARTITION)
	}
	if nil != appMgr.nsInformer {
		// Using one worker for namespace label changes.
		appMgr.startAndSyncNamespaceInformer(stopCh)
		go wait.Until(appMgr.namespaceWorker, time.Second, stopCh)
	}
	appMgr.startAndSyncAppInformers()
	// Using only one virtual server worker currently.
	go wait.Until(appMgr.virtualServerWorker, time.Second, stopCh)
	<-stopCh
	appMgr.stopAppInformers()
}

// startAndSyncNamespaceInformer runs the namespace label informer and waits
// for its cache to sync.
func (appMgr *Manager) startAndSyncNamespaceInformer(stopCh <-chan struct{}) {
	appMgr.informersMutex.Lock()
	defer appMgr.informersMutex.Unlock()
	go appMgr.nsInformer.Run(stopCh)
	cache.WaitForCacheSync(stopCh, appMgr.nsInformer.HasSynced)
}

// startAndSyncAppInformers starts every namespace informer set and waits for
// all caches to sync.
func (appMgr *Manager) startAndSyncAppInformers() {
	appMgr.informersMutex.Lock()
	defer appMgr.informersMutex.Unlock()
	appMgr.startAppInformersLocked()
	appMgr.waitForCacheSyncLocked()
}

// startAppInformersLocked starts all informer sets; caller holds the mutex.
func (appMgr *Manager) startAppInformersLocked() {
	for _, appInf := range appMgr.appInformers {
		appInf.start()
	}
}

// waitForCacheSync is the locking wrapper around waitForCacheSyncLocked.
func (appMgr *Manager) waitForCacheSync() {
	appMgr.informersMutex.Lock()
	defer appMgr.informersMutex.Unlock()
	appMgr.waitForCacheSyncLocked()
}

// waitForCacheSyncLocked waits on each informer set; caller holds the mutex.
func (appMgr *Manager) waitForCacheSyncLocked() {
	for _, appInf := range appMgr.appInformers {
		appInf.waitForCacheSync()
	}
}

// stopAppInformers stops every namespace informer set.
func (appMgr *Manager) stopAppInformers() {
	appMgr.informersMutex.Lock()
	defer appMgr.informersMutex.Unlock()
	for _, appInf := range appMgr.appInformers {
		appInf.stopInformers()
	}
}

// virtualServerWorker drains the virtual-server queue.
func (appMgr *Manager) virtualServerWorker() {
	for appMgr.processNextVirtualServer() {
	}
}

// processNextVirtualServer handles one queued service key; returns false
// only when the queue has been shut down. Failed syncs are re-queued with
// rate limiting.
func (appMgr *Manager) processNextVirtualServer() bool {
	key, quit := appMgr.vsQueue.Get()
	if quit {
		// The controller is shutting down.
		return false
	}
	defer appMgr.vsQueue.Done(key)
	err := appMgr.syncVirtualServer(key.(serviceQueueKey))
	if err == nil {
		appMgr.vsQueue.Forget(key)
		return true
	}
	utilruntime.HandleError(fmt.Errorf("Sync %v failed with %v", key, err))
	appMgr.vsQueue.AddRateLimited(key)
	return true
}

// vsSyncStats tallies what one sync pass found and changed.
type vsSyncStats struct {
	vsFound   int
	vsUpdated int
	vsDeleted int
	cpUpdated int // custom (SSL) profiles updated
	dgUpdated int // internal data groups updated
}

// syncVirtualServer reconciles every resource type (ConfigMap, Ingress,
// Route) that references the queued service, then writes the BIG-IP config
// if anything changed.
func (appMgr *Manager) syncVirtualServer(sKey serviceQueueKey) error {
	startTime := time.Now()
	defer func() {
		endTime := time.Now()
		log.Debugf("Finished syncing virtual servers %+v (%v)",
			sKey, endTime.Sub(startTime))
	}()

	// Get the informers for the namespace. This will tell us if we care about
	// this item.
	appInf, haveNamespace := appMgr.getNamespaceInformer(sKey.Namespace)
	if !haveNamespace {
		// This shouldn't happen as the namespace is checked for every item before
		// it is added to the queue, but issue a warning if it does.
		log.Warningf(
			"Received an update for an item from an un-watched namespace %v",
			sKey.Namespace)
		return nil
	}

	// Lookup the service
	svcKey := sKey.Namespace + "/" + sKey.ServiceName
	obj, svcFound, err := appInf.svcInformer.GetIndexer().GetByKey(svcKey)
	if nil != err {
		// Returning non-nil err will re-queue this item with rate-limiting.
		log.Warningf("Error looking up service '%v': %v\n", svcKey, err)
		return err
	}

	// Use a map to allow ports in the service to be looked up quickly while
	// looping through the ConfigMaps. The value is not currently used.
	svcPortMap := make(map[int32]bool)
	var svc *v1.Service
	if svcFound {
		svc = obj.(*v1.Service)
		for _, portSpec := range svc.Spec.Ports {
			svcPortMap[portSpec.Port] = false
		}
	}

	// rsMap stores all resources currently in Resources matching sKey, indexed by port
	rsMap := appMgr.getResourcesForKey(sKey)

	var stats vsSyncStats
	err = appMgr.syncConfigMaps(&stats, sKey, rsMap, svcPortMap, svc, appInf)
	if nil != err {
		return err
	}
	err = appMgr.syncIngresses(&stats, sKey, rsMap, svcPortMap, svc, appInf)
	if nil != err {
		return err
	}
	if nil != appInf.routeInformer {
		err = appMgr.syncRoutes(&stats, sKey, rsMap, svcPortMap, svc, appInf)
		if nil != err {
			return err
		}
	}

	if len(rsMap) > 0 {
		// We get here when there are ports defined in the service that don't
		// have a corresponding config map.
stats.vsDeleted = appMgr.deleteUnusedResources(sKey, rsMap) appMgr.deleteUnusedRoutes(sKey.Namespace) } log.Debugf("Updated %v of %v virtual server configs, deleted %v", stats.vsUpdated, stats.vsFound, stats.vsDeleted) // delete any custom profiles that are no longer referenced appMgr.deleteUnusedProfiles(sKey.Namespace) if stats.vsUpdated > 0 || stats.vsDeleted > 0 || stats.cpUpdated > 0 || stats.dgUpdated > 0 { appMgr.outputConfig() } else if appMgr.vsQueue.Len() == 0 && appMgr.nsQueue.Len() == 0 { appMgr.resources.Lock() defer appMgr.resources.Unlock() if !appMgr.initialState { appMgr.outputConfigLocked() } } return nil } func (appMgr *Manager) syncConfigMaps( stats *vsSyncStats, sKey serviceQueueKey, rsMap ResourceMap, svcPortMap map[int32]bool, svc *v1.Service, appInf *appInformer, ) error { cfgMapsByIndex, err := appInf.cfgMapInformer.GetIndexer().ByIndex( "namespace", sKey.Namespace) if nil != err { log.Warningf("Unable to list config maps for namespace '%v': %v", sKey.Namespace, err) return err } for _, obj := range cfgMapsByIndex { // We need to look at all config maps in the store, parse the data blob, // and see if it belongs to the service that has changed. cm := obj.(*v1.ConfigMap) if cm.ObjectMeta.Namespace != sKey.Namespace { continue } rsCfg, err := parseConfigMap(cm) if nil != err { // Ignore this config map for the time being. When the user updates it // so that it is valid it will be requeued. fmt.Errorf("Error parsing ConfigMap %v_%v", cm.ObjectMeta.Namespace, cm.ObjectMeta.Name) continue } // Check if SSLProfile(s) are contained in Secrets for _, profile := range rsCfg.Virtual.GetFrontendSslProfileNames() { // Check if profile is contained in a Secret secret, err := appMgr.kubeClient.Core().Secrets(cm.ObjectMeta.Namespace). 
				Get(profile, metav1.GetOptions{})
			if err != nil {
				// No secret, so we assume the profile is a BIG-IP default
				log.Debugf("No Secret with name '%s', parsing secretName as path instead.",
					profile)
				continue
			}
			err, updated := appMgr.handleSslProfile(rsCfg, secret, cm.ObjectMeta.Namespace)
			if err != nil {
				log.Warningf("%v", err)
				continue
			}
			if updated {
				stats.cpUpdated += 1
			}
			// Replace the current stored sslProfile with a correctly formatted
			// profile (since this profile is just a secret name)
			rsCfg.Virtual.RemoveFrontendSslProfileName(profile)
			secretName := formatIngressSslProfileName(
				rsCfg.Virtual.Partition + "/" + profile)
			rsCfg.Virtual.AddFrontendSslProfileName(secretName)
		}
		rsName := rsCfg.Virtual.VirtualServerName
		if ok, found, updated := appMgr.handleConfigForType(
			rsCfg, sKey, rsMap, rsName, svcPortMap, svc, appInf, ""); !ok {
			stats.vsUpdated += updated
			continue
		} else {
			stats.vsFound += found
			stats.vsUpdated += updated
		}

		// Set a status annotation to contain the virtualAddress bindAddr
		if rsCfg.Virtual.IApp == "" &&
			rsCfg.Virtual.VirtualAddress != nil &&
			rsCfg.Virtual.VirtualAddress.BindAddr != "" {
			appMgr.setBindAddrAnnotation(cm, sKey, rsCfg)
		}
	}
	return nil
}

// syncIngresses processes every Ingress in sKey's namespace: builds a
// ResourceConfig per required virtual port, handles TLS and health-monitor
// annotations, and updates the stored configs. Accumulates into stats.
func (appMgr *Manager) syncIngresses(
	stats *vsSyncStats,
	sKey serviceQueueKey,
	rsMap ResourceMap,
	svcPortMap map[int32]bool,
	svc *v1.Service,
	appInf *appInformer,
) error {
	ingByIndex, err := appInf.ingInformer.GetIndexer().ByIndex(
		"namespace", sKey.Namespace)
	if nil != err {
		log.Warningf("Unable to list ingresses for namespace '%v': %v",
			sKey.Namespace, err)
		return err
	}
	for _, obj := range ingByIndex {
		// We need to look at all ingresses in the store, parse the data blob,
		// and see if it belongs to the service that has changed.
		ing := obj.(*v1beta1.Ingress)
		if ing.ObjectMeta.Namespace != sKey.Namespace {
			continue
		}
		// An Ingress may need an HTTP and/or HTTPS virtual server.
		for _, portStruct := range appMgr.virtualPorts(ing) {
			rsCfg := createRSConfigFromIngress(ing, sKey.Namespace,
				appInf.svcInformer.GetIndexer(), portStruct)
			if rsCfg == nil {
				// Currently, an error is returned only if the Ingress is one we
				// do not care about
				continue
			}

			// Handle TLS configuration
			updated := appMgr.handleIngressTls(rsCfg, ing)
			if updated {
				stats.cpUpdated += 1
			}

			// Handle Ingress health monitors
			rsName := rsCfg.Virtual.VirtualServerName
			hmStr, found := ing.ObjectMeta.Annotations[ingHealthMonitorAnnotation]
			if found {
				var monitors IngressHealthMonitors
				err := json.Unmarshal([]byte(hmStr), &monitors)
				if err != nil {
					msg := fmt.Sprintf(
						"Unable to parse health monitor JSON array '%v': %v", hmStr, err)
					log.Errorf("%s", msg)
					appMgr.recordIngressEvent(ing, "InvalidData", msg, rsName)
				} else {
					if nil != ing.Spec.Backend {
						appMgr.handleSingleServiceHealthMonitors(
							rsName, rsCfg, ing, monitors)
					} else {
						appMgr.handleMultiServiceHealthMonitors(
							rsName, rsCfg, ing, monitors)
					}
				}
				rsCfg.SortMonitors()
			}

			// make sure all policies across configs for this Ingress match each other
			appMgr.resources.Lock()
			cfgs, keys := appMgr.resources.GetAllWithName(rsCfg.Virtual.VirtualServerName)
			for i, cfg := range cfgs {
				for _, policy := range rsCfg.Policies {
					if policy.Name == rsCfg.Virtual.VirtualServerName {
						cfg.SetPolicy(policy)
					}
				}
				appMgr.resources.Assign(keys[i], rsCfg.Virtual.VirtualServerName, cfg)
			}
			appMgr.resources.Unlock()

			if ok, found, updated := appMgr.handleConfigForType(
				rsCfg, sKey, rsMap, rsName, svcPortMap, svc, appInf, ""); !ok {
				stats.vsUpdated += updated
				continue
			} else {
				if updated > 0 && !appMgr.processAllMultiSvc(len(rsCfg.Pools),
					rsCfg.Virtual.VirtualServerName) {
					updated -= 1
				}
				stats.vsFound += found
				stats.vsUpdated += updated
				if updated > 0 {
					msg := fmt.Sprintf(
						"Created a ResourceConfig '%v' for the Ingress.",
						rsCfg.Virtual.VirtualServerName)
					appMgr.recordIngressEvent(ing, "ResourceConfigured", msg, "")
				}
			}
			// Set the Ingress Status IP address
			appMgr.setIngressStatus(ing, rsCfg)
		}
	}
	return nil
}

// syncRoutes processes every Route in sKey's namespace, rebuilding the
// route-related internal data groups and the HTTP/HTTPS virtual-server
// configs, including any TLS client/server SSL profiles.
func (appMgr *Manager) syncRoutes(
	stats *vsSyncStats,
	sKey serviceQueueKey,
	rsMap ResourceMap,
	svcPortMap map[int32]bool,
	svc *v1.Service,
	appInf *appInformer,
) error {
	routeByIndex, err := appInf.getOrderedRoutes(sKey.Namespace)
	if nil != err {
		log.Warningf("Unable to list routes for namespace '%v': %v",
			sKey.Namespace, err)
		return err
	}

	// Rebuild all internal data groups for routes as we process each
	dgMap := make(InternalDataGroupMap)
	for _, route := range routeByIndex {
		// We need to look at all routes in the store, parse the data blob,
		// and see if it belongs to the service that has changed.
		if nil != route.Spec.TLS {
			// The information stored in the internal data groups can span multiple
			// namespaces, so we need to keep them updated with all current routes
			// regardless of anything that happens below.
			switch route.Spec.TLS.Termination {
			case routeapi.TLSTerminationPassthrough:
				updateDataGroupForPassthroughRoute(route, DEFAULT_PARTITION, dgMap)
			case routeapi.TLSTerminationReencrypt:
				updateDataGroupForReencryptRoute(route, DEFAULT_PARTITION, dgMap)
			}
		}
		if route.ObjectMeta.Namespace != sKey.Namespace {
			continue
		}
		// Each route yields both an HTTP and an HTTPS virtual server config.
		pStructs := []portStruct{{protocol: "http", port: DEFAULT_HTTP_PORT},
			{protocol: "https", port: DEFAULT_HTTPS_PORT}}
		for _, ps := range pStructs {
			rsCfg, err := createRSConfigFromRoute(route,
				*appMgr.resources, appMgr.routeConfig, ps)
			if err != nil {
				// We return err if there was an error creating a rule
				log.Warningf("%v", err)
				continue
			}

			rsName := rsCfg.Virtual.VirtualServerName
			if ok, found, updated := appMgr.handleConfigForType(
				&rsCfg, sKey, rsMap, rsName, svcPortMap,
				svc, appInf, route.Spec.To.Name); !ok {
				stats.vsUpdated += updated
				continue
			} else {
				stats.vsFound += found
				stats.vsUpdated += updated
			}

			// We store this same config on every route that has the same protocol, but it is only
			// written to the BIG-IP once. This block guarantees that all of these configs
			// are in the same state.
			appMgr.resources.Lock()
			cfgs, keys := appMgr.resources.GetAllWithName(rsCfg.Virtual.VirtualServerName)
			for i, cfg := range cfgs {
				if cfg.Virtual.Partition == rsCfg.Virtual.Partition &&
					!reflect.DeepEqual(cfg, rsCfg) {
					cfg = &rsCfg
					appMgr.resources.Assign(keys[i], cfg.Virtual.VirtualServerName, cfg)
				}
			}
			appMgr.resources.Unlock()

			// TLS Cert/Key
			if nil != route.Spec.TLS &&
				rsCfg.Virtual.VirtualAddress.Port == DEFAULT_HTTPS_PORT {
				switch route.Spec.TLS.Termination {
				case routeapi.TLSTerminationEdge:
					appMgr.setClientSslProfile(stats, sKey, &rsCfg, route)
				case routeapi.TLSTerminationReencrypt:
					appMgr.setClientSslProfile(stats, sKey, &rsCfg, route)
					appMgr.setServerSslProfile(stats, sKey, &rsCfg, route)
				}
			}
		}
	}
	// Update internal data groups for routes if changed
	appMgr.updateRouteDataGroups(stats, dgMap)

	return nil
}

// setClientSslProfile attaches a client-ssl profile to the route's virtual
// server: a custom profile built from the route's cert/key when supplied,
// otherwise the Common/clientssl default.
func (appMgr *Manager) setClientSslProfile(
	stats *vsSyncStats,
	sKey serviceQueueKey,
	rsCfg *ResourceConfig,
	route *routeapi.Route,
) {
	profileName := "Common/clientssl"
	if "" != route.Spec.TLS.Certificate && "" != route.Spec.TLS.Key {
		profile := ProfileRef{
			Name:      route.ObjectMeta.Name + "-https-cert",
			Partition: rsCfg.Virtual.Partition,
			Context:   customProfileClient,
		}
		cp := NewCustomProfile(
			profile,
			route.Spec.TLS.Certificate,
			route.Spec.TLS.Key,
			route.Spec.Host,
			rsCfg.Virtual.VirtualServerName,
			appMgr.customProfiles,
		)
		skey := secretKey{
			Name:         cp.Name,
			Namespace:    sKey.Namespace,
			ResourceName: rsCfg.Virtual.VirtualServerName,
		}
		appMgr.customProfiles.Lock()
		defer appMgr.customProfiles.Unlock()
		// Count as updated only when the stored profile actually differs.
		if prof, ok := appMgr.customProfiles.profs[skey]; ok {
			if !reflect.DeepEqual(prof, cp) {
				stats.cpUpdated += 1
			}
		}
		appMgr.customProfiles.profs[skey] = cp
		profileName = fmt.Sprintf("%s/%s", cp.Partition, cp.Name)
	}
	rsCfg.Virtual.AddFrontendSslProfileName(profileName)
}

// setServerSslProfile attaches a server-ssl profile for re-encrypt routes:
// a custom profile built from the route's destination CA cert when supplied,
// otherwise a default cert loaded via loadDefaultCert.
func (appMgr *Manager) setServerSslProfile(
	stats *vsSyncStats,
	sKey serviceQueueKey,
	rsCfg *ResourceConfig,
	route *routeapi.Route,
) {
	if "" != route.Spec.TLS.DestinationCACertificate {
		// Create new SSL server profile with the provided CA Certificate.
		ruleName := formatRouteRuleName(route)
		profile := ProfileRef{
			Name:      ruleName + "-server-ssl",
			Partition: rsCfg.Virtual.Partition,
			Context:   customProfileServer,
		}
		cp := NewCustomProfile(
			profile,
			route.Spec.TLS.DestinationCACertificate,
			"", // no key
			route.Spec.Host,
			rsCfg.Virtual.VirtualServerName,
			appMgr.customProfiles,
		)
		skey := secretKey{
			Name:         cp.Name,
			Namespace:    sKey.Namespace,
			ResourceName: rsCfg.Virtual.VirtualServerName,
		}
		appMgr.customProfiles.Lock()
		defer appMgr.customProfiles.Unlock()
		if prof, ok := appMgr.customProfiles.profs[skey]; ok {
			if !reflect.DeepEqual(prof, cp) {
				stats.cpUpdated += 1
			}
		}
		appMgr.customProfiles.profs[skey] = cp
		rsCfg.Virtual.AddOrUpdateProfile(profile)
	} else {
		profile, added := appMgr.loadDefaultCert(
			sKey.Namespace,
			route.Spec.Host,
			rsCfg.Virtual.VirtualServerName,
		)
		if nil != profile {
			rsCfg.Virtual.AddOrUpdateProfile(*profile)
		}
		if added {
			stats.cpUpdated += 1
		}
	}
}

// getBooleanAnnotation parses annotations[key] as a bool, falling back to
// defaultValue when the key is absent or unparsable.
func getBooleanAnnotation(
	annotations map[string]string,
	key string,
	defaultValue bool,
) bool {
	val, found := annotations[key]
	if !found {
		return defaultValue
	}
	bVal, err := strconv.ParseBool(val)
	if nil != err {
		log.Errorf("Unable to parse boolean value '%v': %v", val, err)
		return defaultValue
	}
	return bVal
}

// secretKey identifies a stored custom profile by name, namespace, and the
// virtual server that references it.
type secretKey struct {
	Name         string
	Namespace    string
	ResourceName string
}

// Return value is whether or not a custom profile was updated
func (appMgr *Manager) handleIngressTls(
	rsCfg *ResourceConfig,
	ing *v1beta1.Ingress,
) bool {
	if 0 == len(ing.Spec.TLS) {
		// Nothing to do if no TLS section
		return false
	}
	if nil == rsCfg.Virtual.VirtualAddress ||
		rsCfg.Virtual.VirtualAddress.BindAddr == "" {
		// Nothing to do for pool-only mode
		return false
	}

	var httpsPort int32
	if port, ok := ing.ObjectMeta.Annotations["virtual-server.f5.com/https-port"]; ok == true {
		p, _ := strconv.ParseInt(port, 10, 32)
httpsPort = int32(p) } else { httpsPort = DEFAULT_HTTPS_PORT } // If we are processing the HTTPS server, // then we don't need a redirect policy, only profiles if rsCfg.Virtual.VirtualAddress.Port == httpsPort { var cpUpdated, updateState bool for _, tls := range ing.Spec.TLS { // Check if profile is contained in a Secret secret, err := appMgr.kubeClient.Core().Secrets(ing.ObjectMeta.Namespace). Get(tls.SecretName, metav1.GetOptions{}) if err != nil { // No secret, so we assume the profile is a BIG-IP default log.Debugf("No Secret with name '%s': %s. Parsing secretName as path instead.", tls.SecretName, err) secretName := formatIngressSslProfileName(tls.SecretName) rsCfg.Virtual.AddFrontendSslProfileName(secretName) continue } err, cpUpdated = appMgr.handleSslProfile(rsCfg, secret, ing.ObjectMeta.Namespace) if err != nil { log.Warningf("%v", err) continue } updateState = updateState || cpUpdated secretName := formatIngressSslProfileName( rsCfg.Virtual.Partition + "/" + tls.SecretName) rsCfg.Virtual.AddFrontendSslProfileName(secretName) } return cpUpdated } // sslRedirect defaults to true, allowHttp defaults to false. 
	sslRedirect := getBooleanAnnotation(ing.ObjectMeta.Annotations,
		ingressSslRedirect, true)
	allowHttp := getBooleanAnnotation(ing.ObjectMeta.Annotations,
		ingressAllowHttp, false)
	// -----------------------------------------------------------------
	// | State | sslRedirect | allowHttp | Description                 |
	// -----------------------------------------------------------------
	// |   1   |     F       |    F      | Just HTTPS, nothing on HTTP |
	// -----------------------------------------------------------------
	// |   2   |     T       |    F      | HTTP redirects to HTTPS     |
	// -----------------------------------------------------------------
	// |   2   |     T       |    T      | Honor sslRedirect == true   |
	// -----------------------------------------------------------------
	// |   3   |     F       |    T      | Both HTTP and HTTPS         |
	// -----------------------------------------------------------------
	// NOTE(review): 'rule' and 'policyName' are never assigned below, so the
	// policy branch at the bottom is currently unreachable; left as-is.
	var rule *Rule
	var policyName string
	if sslRedirect {
		// State 2, set HTTP redirect iRule
		log.Debugf("TLS: Applying HTTP redirect iRule.")
		ruleName := fmt.Sprintf("/%s/%s", DEFAULT_PARTITION, httpRedirectIRuleName)
		if httpsPort != DEFAULT_HTTPS_PORT {
			// Non-default HTTPS port needs its own redirect iRule variant.
			ruleName = fmt.Sprintf("%s_%d", ruleName, httpsPort)
			appMgr.addIRule(ruleName, DEFAULT_PARTITION,
				httpRedirectIRule(httpsPort))
		}
		rsCfg.Virtual.AddIRule(ruleName)
	} else if allowHttp {
		// State 3, do not apply any policy
		log.Debugf("TLS: Not applying any policies.")
	}

	if nil != rule && "" != policyName {
		policy := rsCfg.FindPolicy("forwarding")
		if nil == policy {
			policy = createPolicy(Rules{rule}, policyName, rsCfg.Virtual.Partition)
		} else {
			rule.Ordinal = len(policy.Rules)
			policy.Rules = append(policy.Rules, rule)
		}
		rsCfg.SetPolicy(*policy)
	}
	return false
}

// handleSslProfile stores (or refreshes) a client-ssl custom profile built
// from the Secret's tls.crt/tls.key. Returns a non-nil error when either
// field is missing, and true when an existing stored profile was changed.
func (appMgr *Manager) handleSslProfile(
	rsCfg *ResourceConfig,
	secret *v1.Secret,
	namespace string) (error, bool) {
	if _, ok := secret.Data["tls.crt"]; !ok {
		err := fmt.Errorf("Invalid Secret '%v': 'tls.crt' field not specified.",
			secret.ObjectMeta.Name)
		return err, false
	}
	if _, ok := secret.Data["tls.key"]; !ok {
		err := fmt.Errorf("Invalid Secret '%v': 'tls.key' field not specified.",
			secret.ObjectMeta.Name)
		return err, false
	}

	cp := CustomProfile{
		Name:      secret.ObjectMeta.Name,
		Partition: rsCfg.Virtual.Partition,
		Context:   customProfileClient,
		Cert:      string(secret.Data["tls.crt"]),
		Key:       string(secret.Data["tls.key"]),
	}
	skey := secretKey{
		Name:         cp.Name,
		Namespace:    namespace,
		ResourceName: rsCfg.Virtual.VirtualServerName,
	}
	appMgr.customProfiles.Lock()
	defer appMgr.customProfiles.Unlock()
	if prof, ok := appMgr.customProfiles.profs[skey]; ok {
		// Report true only when the stored profile actually changed.
		if !reflect.DeepEqual(prof, cp) {
			appMgr.customProfiles.profs[skey] = cp
			return nil, true
		} else {
			return nil, false
		}
	}
	appMgr.customProfiles.profs[skey] = cp
	return nil, false
}

// portStruct pairs a protocol name with its virtual-server port.
type portStruct struct {
	protocol string
	port     int32
}

// Return the required ports for Ingress VS (depending on sslRedirect/allowHttp vals)
func (appMgr *Manager) virtualPorts(ing *v1beta1.Ingress) []portStruct {
	var httpPort int32
	var httpsPort int32
	if port, ok := ing.ObjectMeta.Annotations["virtual-server.f5.com/http-port"]; ok == true {
		p, _ := strconv.ParseInt(port, 10, 32)
		httpPort = int32(p)
	} else {
		httpPort = DEFAULT_HTTP_PORT
	}
	if port, ok := ing.ObjectMeta.Annotations["virtual-server.f5.com/https-port"]; ok == true {
		p, _ := strconv.ParseInt(port, 10, 32)
		httpsPort = int32(p)
	} else {
		httpsPort = DEFAULT_HTTPS_PORT
	}
	// sslRedirect defaults to true, allowHttp defaults to false.
	sslRedirect := getBooleanAnnotation(ing.ObjectMeta.Annotations,
		ingressSslRedirect, true)
	allowHttp := getBooleanAnnotation(ing.ObjectMeta.Annotations,
		ingressAllowHttp, false)

	http := portStruct{
		protocol: "http",
		port:     httpPort,
	}
	https := portStruct{
		protocol: "https",
		port:     httpsPort,
	}
	var ports []portStruct
	if len(ing.Spec.TLS) > 0 {
		if sslRedirect || allowHttp {
			// States 2,3; both HTTP and HTTPS
			// 2 virtual servers needed
			ports = append(ports, http)
			ports = append(ports, https)
		} else {
			// State 1; HTTPS only
			ports = append(ports, https)
		}
	} else {
		// HTTP only, no TLS
		ports = append(ports, http)
	}
	return ports
}

// Common handling function for ConfigMaps, Ingresses, and Routes
func (appMgr *Manager) handleConfigForType(
	rsCfg *ResourceConfig,
	sKey serviceQueueKey,
	rsMap ResourceMap,
	rsName string,
	svcPortMap map[int32]bool,
	svc *v1.Service,
	appInf *appInformer,
	currRouteSvc string, // Only used for Routes
) (bool, int, int) {
	vsFound := 0
	vsUpdated := 0

	// Find the pool in this config that belongs to the changed service.
	var pool Pool
	found := false
	plIdx := 0
	for i, pl := range rsCfg.Pools {
		if pl.ServiceName == sKey.ServiceName {
			found = true
			pool = pl
			plIdx = i
			break
		}
	}
	if !found {
		// If the current cfg has no pool for this service,
		// remove any pools associated with the service,
		// across all stored keys for the resource
		appMgr.resources.Lock()
		defer appMgr.resources.Unlock()
		cfgs, keys := appMgr.resources.GetAllWithName(rsName)
		for i, cfg := range cfgs {
			for j, pool := range cfg.Pools {
				if pool.ServiceName == sKey.ServiceName {
					// Remove pool j by shifting the tail down one slot.
					copy(cfg.Pools[j:], cfg.Pools[j+1:])
					cfg.Pools[len(cfg.Pools)-1] = Pool{}
					cfg.Pools = cfg.Pools[:len(cfg.Pools)-1]
				}
			}
			appMgr.resources.Assign(keys[i], rsName, cfg)
		}
		return false, vsFound, vsUpdated
	}
	svcKey := serviceKey{
		Namespace:   sKey.Namespace,
		ServiceName: pool.ServiceName,
		ServicePort: pool.ServicePort,
	}

	// Match, remove from rsMap so we don't delete it at the end.
	// In the case of Routes: If the svc of the currently processed route doesn't match
	// the svc in our serviceKey, then we don't want to delete it from the map (all routes
	// with the same protocol have the same VS name, so we don't want to ignore a route that
	// was actually deleted).
	cfgList := rsMap[pool.ServicePort]
	if currRouteSvc == "" || currRouteSvc == sKey.ServiceName {
		if len(cfgList) == 1 {
			delete(rsMap, pool.ServicePort)
		} else if len(cfgList) > 1 {
			// More than one config is stored for this port; remove only ours.
			for index, val := range cfgList {
				if val.Virtual.VirtualServerName == rsName {
					cfgList = append(cfgList[:index], cfgList[index+1:]...)
				}
			}
			rsMap[pool.ServicePort] = cfgList
		}
	}

	if _, ok := svcPortMap[pool.ServicePort]; !ok {
		// The service no longer exposes this port; deactivate the VS.
		log.Debugf("Process Service delete - name: %v namespace: %v",
			pool.ServiceName, svcKey.Namespace)
		log.Infof("Port '%v' for service '%v' was not found.",
			pool.ServicePort, pool.ServiceName)
		if appMgr.deactivateVirtualServer(svcKey, rsName, rsCfg, plIdx) {
			vsUpdated += 1
		}
	}

	if nil == svc {
		// The service is gone, de-activate it in the config.
		log.Infof("Service '%v' has not been found.", pool.ServiceName)
		if appMgr.deactivateVirtualServer(svcKey, rsName, rsCfg, plIdx) {
			vsUpdated += 1
		}
		// If this is an Ingress resource, add an event that the service wasn't found
		if strings.HasSuffix(rsName, "ingress") {
			msg := fmt.Sprintf("Service '%v' has not been found.",
				pool.ServiceName)
			appMgr.recordIngressEvent(nil, "ServiceNotFound", msg, rsName)
		}
		return false, vsFound, vsUpdated
	}

	// Update pool members.
	vsFound += 1
	correctBackend := true
	var reason string
	var msg string
	if appMgr.IsNodePort() {
		correctBackend, reason, msg =
			appMgr.updatePoolMembersForNodePort(svc, svcKey, rsCfg, plIdx)
	} else {
		correctBackend, reason, msg =
			appMgr.updatePoolMembersForCluster(svc, svcKey, rsCfg, appInf, plIdx)
	}

	// This will only update the config if the vs actually changed.
	if appMgr.saveVirtualServer(svcKey, rsName, rsCfg) {
		vsUpdated += 1
		// If this is an Ingress resource, add an event if there was a backend error
		if !correctBackend {
			if strings.HasSuffix(rsCfg.Virtual.VirtualServerName, "ingress") {
				appMgr.recordIngressEvent(nil, reason, msg,
					rsCfg.Virtual.VirtualServerName)
			}
		}
	}

	return true, vsFound, vsUpdated
}

// updatePoolMembersForNodePort fills the indexed pool's members from the
// NodePort exposed by the matching service port (nodeport mode).
func (appMgr *Manager) updatePoolMembersForNodePort(
	svc *v1.Service,
	svcKey serviceKey,
	rsCfg *ResourceConfig,
	index int,
) (bool, string, string) {
	if svc.Spec.Type == v1.ServiceTypeNodePort {
		for _, portSpec := range svc.Spec.Ports {
			if portSpec.Port == svcKey.ServicePort {
				log.Debugf("Service backend matched %+v: using node port %v",
					svcKey, portSpec.NodePort)
				rsCfg.MetaData.Active = true
				rsCfg.MetaData.NodePort = portSpec.NodePort
				rsCfg.Pools[index].Members =
					appMgr.getEndpointsForNodePort(portSpec.NodePort)
			}
		}
		return true, "", ""
	} else {
		msg := fmt.Sprintf("Requested service backend '%+v' not of NodePort type",
			svcKey.ServiceName)
		log.Debug(msg)
		return false, "IncorrectBackendServiceType", msg
	}
}

// updatePoolMembersForCluster fills the indexed pool's members from the
// service's Endpoints, looked up in the informer's store (cluster mode).
func (appMgr *Manager) updatePoolMembersForCluster(
	svc *v1.Service,
	sKey serviceKey,
	rsCfg *ResourceConfig,
	appInf *appInformer,
	index int,
) (bool, string, string) {
	svcKey := sKey.Namespace + "/" + sKey.ServiceName
	item, found, _ := appInf.endptInformer.GetStore().GetByKey(svcKey)
	if !found {
		msg := fmt.Sprintf("Endpoints for service '%v' not found!", svcKey)
		log.Debug(msg)
		return false, "EndpointsNotFound", msg
	}
	eps, _ := item.(*v1.Endpoints)
	for _, portSpec := range svc.Spec.Ports {
		if portSpec.Port == sKey.ServicePort {
			ipPorts := getEndpointsForService(portSpec.Name, eps)
			log.Debugf("Found endpoints for backend %+v: %v", sKey, ipPorts)
			rsCfg.MetaData.Active = true
			rsCfg.Pools[index].Members = ipPorts
		}
	}
	return true, "", ""
}

// deactivateVirtualServer marks the config inactive (and clears the indexed
// pool's members) when its backend service or port disappears; returns true
// when the stored config was updated.
func (appMgr *Manager) deactivateVirtualServer(
	sKey serviceKey,
	rsName string,
	rsCfg *ResourceConfig,
	index int,
) bool {
	updateConfig := false
	appMgr.resources.Lock()
	defer
	appMgr.resources.Unlock()
	if rs, ok := appMgr.resources.Get(sKey, rsName); ok {
		rsCfg.MetaData.Active = false
		rsCfg.Pools[index].Members = nil
		if !reflect.DeepEqual(rs, rsCfg) {
			log.Debugf("Service delete matching backend %v %v deactivating config",
				sKey, rsName)
			updateConfig = true
		}
	} else {
		// We have a config map but not a server. Put in the virtual server from
		// the config map.
		updateConfig = true
	}
	if updateConfig {
		appMgr.resources.Assign(sKey, rsName, rsCfg)
	}
	return updateConfig
}

// saveVirtualServer stores newRsCfg for the backend, returning true only when
// the stored config actually changed.
func (appMgr *Manager) saveVirtualServer(
	sKey serviceKey,
	rsName string,
	newRsCfg *ResourceConfig,
) bool {
	appMgr.resources.Lock()
	defer appMgr.resources.Unlock()
	if oldRsCfg, ok := appMgr.resources.Get(sKey, rsName); ok {
		if reflect.DeepEqual(oldRsCfg, newRsCfg) {
			// not changed, don't trigger a config write
			return false
		}
		log.Warningf("Overwriting existing entry for backend %+v", sKey)
	}
	appMgr.resources.Assign(sKey, rsName, newRsCfg)
	return true
}

// getResourcesForKey returns the stored configs for the queued service,
// grouped by service port.
func (appMgr *Manager) getResourcesForKey(sKey serviceQueueKey) ResourceMap {
	// Return a copy of what is stored in resources
	appMgr.resources.Lock()
	defer appMgr.resources.Unlock()
	rsMap := make(ResourceMap)
	appMgr.resources.ForEach(func(key serviceKey, cfg *ResourceConfig) {
		if key.Namespace == sKey.Namespace &&
			key.ServiceName == sKey.ServiceName {
			rsMap[key.ServicePort] = append(rsMap[key.ServicePort], cfg)
		}
	})
	return rsMap
}

// processAllMultiSvc reports whether every pool of a multi-service resource
// already has a stored key/config.
func (appMgr *Manager) processAllMultiSvc(numPools int, rsName string) bool {
	// If multi-service and we haven't yet configured keys/cfgs for each service,
	// then we don't want to update
	appMgr.resources.Lock()
	defer appMgr.resources.Unlock()
	_, keys := appMgr.resources.GetAllWithName(rsName)
	if len(keys) != numPools {
		return false
	}
	return true
}

// deleteUnusedResources removes the configs still left in rsMap (i.e. no
// longer referenced by the processed resource) and reports how many were
// deleted.
func (appMgr *Manager) deleteUnusedResources(
	sKey serviceQueueKey,
	rsMap ResourceMap) int {
	rsDeleted := 0
	appMgr.resources.Lock()
	defer appMgr.resources.Unlock()
	for port, cfgList := range rsMap {
		tmpKey := serviceKey{
			Namespace: sKey.Namespace,
			ServiceName:
			sKey.ServiceName,
			ServicePort: port,
		}
		for _, cfg := range cfgList {
			rsName := cfg.Virtual.VirtualServerName
			if appMgr.resources.Delete(tmpKey, rsName) {
				rsDeleted += 1
			}
		}
	}
	return rsDeleted
}

// If a route is deleted, loop through other route configs and delete pools/rules/profiles
// for the deleted route.
func (appMgr *Manager) deleteUnusedRoutes(namespace string) {
	appMgr.resources.Lock()
	defer appMgr.resources.Unlock()
	var routeName string
	appMgr.resources.ForEach(func(key serviceKey, cfg *ResourceConfig) {
		if cfg.MetaData.ResourceType == "route" {
			for i, pool := range cfg.Pools {
				sKey := serviceKey{
					ServiceName: pool.ServiceName,
					ServicePort: pool.ServicePort,
					Namespace:   key.Namespace,
				}
				// A pool whose service key is no longer stored belongs to a
				// deleted route; strip its rule, pool, and ssl profile.
				if _, ok := appMgr.resources.Get(sKey,
					cfg.Virtual.VirtualServerName); !ok {
					poolName := fmt.Sprintf("/%s/%s", cfg.Virtual.Partition, pool.Name)
					// Delete rule
					for _, pol := range cfg.Policies {
						if len(pol.Rules) == 1 {
							// Last rule in the policy: drop the whole policy.
							nr := nameRef{
								Name:      pol.Name,
								Partition: pol.Partition,
							}
							cfg.RemovePolicy(nr)
							continue
						}
						for i, rule := range pol.Rules {
							if len(rule.Actions) > 0 && rule.Actions[0].Pool == poolName {
								// Rule names carry the route name as the 4th
								// underscore-separated field.
								routeName = strings.Split(rule.Name, "_")[3]
								if i >= len(pol.Rules)-1 {
									pol.Rules = pol.Rules[:len(pol.Rules)-1]
								} else {
									copy(pol.Rules[i:], pol.Rules[i+1:])
									pol.Rules[len(pol.Rules)-1] = &Rule{}
									pol.Rules = pol.Rules[:len(pol.Rules)-1]
								}
								cfg.SetPolicy(pol)
							}
						}
					}
					// Delete pool
					if i >= len(cfg.Pools)-1 {
						cfg.Pools = cfg.Pools[:len(cfg.Pools)-1]
					} else {
						copy(cfg.Pools[i:], cfg.Pools[i+1:])
						cfg.Pools[len(cfg.Pools)-1] = Pool{}
						cfg.Pools = cfg.Pools[:len(cfg.Pools)-1]
					}
					// Delete profile
					if routeName != "" {
						profileName := fmt.Sprintf("%s/%s-https-cert",
							cfg.Virtual.Partition, routeName)
						cfg.Virtual.RemoveFrontendSslProfileName(profileName)
					}
				}
			}
			appMgr.resources.Assign(key, cfg.Virtual.VirtualServerName, cfg)
		}
	})
}

// deleteUnusedProfiles drops custom profiles in this namespace that no stored
// virtual server references anymore.
func (appMgr *Manager) deleteUnusedProfiles(namespace string) {
	var found bool
	appMgr.customProfiles.Lock()
	defer appMgr.customProfiles.Unlock()
	for key, profile := range appMgr.customProfiles.profs {
		found = false
		appMgr.resources.ForEach(func(k serviceKey, rsCfg *ResourceConfig) {
			if key.ResourceName == rsCfg.Virtual.VirtualServerName &&
				key.Namespace == namespace {
				if rsCfg.Virtual.ReferencesProfile(profile) {
					found = true
				}
			}
		})
		if !found {
			delete(appMgr.customProfiles.profs, key)
		}
	}
}

// setBindAddrAnnotation writes the virtual server's bind address onto the
// ConfigMap as a status annotation (only when it changed).
func (appMgr *Manager) setBindAddrAnnotation(
	cm *v1.ConfigMap,
	sKey serviceQueueKey,
	rsCfg *ResourceConfig,
) {
	var doUpdate bool
	if cm.ObjectMeta.Annotations == nil {
		cm.ObjectMeta.Annotations = make(map[string]string)
		doUpdate = true
	} else if cm.ObjectMeta.Annotations[vsBindAddrAnnotation] !=
		rsCfg.Virtual.VirtualAddress.BindAddr {
		doUpdate = true
	}
	if doUpdate {
		cm.ObjectMeta.Annotations[vsBindAddrAnnotation] =
			rsCfg.Virtual.VirtualAddress.BindAddr
		_, err := appMgr.kubeClient.CoreV1().ConfigMaps(sKey.Namespace).Update(cm)
		if nil != err {
			log.Warningf("Error when creating status IP annotation: %s", err)
		} else {
			log.Debugf("Updating ConfigMap %+v annotation - %v: %v",
				sKey, vsBindAddrAnnotation, rsCfg.Virtual.VirtualAddress.BindAddr)
		}
	}
}

// setIngressStatus publishes the virtual IP into the Ingress load-balancer
// status subresource.
func (appMgr *Manager) setIngressStatus(
	ing *v1beta1.Ingress,
	rsCfg *ResourceConfig,
) {
	// Set the ingress status to include the virtual IP
	lbIngress := v1.LoadBalancerIngress{IP: rsCfg.Virtual.VirtualAddress.BindAddr}
	if len(ing.Status.LoadBalancer.Ingress) == 0 {
		ing.Status.LoadBalancer.Ingress = append(ing.Status.LoadBalancer.Ingress, lbIngress)
	} else if ing.Status.LoadBalancer.Ingress[0].IP != rsCfg.Virtual.VirtualAddress.BindAddr {
		ing.Status.LoadBalancer.Ingress[0] = lbIngress
	}
	_, updateErr := appMgr.kubeClient.ExtensionsV1beta1().
		Ingresses(ing.ObjectMeta.Namespace).UpdateStatus(ing)
	if nil != updateErr {
		// Multi-service causes the controller to try to update the status multiple times
		// at once. Ignore this error.
		if strings.Contains(updateErr.Error(), "object has been modified") {
			return
		}
		warning := fmt.Sprintf(
			"Error when setting Ingress status IP for virtual server %v: %v",
			rsCfg.Virtual.VirtualServerName, updateErr)
		log.Warning(warning)
		appMgr.recordIngressEvent(ing, "StatusIPError", warning, "")
	}
}

// This function expects either an Ingress resource or the name of a VS for an Ingress
func (appMgr *Manager) recordIngressEvent(ing *v1beta1.Ingress,
	reason,
	message,
	rsName string) {
	var namespace string
	var name string
	if ing != nil {
		namespace = ing.ObjectMeta.Namespace
	} else {
		// VS names look like "<namespace>_<name>-ingress"; recover both parts.
		namespace = strings.Split(rsName, "_")[0]
		name = rsName[len(namespace)+1 : len(rsName)-len("-ingress")]
	}
	appMgr.broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{
		Interface: appMgr.kubeClient.Core().Events(namespace)})
	// If we aren't given an Ingress resource, we use the name to find it
	var err error
	if ing == nil {
		ing, err = appMgr.kubeClient.Extensions().Ingresses(namespace).
			Get(name, metav1.GetOptions{})
		if nil != err {
			log.Warningf("Could not find Ingress resource '%v'.", name)
			return
		}
	}

	// Create the event
	appMgr.eventRecorder.Event(ing, v1.EventTypeNormal, reason, message)
}

// getEndpointsForService returns one member per endpoint address for the
// subset ports whose name matches portName.
func getEndpointsForService(
	portName string,
	eps *v1.Endpoints,
) []Member {
	var members []Member

	if eps == nil {
		return members
	}

	for _, subset := range eps.Subsets {
		for _, p := range subset.Ports {
			if portName == p.Name {
				for _, addr := range subset.Addresses {
					member := Member{
						Address: addr.IP,
						Port:    p.Port,
						Session: "user-enabled",
					}
					members = append(members, member)
				}
			}
		}
	}
	return members
}

// getEndpointsForNodePort returns one member per cached node, all using the
// given NodePort.
func (appMgr *Manager) getEndpointsForNodePort(
	nodePort int32,
) []Member {
	nodes := appMgr.getNodesFromCache()
	var members []Member
	for _, v := range nodes {
		member := Member{
			Address: v,
			Port:    nodePort,
			Session: "user-enabled",
		}
		members = append(members, member)
	}

	return members
}

// handleConfigMapParseFailure logs the parse error and deletes any virtual
// server previously created from the now-invalid ConfigMap; returns true when
// a virtual server was deleted.
func handleConfigMapParseFailure(
	appMgr *Manager,
	cm *v1.ConfigMap,
	cfg *ResourceConfig,
	err error,
) bool {
	log.Warningf("Could not get config for ConfigMap: %v - %v",
		cm.ObjectMeta.Name, err)
	// If virtual server exists for invalid configmap, delete it
	var serviceName string
	var servicePort int32
	if nil != cfg {
		if len(cfg.Pools) == 0 {
			serviceName = ""
			servicePort = 0
		} else {
			serviceName = cfg.Pools[0].ServiceName
			servicePort = cfg.Pools[0].ServicePort
		}
		sKey := serviceKey{serviceName, servicePort, cm.ObjectMeta.Namespace}
		rsName := formatConfigMapVSName(cm)
		if _, ok := appMgr.resources.Get(sKey, rsName); ok {
			appMgr.resources.Lock()
			defer appMgr.resources.Unlock()
			appMgr.resources.Delete(sKey, rsName)
			delete(cm.ObjectMeta.Annotations, vsBindAddrAnnotation)
			appMgr.kubeClient.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(cm)
			log.Warningf("Deleted virtual server associated with ConfigMap: %v",
				cm.ObjectMeta.Name)
			return true
		}
	}
	return false
}

// Check for a change in Node state
func (appMgr *Manager) ProcessNodeUpdate(
	obj interface{},
	err error,
) {
	if nil != err {
		log.Warningf("Unable to get list of nodes, err=%+v", err)
		return
	}
	newNodes, err := appMgr.getNodeAddresses(obj)
	if nil != err {
		log.Warningf("Unable to get list of nodes, err=%+v", err)
		return
	}
	sort.Strings(newNodes)

	appMgr.resources.Lock()
	defer appMgr.resources.Unlock()
	appMgr.oldNodesMutex.Lock()
	defer appMgr.oldNodesMutex.Unlock()

	// Only check for updates once we are in our initial state
	if appMgr.initialState {
		// Compare last set of nodes with new one
		if !reflect.DeepEqual(newNodes, appMgr.oldNodes) {
			log.Infof("ProcessNodeUpdate: Change in Node state detected")
			// Rebuild pool members of every stored config from the new node set.
			appMgr.resources.ForEach(func(key serviceKey, cfg *ResourceConfig) {
				var members []Member
				for _, node := range newNodes {
					member := Member{
						Address: node,
						Port:    cfg.MetaData.NodePort,
						Session: "user-enabled",
					}
					members = append(members, member)
				}
				cfg.Pools[0].Members = members
			})
			// Output the Big-IP config
			appMgr.outputConfigLocked()

			// Update node cache
			appMgr.oldNodes = newNodes
		}
	} else {
		// Initialize appMgr nodes on our first pass through
		appMgr.oldNodes = newNodes
	}
}

// Return a copy of the node cache
func (appMgr *Manager) getNodesFromCache() []string {
	appMgr.oldNodesMutex.Lock()
	defer appMgr.oldNodesMutex.Unlock()
	nodes := make([]string, len(appMgr.oldNodes))
	copy(nodes, appMgr.oldNodes)

	return nodes
}

// Get a list of Node addresses
func (appMgr *Manager) getNodeAddresses(
	obj interface{},
) ([]string, error) {
	nodes, ok := obj.([]v1.Node)
	if false == ok {
		return nil,
			fmt.Errorf("poll update unexpected type, interface is not []v1.Node")
	}

	addrs := []string{}

	// Pick the address type based on controller configuration.
	var addrType v1.NodeAddressType
	if appMgr.UseNodeInternal() {
		addrType = v1.NodeInternalIP
	} else {
		addrType = v1.NodeExternalIP
	}

	// A node is considered unschedulable if it is marked so, or carries a
	// NoSchedule taint.
	isUnSchedulable := func(node v1.Node) bool {
		for _, t := range node.Spec.Taints {
			if v1.TaintEffectNoSchedule == t.Effect {
				return true
			}
		}
		return node.Spec.Unschedulable
	}

	for _, node := range nodes {
		if 0 == len(appMgr.nodeLabelSelector) && isUnSchedulable(node) {
			// Skip unschedulable nodes only when there isn't a node
			// selector
			continue
		} else {
			nodeAddrs := node.Status.Addresses
			for _, addr := range nodeAddrs {
				if addr.Type == addrType {
					addrs = append(addrs, addr.Address)
				}
			}
		}
	}

	return addrs, nil
}
package chunked

import (
	archivetar "archive/tar"
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"reflect"
	"sort"
	"strings"
	"sync/atomic"
	"syscall"
	"time"

	"github.com/containerd/stargz-snapshotter/estargz"
	storage "github.com/containers/storage"
	graphdriver "github.com/containers/storage/drivers"
	driversCopy "github.com/containers/storage/drivers/copy"
	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/chunked/internal"
	"github.com/containers/storage/pkg/idtools"
	"github.com/containers/storage/pkg/system"
	"github.com/containers/storage/types"
	securejoin "github.com/cyphar/filepath-securejoin"
	"github.com/klauspost/compress/zstd"
	"github.com/klauspost/pgzip"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/vbatts/tar-split/archive/tar"
	"golang.org/x/sys/unix"
)

const (
	maxNumberMissingChunks = 1024

	// O_EXCL: never clobber a file that already exists in the checkout.
	newFileFlags = (unix.O_CREAT | unix.O_TRUNC | unix.O_EXCL | unix.O_WRONLY)

	containersOverrideXattr = "user.containers.override_stat"
	bigDataKey              = "zstd-chunked-manifest"

	fileTypeZstdChunked   = iota
	fileTypeEstargz       = iota
	fileTypeNoCompression = iota
)

type compressedFileType int

// chunkedDiffer holds the per-layer state needed to apply a partial
// (chunked) pull: the TOC manifest, its offset, and reusable decompressors.
type chunkedDiffer struct {
	stream      ImageSourceSeekable
	manifest    []byte
	layersCache *layersCache
	tocOffset   int64
	fileType    compressedFileType

	copyBuffer []byte

	gzipReader *pgzip.Reader
	zstdReader *zstd.Decoder
}

// xattrs that are never copied onto extracted files.
var xattrsToIgnore = map[string]interface{}{
	"security.selinux": true,
}

// timeToTimespec converts an optional time to a unix.Timespec, mapping a
// nil/zero time to the UTIME_OMIT sentinel so utimensat leaves it unchanged.
func timeToTimespec(time *time.Time) (ts unix.Timespec) {
	if time == nil || time.IsZero() {
		// Return UTIME_OMIT special value
		ts.Sec = 0
		ts.Nsec = ((1 << 30) - 2)
		return
	}
	return unix.NsecToTimespec(time.UnixNano())
}

// doHardLink links the open descriptor srcFd as destBase inside the directory
// open at destDirFd, replacing an existing entry if necessary.
func doHardLink(srcFd int, destDirFd int, destBase string) error {
	doLink := func() error {
		// Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses
		// /proc/self/fd doesn't and can be used with rootless.
		srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFd)
		return unix.Linkat(unix.AT_FDCWD, srcPath, destDirFd, destBase, unix.AT_SYMLINK_FOLLOW)
	}

	err := doLink()

	// if the destination exists, unlink it first and try again
	if err != nil && os.IsExist(err) {
		unix.Unlinkat(destDirFd, destBase, 0)
		return doLink()
	}
	return err
}

// copyFileContent copies (or, when useHardLinks is set, hard-links) the
// content of the open descriptor srcFd to destFile under the rootfs open at
// dirfd. On success it returns the destination file (nil when hard-linked)
// and the number of bytes in the source.
func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) {
	src := fmt.Sprintf("/proc/self/fd/%d", srcFd)
	st, err := os.Stat(src)
	if err != nil {
		return nil, -1, fmt.Errorf("copy file content for %q: %w", destFile, err)
	}

	copyWithFileRange, copyWithFileClone := true, true

	if useHardLinks {
		destDirPath := filepath.Dir(destFile)
		destBase := filepath.Base(destFile)
		destDir, err := openFileUnderRoot(destDirPath, dirfd, 0, mode)
		if err == nil {
			defer destDir.Close()

			err := doHardLink(srcFd, int(destDir.Fd()), destBase)
			if err == nil {
				return nil, st.Size(), nil
			}
		}
		// Hard-link failed; fall through to a regular copy.
	}

	// If the destination file already exists, we shouldn't blow it away
	dstFile, err := openFileUnderRoot(destFile, dirfd, newFileFlags, mode)
	if err != nil {
		return nil, -1, fmt.Errorf("open file %q under rootfs for copy: %w", destFile, err)
	}

	err = driversCopy.CopyRegularToFile(src, dstFile, st, &copyWithFileRange, &copyWithFileClone)
	if err != nil {
		dstFile.Close()
		return nil, -1, fmt.Errorf("copy to file %q under rootfs: %w", destFile, err)
	}
	return dstFile, st.Size(), nil
}

// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { if _, ok := annotations[internal.ManifestChecksumKey]; ok { return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss) } if _, ok := annotations[estargz.TOCJSONDigestAnnotation]; ok { return makeEstargzChunkedDiffer(ctx, store, blobSize, annotations, iss) } return nil, errors.New("blob type not supported for partial retrieval") } func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) { manifest, tocOffset, err := readZstdChunkedManifest(iss, blobSize, annotations) if err != nil { return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) } layersCache, err := getLayersCache(store) if err != nil { return nil, err } return &chunkedDiffer{ copyBuffer: makeCopyBuffer(), stream: iss, manifest: manifest, layersCache: layersCache, tocOffset: tocOffset, fileType: fileTypeZstdChunked, }, nil } func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) { manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, annotations) if err != nil { return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) } layersCache, err := getLayersCache(store) if err != nil { return nil, err } return &chunkedDiffer{ copyBuffer: makeCopyBuffer(), stream: iss, manifest: manifest, layersCache: layersCache, tocOffset: tocOffset, fileType: fileTypeEstargz, }, nil } func makeCopyBuffer() []byte { return make([]byte, 2<<20) } // copyFileFromOtherLayer copies a file from another layer // file is the file to look for. // source is the path to the source layer checkout. // otherFile contains the metadata for the file. // dirfd is an open file descriptor to the destination root directory. 
// useHardLinks defines whether the deduplication can be performed using hard links.
func copyFileFromOtherLayer(file *internal.FileMetadata, source string, otherFile *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
	srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0)
	if err != nil {
		return false, nil, 0, fmt.Errorf("open source file %q: %w", source, err)
	}
	defer unix.Close(srcDirfd)

	srcFile, err := openFileUnderRoot(otherFile.Name, srcDirfd, unix.O_RDONLY, 0)
	if err != nil {
		return false, nil, 0, fmt.Errorf("open source file %q under target rootfs: %w", otherFile.Name, err)
	}
	defer srcFile.Close()

	dstFile, written, err := copyFileContent(int(srcFile.Fd()), file.Name, dirfd, 0, useHardLinks)
	if err != nil {
		return false, nil, 0, fmt.Errorf("copy content to %q: %w", file.Name, err)
	}
	return true, dstFile, written, nil
}

// canDedupMetadataWithHardLink says whether it is possible to deduplicate file with otherFile.
// It checks that the two files have the same UID, GID, file mode and xattrs.
func canDedupMetadataWithHardLink(file *internal.FileMetadata, otherFile *internal.FileMetadata) bool {
	if file.UID != otherFile.UID {
		return false
	}
	if file.GID != otherFile.GID {
		return false
	}
	if file.Mode != otherFile.Mode {
		return false
	}
	if !reflect.DeepEqual(file.Xattrs, otherFile.Xattrs) {
		return false
	}
	return true
}

// canDedupFileWithHardLink checks if the specified file can be deduplicated by an
// open file, given its descriptor and stat data.
func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo) bool {
	st, ok := s.Sys().(*syscall.Stat_t)
	if !ok {
		return false
	}

	path := fmt.Sprintf("/proc/self/fd/%d", fd)

	listXattrs, err := system.Llistxattr(path)
	if err != nil {
		// Any xattr failure disqualifies the candidate rather than erroring out.
		return false
	}

	xattrs := make(map[string]string)
	for _, x := range listXattrs {
		v, err := system.Lgetxattr(path, x)
		if err != nil {
			return false
		}

		if _, found := xattrsToIgnore[x]; found {
			continue
		}
		xattrs[x] = string(v)
	}
	// fill only the attributes used by canDedupMetadataWithHardLink.
	otherFile := internal.FileMetadata{
		UID:    int(st.Uid),
		GID:    int(st.Gid),
		Mode:   int64(st.Mode),
		Xattrs: xattrs,
	}
	return canDedupMetadataWithHardLink(file, &otherFile)
}

// getFileDigest streams f through the canonical digest algorithm, using buf
// as the copy buffer.
func getFileDigest(f *os.File, buf []byte) (digest.Digest, error) {
	digester := digest.Canonical.Digester()
	if _, err := io.CopyBuffer(digester.Hash(), f, buf); err != nil {
		return "", err
	}
	return digester.Digest(), nil
}

// findFileInOSTreeRepos checks whether the requested file already exist in one of the OSTree repo and copies the file content from there if possible.
// file is the file to look for.
// ostreeRepos is a list of OSTree repos.
// dirfd is an open fd to the destination checkout.
// useHardLinks defines whether the deduplication can be performed using hard links.
func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
	digest, err := digest.Parse(file.Digest)
	if err != nil {
		// An unparsable digest just means "not found"; dedup is best-effort.
		return false, nil, 0, nil
	}
	payloadLink := digest.Encoded() + ".payload-link"
	if len(payloadLink) < 2 {
		return false, nil, 0, nil
	}

	for _, repo := range ostreeRepos {
		// OSTree object layout: objects/<first two digest chars>/<rest>.
		sourceFile := filepath.Join(repo, "objects", payloadLink[:2], payloadLink[2:])
		st, err := os.Stat(sourceFile)
		if err != nil || !st.Mode().IsRegular() {
			continue
		}
		if st.Size() != file.Size {
			continue
		}
		fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0)
		if err != nil {
			return false, nil, 0, nil
		}
		f := os.NewFile(uintptr(fd), "fd")
		defer f.Close()

		// check if the open file can be deduplicated with hard links
		if useHardLinks && !canDedupFileWithHardLink(file, fd, st) {
			continue
		}

		dstFile, written, err := copyFileContent(fd, file.Name, dirfd, 0, useHardLinks)
		if err != nil {
			return false, nil, 0, nil
		}
		return true, dstFile, written, nil
	}
	// If hard links deduplication was used and it has failed, try again without hard links.
	if useHardLinks {
		return findFileInOSTreeRepos(file, ostreeRepos, dirfd, false)
	}
	return false, nil, 0, nil
}

// findFileOnTheHost checks whether the requested file already exist on the host and copies the file content from there if possible.
// It is currently implemented to look only at the file with the same path. Ideally it can detect the same content also at different
// paths.
// file is the file to look for.
// dirfd is an open fd to the destination checkout.
// useHardLinks defines whether the deduplication can be performed using hard links.
func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool, buf []byte) (bool, *os.File, int64, error) {
	sourceFile := filepath.Clean(filepath.Join("/", file.Name))
	if !strings.HasPrefix(sourceFile, "/usr/") {
		// limit host deduplication to files under /usr.
		return false, nil, 0, nil
	}

	st, err := os.Stat(sourceFile)
	if err != nil || !st.Mode().IsRegular() {
		return false, nil, 0, nil
	}

	if st.Size() != file.Size {
		return false, nil, 0, nil
	}

	fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0)
	if err != nil {
		return false, nil, 0, nil
	}

	f := os.NewFile(uintptr(fd), "fd")
	defer f.Close()

	manifestChecksum, err := digest.Parse(file.Digest)
	if err != nil {
		return false, nil, 0, err
	}

	// Verify the host file's content actually matches the manifest digest.
	checksum, err := getFileDigest(f, buf)
	if err != nil {
		return false, nil, 0, err
	}
	if checksum != manifestChecksum {
		return false, nil, 0, nil
	}

	// check if the open file can be deduplicated with hard links
	useHardLinks = useHardLinks && canDedupFileWithHardLink(file, fd, st)

	dstFile, written, err := copyFileContent(fd, file.Name, dirfd, 0, useHardLinks)
	if err != nil {
		return false, nil, 0, nil
	}

	// calculate the checksum again to make sure the file wasn't modified while it was copied
	if _, err := f.Seek(0, 0); err != nil {
		dstFile.Close()
		return false, nil, 0, err
	}
	checksum, err = getFileDigest(f, buf)
	if err != nil {
		dstFile.Close()
		return false, nil, 0, err
	}
	if checksum != manifestChecksum {
		dstFile.Close()
		return false, nil, 0, nil
	}
	return true, dstFile, written, nil
}

// findFileState carries the state for the layers-cache visitor used by
// findFileInOtherLayers.
type findFileState struct {
	file         *internal.FileMetadata
	useHardLinks bool
	dirfd        int
	found        bool
	dstFile      *os.File
	written      int64
	retError     error
}

// VisitFile is invoked for each candidate file in another layer; it returns
// false to stop visiting once a copy succeeded.
func (v *findFileState) VisitFile(candidate *internal.FileMetadata, target string) (bool, error) {
	if v.useHardLinks && !canDedupMetadataWithHardLink(v.file, candidate) {
		return true, nil
	}

	found, dstFile, written, err := copyFileFromOtherLayer(v.file, target, candidate, v.dirfd, v.useHardLinks)
	if found && err == nil {
		v.found = found
		v.dstFile = dstFile
		v.written = written
		v.retError = err
		return false, nil
	}
	return true, nil
}

// findFileInOtherLayers finds the specified file in other layers.
// cache is the layers cache to use.
// file is the file to look for.
// dirfd is an open file descriptor to the checkout root directory. // useHardLinks defines whether the deduplication can be performed using hard links. func findFileInOtherLayers(cache *layersCache, file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { visitor := &findFileState{ file: file, useHardLinks: useHardLinks, dirfd: dirfd, } if err := cache.findFileInOtherLayers(file, visitor); err != nil { return false, nil, 0, err } return visitor.found, visitor.dstFile, visitor.written, visitor.retError } func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOptions) error { if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 { return nil } idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) for i := range manifest { if options.ChownOpts != nil { manifest[i].UID = options.ChownOpts.UID manifest[i].GID = options.ChownOpts.GID } else { pair := idtools.IDPair{ UID: manifest[i].UID, GID: manifest[i].GID, } var err error manifest[i].UID, manifest[i].GID, err = idMappings.ToContainer(pair) if err != nil { return err } } } return nil } type originFile struct { Root string Path string Offset int64 } type missingFileChunk struct { Gap int64 File *internal.FileMetadata CompressedSize int64 UncompressedSize int64 } type missingPart struct { SourceChunk *ImageSourceChunk OriginFile *originFile Chunks []missingFileChunk } func (o *originFile) OpenFile() (io.ReadCloser, error) { srcDirfd, err := unix.Open(o.Root, unix.O_RDONLY, 0) if err != nil { return nil, fmt.Errorf("open source file %q: %w", o.Root, err) } defer unix.Close(srcDirfd) srcFile, err := openFileUnderRoot(o.Path, srcDirfd, unix.O_RDONLY, 0) if err != nil { return nil, fmt.Errorf("open source file %q under target rootfs: %w", o.Path, err) } if _, err := srcFile.Seek(o.Offset, 0); err != nil { srcFile.Close() return nil, err } return srcFile, nil } // setFileAttrs sets the file attributes for file given 
metadata func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions, usePath bool) error { if file == nil || file.Fd() < 0 { return errors.Errorf("invalid file") } fd := int(file.Fd()) t, err := typeToTarType(metadata.Type) if err != nil { return err } // If it is a symlink, force to use the path if t == tar.TypeSymlink { usePath = true } baseName := "" if usePath { dirName := filepath.Dir(metadata.Name) if dirName != "" { parentFd, err := openFileUnderRoot(dirName, dirfd, unix.O_PATH|unix.O_DIRECTORY, 0) if err != nil { return err } defer parentFd.Close() dirfd = int(parentFd.Fd()) } baseName = filepath.Base(metadata.Name) } doChown := func() error { if usePath { return unix.Fchownat(dirfd, baseName, metadata.UID, metadata.GID, unix.AT_SYMLINK_NOFOLLOW) } return unix.Fchown(fd, metadata.UID, metadata.GID) } doSetXattr := func(k string, v []byte) error { return unix.Fsetxattr(fd, k, v, 0) } doUtimes := func() error { ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)} if usePath { return unix.UtimesNanoAt(dirfd, baseName, ts, unix.AT_SYMLINK_NOFOLLOW) } return unix.UtimesNanoAt(unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", fd), ts, 0) } doChmod := func() error { if usePath { return unix.Fchmodat(dirfd, baseName, uint32(mode), unix.AT_SYMLINK_NOFOLLOW) } return unix.Fchmod(fd, uint32(mode)) } if err := doChown(); err != nil { if !options.IgnoreChownErrors { return fmt.Errorf("chown %q to %d:%d: %w", metadata.Name, metadata.UID, metadata.GID, err) } } canIgnore := func(err error) bool { return err == nil || errors.Is(err, unix.ENOSYS) || errors.Is(err, unix.ENOTSUP) } for k, v := range metadata.Xattrs { if _, found := xattrsToIgnore[k]; found { continue } data, err := base64.StdEncoding.DecodeString(v) if err != nil { return fmt.Errorf("decode xattr %q: %w", v, err) } if err := doSetXattr(k, data); !canIgnore(err) { return fmt.Errorf("set xattr %s=%q for %q: %w", 
				k, data, metadata.Name, err)
		}
	}

	if err := doUtimes(); !canIgnore(err) {
		return fmt.Errorf("set utimes for %q: %w", metadata.Name, err)
	}

	if err := doChmod(); !canIgnore(err) {
		return fmt.Errorf("chmod %q: %w", metadata.Name, err)
	}
	return nil
}

// openFileUnderRootFallback is the userspace path-resolution fallback used
// when openat2(2) is not available: it resolves name under dirfd with
// securejoin and then re-checks that the opened fd still resolves inside the
// root.
func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
	root := fmt.Sprintf("/proc/self/fd/%d", dirfd)

	targetRoot, err := os.Readlink(root)
	if err != nil {
		return -1, err
	}

	hasNoFollow := (flags & unix.O_NOFOLLOW) != 0

	fd := -1
	// If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the
	// last component as the path to openat().
	if hasNoFollow {
		dirName := filepath.Dir(name)
		if dirName != "" {
			newRoot, err := securejoin.SecureJoin(root, filepath.Dir(name))
			if err != nil {
				return -1, err
			}
			root = newRoot
		}

		parentDirfd, err := unix.Open(root, unix.O_PATH, 0)
		if err != nil {
			return -1, err
		}
		defer unix.Close(parentDirfd)

		fd, err = unix.Openat(parentDirfd, filepath.Base(name), int(flags), uint32(mode))
		if err != nil {
			return -1, err
		}
	} else {
		newPath, err := securejoin.SecureJoin(root, name)
		if err != nil {
			return -1, err
		}
		// NOTE(review): newPath is rooted at /proc/self/fd/..., i.e. an
		// absolute path, so openat ignores dirfd here; the post-open Readlink
		// check below is what guards against escapes — confirm this is
		// race-free enough for the intended threat model.
		fd, err = unix.Openat(dirfd, newPath, int(flags), uint32(mode))
		if err != nil {
			return -1, err
		}
	}

	target, err := os.Readlink(fmt.Sprintf("/proc/self/fd/%d", fd))
	if err != nil {
		unix.Close(fd)
		return -1, err
	}

	// Add an additional check to make sure the opened fd is inside the rootfs
	if !strings.HasPrefix(target, targetRoot) {
		unix.Close(fd)
		return -1, fmt.Errorf("error while resolving %q. It resolves outside the root directory", name)
	}

	return fd, err
}

// openFileUnderRootOpenat2 opens name under dirfd with openat2(2) using
// RESOLVE_IN_ROOT, letting the kernel do the in-root resolution.
func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
	how := unix.OpenHow{
		Flags:   flags,
		Mode:    uint64(mode & 07777),
		Resolve: unix.RESOLVE_IN_ROOT,
	}
	return unix.Openat2(dirfd, name, &how)
}

// skipOpenat2 is set when openat2 is not supported by the underlying kernel and avoid
// using it again.
var skipOpenat2 int32 // openFileUnderRootRaw tries to open a file using openat2 and if it is not supported fallbacks to a // userspace lookup. func openFileUnderRootRaw(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { var fd int var err error if atomic.LoadInt32(&skipOpenat2) > 0 { fd, err = openFileUnderRootFallback(dirfd, name, flags, mode) } else { fd, err = openFileUnderRootOpenat2(dirfd, name, flags, mode) // If the function failed with ENOSYS, switch off the support for openat2 // and fallback to using safejoin. if err != nil && errors.Is(err, unix.ENOSYS) { atomic.StoreInt32(&skipOpenat2, 1) fd, err = openFileUnderRootFallback(dirfd, name, flags, mode) } } return fd, err } // openFileUnderRoot safely opens a file under the specified root directory using openat2 // name is the path to open relative to dirfd. // dirfd is an open file descriptor to the target checkout directory. // flags are the flags to pass to the open syscall. // mode specifies the mode to use for newly created files. func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) { fd, err := openFileUnderRootRaw(dirfd, name, flags, mode) if err == nil { return os.NewFile(uintptr(fd), name), nil } hasCreate := (flags & unix.O_CREAT) != 0 if errors.Is(err, unix.ENOENT) && hasCreate { parent := filepath.Dir(name) if parent != "" { newDirfd, err2 := openOrCreateDirUnderRoot(parent, dirfd, 0) if err2 == nil { defer newDirfd.Close() fd, err := openFileUnderRootRaw(dirfd, name, flags, mode) if err == nil { return os.NewFile(uintptr(fd), name), nil } } } } return nil, fmt.Errorf("open %q under the rootfs: %w", name, err) } // openOrCreateDirUnderRoot safely opens a directory or create it if it is missing. // name is the path to open relative to dirfd. // dirfd is an open file descriptor to the target checkout directory. // mode specifies the mode to use for newly created files. 
func openOrCreateDirUnderRoot(name string, dirfd int, mode os.FileMode) (*os.File, error) {
	fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY, mode)
	if err == nil {
		return os.NewFile(uintptr(fd), name), nil
	}

	if errors.Is(err, unix.ENOENT) {
		parent := filepath.Dir(name)
		if parent != "" {
			// Recursively ensure the parent exists, then create the last component.
			pDir, err2 := openOrCreateDirUnderRoot(parent, dirfd, mode)
			if err2 != nil {
				// Surface the original lookup error, not the recursion's.
				return nil, err
			}
			defer pDir.Close()

			baseName := filepath.Base(name)

			// NOTE(review): intermediate directories are created 0755 and the
			// mkdir error is discarded in favor of the original ENOENT —
			// confirm both are intended.
			if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0755); err2 != nil {
				return nil, err
			}

			fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY, mode)
			if err == nil {
				return os.NewFile(uintptr(fd), name), nil
			}
		}
	}
	return nil, err
}

// appendCompressedStreamToFile reads up to size decompressed bytes for the given
// compression type from reader and writes them to destFile.to (which also feeds
// the file's digester).  The zstd/gzip decoders are cached on the differ and
// reset per call.
func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileType, destFile *destinationFile, reader io.Reader, size int64) (err error) {
	switch compression {
	case fileTypeZstdChunked:
		if c.zstdReader == nil {
			r, err := zstd.NewReader(reader)
			if err != nil {
				return err
			}
			c.zstdReader = r
		} else {
			if err := c.zstdReader.Reset(reader); err != nil {
				return err
			}
		}
		defer c.zstdReader.Reset(nil)
		if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.zstdReader, size), c.copyBuffer); err != nil {
			return err
		}
	case fileTypeEstargz:
		if c.gzipReader == nil {
			r, err := pgzip.NewReader(reader)
			if err != nil {
				return err
			}
			c.gzipReader = r
		} else {
			if err := c.gzipReader.Reset(reader); err != nil {
				return err
			}
		}
		defer c.gzipReader.Close()
		if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.gzipReader, size), c.copyBuffer); err != nil {
			return err
		}
	case fileTypeNoCompression:
		_, err := io.CopyBuffer(destFile.to, io.LimitReader(reader, size), c.copyBuffer)
		if err != nil {
			return err
		}
	default:
		return fmt.Errorf("unknown file type %q", c.fileType)
	}
	return nil
}

// destinationFile is an output file being reassembled; everything written to
// `to` goes both to the file and to the digester used for checksum validation.
type destinationFile struct {
	dirfd    int
	file     *os.File
	digester digest.Digester
	to       io.Writer
	metadata *internal.FileMetadata
	options  *archive.TarOptions
}

// openDestinationFile creates metadata.Name under dirfd and wires up the
// digesting writer.
func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions) (*destinationFile, error) {
	file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0)
	if err != nil {
		return nil, err
	}

	digester := digest.Canonical.Digester()
	to := io.MultiWriter(file, digester.Hash())

	return &destinationFile{
		file:     file,
		digester: digester,
		to:       to,
		metadata: metadata,
		options:  options,
		dirfd:    dirfd,
	}, nil
}

// Close validates the written content against the manifest digest and, on
// success, applies the file's recorded attributes.
func (d *destinationFile) Close() error {
	manifestChecksum, err := digest.Parse(d.metadata.Digest)
	if err != nil {
		return err
	}
	if d.digester.Digest() != manifestChecksum {
		return fmt.Errorf("checksum mismatch for %q (got %q instead of %q)", d.file.Name(), d.digester.Digest(), manifestChecksum)
	}
	return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false)
}

// closeDestinationFiles drains files, closing each one and forwarding the
// result, then closes the errors channel.  Run as a goroutine by
// storeMissingFiles.
func closeDestinationFiles(files chan *destinationFile, errors chan error) {
	for f := range files {
		errors <- f.Close()
	}
	close(errors)
}

// storeMissingFiles consumes the data for each missing part — either from a
// local origin file or from the remote streams channel — and writes it into
// the destination files under dirfd.
func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) {
	var destFile *destinationFile

	filesToClose := make(chan *destinationFile, 3)
	closeFilesErrors := make(chan error, 2)

	// Close completed files concurrently; the deferred drain below picks up
	// any close error as the function's result.
	go closeDestinationFiles(filesToClose, closeFilesErrors)
	defer func() {
		close(filesToClose)
		for e := range closeFilesErrors {
			if e != nil && Err == nil {
				Err = e
			}
		}
	}()

	for _, missingPart := range missingParts {
		var part io.ReadCloser
		compression := c.fileType
		switch {
		case missingPart.OriginFile != nil:
			// The data is available locally in another layer/file.
			var err error
			part, err = missingPart.OriginFile.OpenFile()
			if err != nil {
				return err
			}
			compression = fileTypeNoCompression
		case missingPart.SourceChunk != nil:
			// The data comes from the remote multirange request.
			select {
			case p := <-streams:
				part = p
			case err := <-errs:
				return err
			}
			if part == nil {
				return errors.Errorf("invalid stream returned")
			}
		default:
			return errors.Errorf("internal error: missing part misses both local and remote data stream")
		}

		for _, mf :=
range missingPart.Chunks {
			if mf.Gap > 0 {
				// A gap inserted by mergeMissingChunks: discard those bytes.
				limitReader := io.LimitReader(part, mf.Gap)
				_, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer)
				if err != nil {
					Err = err
					goto exit
				}
				continue
			}

			if mf.File.Name == "" {
				Err = errors.Errorf("file name empty")
				goto exit
			}

			// Open the new file if it is different that what is already
			// opened
			if destFile == nil || destFile.metadata.Name != mf.File.Name {
				var err error
				if destFile != nil {
					// Drain any pending close errors before queueing the
					// previous file for closing.
				cleanup:
					for {
						select {
						case err = <-closeFilesErrors:
							if err != nil {
								Err = err
								goto exit
							}
						default:
							break cleanup
						}
					}
					filesToClose <- destFile
				}
				destFile, err = openDestinationFile(dirfd, mf.File, options)
				if err != nil {
					Err = err
					goto exit
				}
			}

			streamLength := mf.CompressedSize
			if compression == fileTypeNoCompression {
				streamLength = mf.UncompressedSize
			}

			limitReader := io.LimitReader(part, streamLength)

			if err := c.appendCompressedStreamToFile(compression, destFile, limitReader, mf.UncompressedSize); err != nil {
				Err = err
				goto exit
			}
			// Make sure the entire chunk is consumed from the stream.
			if _, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer); err != nil {
				Err = err
				goto exit
			}
		}
	exit:
		part.Close()
		if Err != nil {
			break
		}
	}

	if destFile != nil {
		return destFile.Close()
	}

	return nil
}

// mergeMissingChunks reduces the number of missing parts to roughly target by
// first coalescing zero-gap chunks of the same file and then, if still above
// target, merging the cheapest adjacent parts (inserting explicit Gap chunks
// for the bytes in between).
func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
	getGap := func(missingParts []missingPart, i int) int {
		prev := missingParts[i-1].SourceChunk.Offset + missingParts[i-1].SourceChunk.Length
		return int(missingParts[i].SourceChunk.Offset - prev)
	}
	getCost := func(missingParts []missingPart, i int) int {
		cost := getGap(missingParts, i)
		if missingParts[i-1].OriginFile != nil {
			cost += int(missingParts[i-1].SourceChunk.Length)
		}
		if missingParts[i].OriginFile != nil {
			cost += int(missingParts[i].SourceChunk.Length)
		}
		return cost
	}

	// simple case: merge chunks from the same file.
	newMissingParts := missingParts[0:1]
	prevIndex := 0
	for i := 1; i < len(missingParts); i++ {
		gap := getGap(missingParts, i)
		if gap == 0 && missingParts[prevIndex].OriginFile == nil && missingParts[i].OriginFile == nil && len(missingParts[prevIndex].Chunks) == 1 && len(missingParts[i].Chunks) == 1 && missingParts[prevIndex].Chunks[0].File.Name == missingParts[i].Chunks[0].File.Name {
			missingParts[prevIndex].SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length
			missingParts[prevIndex].Chunks[0].CompressedSize += missingParts[i].Chunks[0].CompressedSize
			missingParts[prevIndex].Chunks[0].UncompressedSize += missingParts[i].Chunks[0].UncompressedSize
		} else {
			newMissingParts = append(newMissingParts, missingParts[i])
			prevIndex++
		}
	}
	missingParts = newMissingParts

	if len(missingParts) <= target {
		return missingParts
	}

	// this implementation doesn't account for duplicates, so it could merge
	// more than necessary to reach the specified target.  Since target itself
	// is a heuristic value, it doesn't matter.
	costs := make([]int, len(missingParts)-1)
	for i := 1; i < len(missingParts); i++ {
		costs[i-1] = getCost(missingParts, i)
	}
	sort.Ints(costs)

	toShrink := len(missingParts) - target
	targetValue := costs[toShrink]

	newMissingParts = missingParts[0:1]
	for i := 1; i < len(missingParts); i++ {
		if getCost(missingParts, i) > targetValue {
			newMissingParts = append(newMissingParts, missingParts[i])
		} else {
			// Merge into the previous part, recording the skipped bytes as a Gap chunk.
			gap := getGap(missingParts, i)
			prev := &newMissingParts[len(newMissingParts)-1]
			prev.SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length
			prev.OriginFile = nil
			if gap > 0 {
				gapFile := missingFileChunk{
					Gap: int64(gap),
				}
				prev.Chunks = append(prev.Chunks, gapFile)
			}
			prev.Chunks = append(prev.Chunks, missingParts[i].Chunks...)
		}
	}
	return newMissingParts
}

// retrieveMissingFiles requests the remote chunks for the missing parts (parts
// with a local OriginFile are skipped) and feeds the streams to
// storeMissingFiles.  On ErrBadRequest it halves the number of parts and
// retries, giving up below 64 chunks.
func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error {
	var chunksToRequest []ImageSourceChunk
	// NOTE: the loop variable c shadows the receiver only inside this loop.
	for _, c := range missingParts {
		if c.OriginFile == nil {
			chunksToRequest = append(chunksToRequest, *c.SourceChunk)
		}
	}

	// There are some missing files. Prepare a multirange request for the missing chunks.
	var streams chan io.ReadCloser
	var err error
	var errs chan error
	for {
		streams, errs, err = c.stream.GetBlobAt(chunksToRequest)
		if err == nil {
			break
		}

		if _, ok := err.(ErrBadRequest); ok {
			requested := len(missingParts)
			// If the server cannot handle at least 64 chunks in a single request, just give up.
			if requested < 64 {
				return err
			}

			// Merge more chunks to request
			missingParts = mergeMissingChunks(missingParts, requested/2)
			continue
		}
		return err
	}

	if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingParts, options); err != nil {
		return err
	}
	return nil
}

// safeMkdir creates directory name under dirfd (tolerating pre-existing
// directories) and applies metadata to it.
func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *internal.FileMetadata, options *archive.TarOptions) error {
	parent := filepath.Dir(name)
	base := filepath.Base(name)

	parentFd := dirfd
	if parent != "."
{
		parentFile, err := openOrCreateDirUnderRoot(parent, dirfd, 0)
		if err != nil {
			return err
		}
		defer parentFile.Close()
		parentFd = int(parentFile.Fd())
	}

	if err := unix.Mkdirat(parentFd, base, uint32(mode)); err != nil {
		// The directory may already exist; that is fine.
		if !os.IsExist(err) {
			return fmt.Errorf("mkdir %q: %w", name, err)
		}
	}

	file, err := openFileUnderRoot(name, dirfd, unix.O_DIRECTORY|unix.O_RDONLY, 0)
	if err != nil {
		return err
	}
	defer file.Close()

	return setFileAttrs(dirfd, file, mode, metadata, options, false)
}

// safeLink creates the hard link described by metadata (Name -> Linkname)
// under dirfd and applies metadata to the result.
func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error {
	sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW, 0)
	if err != nil {
		return err
	}
	defer sourceFile.Close()

	destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name)
	destDirFd := dirfd
	if destDir != "." {
		f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0)
		if err != nil {
			return err
		}
		defer f.Close()
		destDirFd = int(f.Fd())
	}

	err = doHardLink(int(sourceFile.Fd()), destDirFd, destBase)
	if err != nil {
		return fmt.Errorf("create hardlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err)
	}

	newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_WRONLY|unix.O_NOFOLLOW, 0)
	if err != nil {
		// If the target is a symlink, open the file with O_PATH.
		if errors.Is(err, unix.ELOOP) {
			newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_PATH|unix.O_NOFOLLOW, 0)
			if err != nil {
				return err
			}
			defer newFile.Close()

			return setFileAttrs(dirfd, newFile, mode, metadata, options, true)
		}
		return err
	}
	defer newFile.Close()

	return setFileAttrs(dirfd, newFile, mode, metadata, options, false)
}

// safeSymlink creates the symlink described by metadata under dirfd.
func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error {
	destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name)
	destDirFd := dirfd
	if destDir != "." {
		f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0)
		if err != nil {
			return err
		}
		defer f.Close()
		destDirFd = int(f.Fd())
	}

	if err := unix.Symlinkat(metadata.Linkname, destDirFd, destBase); err != nil {
		return fmt.Errorf("create symlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err)
	}
	return nil
}

// whiteoutHandler implements the callbacks used by the whiteout converter to
// materialize whiteouts inside the checkout rooted at Dirfd.
type whiteoutHandler struct {
	Dirfd int
	Root  string
}

// Setxattr sets an extended attribute on path (resolved under Dirfd).
func (d whiteoutHandler) Setxattr(path, name string, value []byte) error {
	file, err := openOrCreateDirUnderRoot(path, d.Dirfd, 0)
	if err != nil {
		return err
	}
	defer file.Close()

	if err := unix.Fsetxattr(int(file.Fd()), name, value, 0); err != nil {
		return fmt.Errorf("set xattr %s=%q for %q: %w", name, value, path, err)
	}
	return nil
}

// Mknod creates a device node at path (resolved under Dirfd).
func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error {
	dir := filepath.Dir(path)
	base := filepath.Base(path)

	dirfd := d.Dirfd
	if dir != "" {
		dir, err := openOrCreateDirUnderRoot(dir, d.Dirfd, 0)
		if err != nil {
			return err
		}
		defer dir.Close()
		dirfd = int(dir.Fd())
	}

	if err := unix.Mknodat(dirfd, base, mode, dev); err != nil {
		return fmt.Errorf("mknod %q: %w", path, err)
	}
	return nil
}

// checkChownErr augments EINVAL chown failures with a hint about missing
// subuid/subgid ranges in the user namespace.
func checkChownErr(err error, name string, uid, gid int) error {
	if errors.Is(err, syscall.EINVAL) {
		return fmt.Errorf("potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally: %w", uid, gid, name, err)
	}
	return err
}

// Chown changes ownership of path (resolved under Dirfd); a failure is
// tolerated when the file already has the requested owner.
func (d whiteoutHandler) Chown(path string, uid, gid int) error {
	file, err := openFileUnderRoot(path, d.Dirfd, unix.O_PATH, 0)
	if err != nil {
		return err
	}
	defer file.Close()

	if err := unix.Fchownat(int(file.Fd()), "", uid, gid, unix.AT_EMPTY_PATH); err != nil {
		var stat unix.Stat_t
		if unix.Fstat(int(file.Fd()), &stat) == nil {
			if stat.Uid == uint32(uid) && stat.Gid == uint32(gid) {
				return nil
			}
		}
		return checkChownErr(err, path, uid, gid)
	}
	return nil
}

// hardLinkToCreate records a hard link whose creation is deferred until all
// regular files have been retrieved.
type hardLinkToCreate struct {
	dest     string
	dirfd    int
	mode     os.FileMode
	metadata *internal.FileMetadata
}

// parseBooleanPullOption reads a boolean pull option from the store options,
// returning def when the option is absent.
func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bool) bool {
	if value, ok := storeOpts.PullOptions[name]; ok {
		return strings.ToLower(value) == "true"
	}
	return def
}

// ApplyDiff reassembles the layer described by the differ's manifest into
// dest, deduplicating file content against other layers, OSTree repositories
// and (optionally) the host, and fetching only the chunks that remain missing.
func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) {
	defer c.layersCache.release()
	defer func() {
		if c.zstdReader != nil {
			c.zstdReader.Close()
		}
	}()

	bigData := map[string][]byte{
		bigDataKey: c.manifest,
	}
	output := graphdriver.DriverWithDifferOutput{
		Differ:  c,
		BigData: bigData,
	}

	storeOpts, err := types.DefaultStoreOptionsAutoDetectUID()
	if err != nil {
		return output, err
	}

	if !parseBooleanPullOption(&storeOpts, "enable_partial_images", false) {
		return output, errors.New("enable_partial_images not configured")
	}

	enableHostDedup := parseBooleanPullOption(&storeOpts, "enable_host_deduplication", false)

	// When the hard links deduplication is used, file attributes are ignored because setting them
	// modifies the source file as well.
	useHardLinks := parseBooleanPullOption(&storeOpts, "use_hard_links", false)

	// List of OSTree repositories to use for deduplication
	ostreeRepos := strings.Split(storeOpts.PullOptions["ostree_repos"], ":")

	// Generate the manifest
	var toc internal.TOC
	if err := json.Unmarshal(c.manifest, &toc); err != nil {
		return output, err
	}

	whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)

	var missingParts []missingPart

	mergedEntries, err := c.mergeTocEntries(c.fileType, toc.Entries)
	if err != nil {
		return output, err
	}
	if err := maybeDoIDRemap(mergedEntries, options); err != nil {
		return output, err
	}

	if options.ForceMask != nil {
		uid, gid, mode, err := archive.GetFileOwner(dest)
		if err == nil {
			value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
			if err := unix.Setxattr(dest, containersOverrideXattr, []byte(value), 0); err != nil {
				return output, err
			}
		}
	}

	dirfd, err := unix.Open(dest, unix.O_RDONLY|unix.O_PATH, 0)
	if err != nil {
		return output, fmt.Errorf("cannot open %q: %w", dest, err)
	}
	defer unix.Close(dirfd)

	// hardlinks can point to missing files. So create them after all files
	// are retrieved
	var hardLinks []hardLinkToCreate

	missingPartsSize, totalChunksSize := int64(0), int64(0)
	for i, r := range mergedEntries {
		if options.ForceMask != nil {
			// Record the real ownership/mode in an override xattr, then force the mask.
			value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&07777)
			r.Xattrs[containersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value))
			r.Mode = int64(*options.ForceMask)
		}

		mode := os.FileMode(r.Mode)

		r.Name = filepath.Clean(r.Name)
		r.Linkname = filepath.Clean(r.Linkname)

		t, err := typeToTarType(r.Type)
		if err != nil {
			return output, err
		}
		if whiteoutConverter != nil {
			hdr := archivetar.Header{
				Typeflag: t,
				Name:     r.Name,
				Linkname: r.Linkname,
				Size:     r.Size,
				Mode:     r.Mode,
				Uid:      r.UID,
				Gid:      r.GID,
			}
			handler := whiteoutHandler{
				Dirfd: dirfd,
				Root:  dest,
			}
			writeFile, err := whiteoutConverter.ConvertReadWithHandler(&hdr, r.Name, &handler)
			if err != nil {
				return output, err
			}
			if !writeFile {
				continue
			}
		}
		switch t {
		case tar.TypeReg:
			// Create directly empty files.
			if r.Size == 0 {
				// Used to have a scope for cleanup.
				createEmptyFile := func() error {
					file, err := openFileUnderRoot(r.Name, dirfd, newFileFlags, 0)
					if err != nil {
						return err
					}
					defer file.Close()
					if err := setFileAttrs(dirfd, file, mode, &r, options, false); err != nil {
						return err
					}
					return nil
				}
				if err := createEmptyFile(); err != nil {
					return output, err
				}
				continue
			}

		case tar.TypeDir:
			if err := safeMkdir(dirfd, mode, r.Name, &r, options); err != nil {
				return output, err
			}
			continue

		case tar.TypeLink:
			// Shadow the loop variables so the deferred hard-link creation
			// keeps this iteration's values.
			dest := dest
			dirfd := dirfd
			mode := mode
			r := r
			hardLinks = append(hardLinks, hardLinkToCreate{
				dest:     dest,
				dirfd:    dirfd,
				mode:     mode,
				metadata: &r,
			})
			continue

		case tar.TypeSymlink:
			if err := safeSymlink(dirfd, mode, &r, options); err != nil {
				return output, err
			}
			continue

		case tar.TypeChar:
		case tar.TypeBlock:
		case tar.TypeFifo:
			/* Ignore. */
		default:
			return output, fmt.Errorf("invalid type %q", t)
		}

		totalChunksSize += r.Size

		finalizeFile := func(dstFile *os.File) error {
			if dstFile != nil {
				defer dstFile.Close()
				if err := setFileAttrs(dirfd, dstFile, mode, &r, options, false); err != nil {
					return err
				}
			}
			return nil
		}

		// Try the deduplication sources in order: other layers, OSTree repos,
		// then (if enabled) the host filesystem.
		found, dstFile, _, err := findFileInOtherLayers(c.layersCache, &r, dirfd, useHardLinks)
		if err != nil {
			return output, err
		}
		if found {
			if err := finalizeFile(dstFile); err != nil {
				return output, err
			}
			continue
		}

		found, dstFile, _, err = findFileInOSTreeRepos(&r, ostreeRepos, dirfd, useHardLinks)
		if err != nil {
			return output, err
		}
		if found {
			if err := finalizeFile(dstFile); err != nil {
				return output, err
			}
			continue
		}

		if enableHostDedup {
			found, dstFile, _, err = findFileOnTheHost(&r, dirfd, useHardLinks, c.copyBuffer)
			if err != nil {
				return output, err
			}
			if found {
				if err := finalizeFile(dstFile); err != nil {
					return output, err
				}
				continue
			}
		}

		missingPartsSize += r.Size
		if t == tar.TypeReg {
			// Queue every chunk of this file for retrieval, preferring a
			// chunk already present in another layer when available.
			remainingSize := r.Size
			for _, chunk := range r.Chunks {
				compressedSize := int64(chunk.EndOffset - chunk.Offset)
				size := remainingSize
				if chunk.ChunkSize > 0 {
					size = chunk.ChunkSize
				}
				remainingSize = remainingSize - size

				rawChunk := ImageSourceChunk{
					Offset: uint64(chunk.Offset),
					Length: uint64(compressedSize),
				}
				file := missingFileChunk{
					File:             &mergedEntries[i],
					CompressedSize:   compressedSize,
					UncompressedSize: size,
				}
				mp := missingPart{
					SourceChunk: &rawChunk,
					Chunks: []missingFileChunk{
						file,
					},
				}

				root, path, offset := c.layersCache.findChunkInOtherLayers(chunk)
				if offset >= 0 {
					missingPartsSize -= size
					mp.OriginFile = &originFile{
						Root:   root,
						Path:   path,
						Offset: offset,
					}
				}
				missingParts = append(missingParts, mp)
			}
		}
	}

	// There are some missing files. Prepare a multirange request for the missing chunks.
	if len(missingParts) > 0 {
		missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks)
		if err := c.retrieveMissingFiles(dest, dirfd, missingParts, options); err != nil {
			return output, err
		}
	}

	for _, m := range hardLinks {
		if err := safeLink(m.dirfd, m.mode, m.metadata, options); err != nil {
			return output, err
		}
	}

	if totalChunksSize > 0 {
		logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize))
	}
	return output, nil
}

// mustSkipFile reports whether e is an estargz bookkeeping entry that must not
// be materialized in the checkout.
func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool {
	// ignore the metadata files for the estargz format.
	if fileType != fileTypeEstargz {
		return false
	}
	switch e.Name {
	// ignore the metadata files for the estargz format.
	case estargz.PrefetchLandmark, estargz.NoPrefetchLandmark, estargz.TOCTarName:
		return true
	}
	return false
}

// mergeTocEntries folds TypeChunk entries into their owning regular-file entry
// and computes missing End/Size offsets for stargz/estargz TOCs.
func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]internal.FileMetadata, error) {
	countNextChunks := func(start int) int {
		count := 0
		for _, e := range entries[start:] {
			if e.Type != TypeChunk {
				return count
			}
			count++
		}
		return count
	}

	size := 0
	for _, entry := range entries {
		if mustSkipFile(fileType, entry) {
			continue
		}
		if entry.Type != TypeChunk {
			size++
		}
	}

	mergedEntries := make([]internal.FileMetadata, size)
	m := 0
	for i := 0; i < len(entries); i++ {
		e := entries[i]
		if mustSkipFile(fileType, e) {
			continue
		}
		if e.Type == TypeChunk {
			return nil, fmt.Errorf("chunk type without a regular file")
		}

		if e.Type == TypeReg {
			nChunks := countNextChunks(i + 1)

			e.Chunks = make([]*internal.FileMetadata, nChunks+1)
			for j := 0; j <= nChunks; j++ {
				e.Chunks[j] = &entries[i+j]
				e.EndOffset = entries[i+j].EndOffset
			}
			i += nChunks
		}
		mergedEntries[m] = e
		m++
	}
	// stargz/estargz doesn't store EndOffset so let's calculate it here
	lastOffset := c.tocOffset
	for i := len(mergedEntries) - 1; i >= 0; i-- {
		if mergedEntries[i].EndOffset == 0 {
			mergedEntries[i].EndOffset =
lastOffset
		}
		if mergedEntries[i].Offset != 0 {
			lastOffset = mergedEntries[i].Offset
		}

		lastChunkOffset := mergedEntries[i].EndOffset
		for j := len(mergedEntries[i].Chunks) - 1; j >= 0; j-- {
			mergedEntries[i].Chunks[j].EndOffset = lastChunkOffset
			mergedEntries[i].Chunks[j].Size = mergedEntries[i].Chunks[j].EndOffset - mergedEntries[i].Chunks[j].Offset
			lastChunkOffset = mergedEntries[i].Chunks[j].Offset
		}
	}
	return mergedEntries, nil
}
chunked: split appendCompressedStreamToFile

Signed-off-by: Giuseppe Scrivano <20002b7b24c75512093a71a58285826d5c504fca@redhat.com>
package chunked

import (
	archivetar "archive/tar"
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"reflect"
	"sort"
	"strings"
	"sync/atomic"
	"syscall"
	"time"

	"github.com/containerd/stargz-snapshotter/estargz"
	storage "github.com/containers/storage"
	graphdriver "github.com/containers/storage/drivers"
	driversCopy "github.com/containers/storage/drivers/copy"
	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/chunked/internal"
	"github.com/containers/storage/pkg/idtools"
	"github.com/containers/storage/pkg/system"
	"github.com/containers/storage/types"
	securejoin "github.com/cyphar/filepath-securejoin"
	"github.com/klauspost/compress/zstd"
	"github.com/klauspost/pgzip"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/vbatts/tar-split/archive/tar"
	"golang.org/x/sys/unix"
)

const (
	maxNumberMissingChunks  = 1024
	newFileFlags            = (unix.O_CREAT | unix.O_TRUNC | unix.O_EXCL | unix.O_WRONLY)
	containersOverrideXattr = "user.containers.override_stat"
	bigDataKey              = "zstd-chunked-manifest"

	// Supported blob compression formats.
	fileTypeZstdChunked   = iota
	fileTypeEstargz       = iota
	fileTypeNoCompression = iota
)

type compressedFileType int

// chunkedDiffer applies a partially-retrievable (zstd:chunked or estargz)
// layer blob; it caches the decompressors and copy buffer across files.
type chunkedDiffer struct {
	stream      ImageSourceSeekable
	manifest    []byte
	layersCache *layersCache
	tocOffset   int64
	fileType    compressedFileType

	copyBuffer []byte

	gzipReader *pgzip.Reader
	zstdReader *zstd.Decoder
	rawReader  io.Reader
}

// xattrsToIgnore lists extended attributes never copied to checked-out files.
var xattrsToIgnore = map[string]interface{}{
	"security.selinux": true,
}

// timeToTimespec converts a time pointer to a unix.Timespec, mapping nil/zero
// times to the UTIME_OMIT special value.
func timeToTimespec(time *time.Time) (ts unix.Timespec) {
	if time == nil || time.IsZero() {
		// Return UTIME_OMIT special value
		ts.Sec = 0
		ts.Nsec = ((1 << 30) - 2)
		return
	}
	return unix.NsecToTimespec(time.UnixNano())
}

// doHardLink links the file open as srcFd to destBase under destDirFd,
// replacing an existing destination once if necessary.
func doHardLink(srcFd int, destDirFd int, destBase string) error {
	doLink := func() error {
		// Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses
		// /proc/self/fd doesn't and can be used with rootless.
		srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFd)
		return unix.Linkat(unix.AT_FDCWD, srcPath, destDirFd, destBase, unix.AT_SYMLINK_FOLLOW)
	}

	err := doLink()

	// if the destination exists, unlink it first and try again
	if err != nil && os.IsExist(err) {
		unix.Unlinkat(destDirFd, destBase, 0)
		return doLink()
	}
	return err
}

// copyFileContent copies the content of the file open as srcFd to destFile
// under dirfd, hard-linking instead when useHardLinks is set and linking
// succeeds.  It returns the created file (nil when hard-linked) and the number
// of bytes it represents.
func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) {
	src := fmt.Sprintf("/proc/self/fd/%d", srcFd)
	st, err := os.Stat(src)
	if err != nil {
		return nil, -1, fmt.Errorf("copy file content for %q: %w", destFile, err)
	}

	copyWithFileRange, copyWithFileClone := true, true

	if useHardLinks {
		destDirPath := filepath.Dir(destFile)
		destBase := filepath.Base(destFile)
		destDir, err := openFileUnderRoot(destDirPath, dirfd, 0, mode)
		if err == nil {
			defer destDir.Close()

			err := doHardLink(srcFd, int(destDir.Fd()), destBase)
			if err == nil {
				return nil, st.Size(), nil
			}
		}
	}

	// If the destination file already exists, we shouldn't blow it away
	dstFile, err := openFileUnderRoot(destFile, dirfd, newFileFlags, mode)
	if err != nil {
		return nil, -1, fmt.Errorf("open file %q under rootfs for copy: %w", destFile, err)
	}

	err = driversCopy.CopyRegularToFile(src, dstFile, st, &copyWithFileRange, &copyWithFileClone)
	if err != nil {
		dstFile.Close()
		return nil, -1, fmt.Errorf("copy to file %q under rootfs: %w", destFile, err)
	}
	return dstFile, st.Size(), nil
}

// GetDiffer
returns a differ than can be used with ApplyDiffWithDiffer. func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { if _, ok := annotations[internal.ManifestChecksumKey]; ok { return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss) } if _, ok := annotations[estargz.TOCJSONDigestAnnotation]; ok { return makeEstargzChunkedDiffer(ctx, store, blobSize, annotations, iss) } return nil, errors.New("blob type not supported for partial retrieval") } func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) { manifest, tocOffset, err := readZstdChunkedManifest(iss, blobSize, annotations) if err != nil { return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) } layersCache, err := getLayersCache(store) if err != nil { return nil, err } return &chunkedDiffer{ copyBuffer: makeCopyBuffer(), stream: iss, manifest: manifest, layersCache: layersCache, tocOffset: tocOffset, fileType: fileTypeZstdChunked, }, nil } func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) { manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, annotations) if err != nil { return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) } layersCache, err := getLayersCache(store) if err != nil { return nil, err } return &chunkedDiffer{ copyBuffer: makeCopyBuffer(), stream: iss, manifest: manifest, layersCache: layersCache, tocOffset: tocOffset, fileType: fileTypeEstargz, }, nil } func makeCopyBuffer() []byte { return make([]byte, 2<<20) } // copyFileFromOtherLayer copies a file from another layer // file is the file to look for. // source is the path to the source layer checkout. // otherFile contains the metadata for the file. 
// dirfd is an open file descriptor to the destination root directory. // useHardLinks defines whether the deduplication can be performed using hard links. func copyFileFromOtherLayer(file *internal.FileMetadata, source string, otherFile *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0) if err != nil { return false, nil, 0, fmt.Errorf("open source file %q: %w", source, err) } defer unix.Close(srcDirfd) srcFile, err := openFileUnderRoot(otherFile.Name, srcDirfd, unix.O_RDONLY, 0) if err != nil { return false, nil, 0, fmt.Errorf("open source file %q under target rootfs: %w", otherFile.Name, err) } defer srcFile.Close() dstFile, written, err := copyFileContent(int(srcFile.Fd()), file.Name, dirfd, 0, useHardLinks) if err != nil { return false, nil, 0, fmt.Errorf("copy content to %q: %w", file.Name, err) } return true, dstFile, written, nil } // canDedupMetadataWithHardLink says whether it is possible to deduplicate file with otherFile. // It checks that the two files have the same UID, GID, file mode and xattrs. func canDedupMetadataWithHardLink(file *internal.FileMetadata, otherFile *internal.FileMetadata) bool { if file.UID != otherFile.UID { return false } if file.GID != otherFile.GID { return false } if file.Mode != otherFile.Mode { return false } if !reflect.DeepEqual(file.Xattrs, otherFile.Xattrs) { return false } return true } // canDedupFileWithHardLink checks if the specified file can be deduplicated by an // open file, given its descriptor and stat data. 
func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo) bool { st, ok := s.Sys().(*syscall.Stat_t) if !ok { return false } path := fmt.Sprintf("/proc/self/fd/%d", fd) listXattrs, err := system.Llistxattr(path) if err != nil { return false } xattrs := make(map[string]string) for _, x := range listXattrs { v, err := system.Lgetxattr(path, x) if err != nil { return false } if _, found := xattrsToIgnore[x]; found { continue } xattrs[x] = string(v) } // fill only the attributes used by canDedupMetadataWithHardLink. otherFile := internal.FileMetadata{ UID: int(st.Uid), GID: int(st.Gid), Mode: int64(st.Mode), Xattrs: xattrs, } return canDedupMetadataWithHardLink(file, &otherFile) } func getFileDigest(f *os.File, buf []byte) (digest.Digest, error) { digester := digest.Canonical.Digester() if _, err := io.CopyBuffer(digester.Hash(), f, buf); err != nil { return "", err } return digester.Digest(), nil } // findFileInOSTreeRepos checks whether the requested file already exist in one of the OSTree repo and copies the file content from there if possible. // file is the file to look for. // ostreeRepos is a list of OSTree repos. // dirfd is an open fd to the destination checkout. // useHardLinks defines whether the deduplication can be performed using hard links. 
func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { digest, err := digest.Parse(file.Digest) if err != nil { return false, nil, 0, nil } payloadLink := digest.Encoded() + ".payload-link" if len(payloadLink) < 2 { return false, nil, 0, nil } for _, repo := range ostreeRepos { sourceFile := filepath.Join(repo, "objects", payloadLink[:2], payloadLink[2:]) st, err := os.Stat(sourceFile) if err != nil || !st.Mode().IsRegular() { continue } if st.Size() != file.Size { continue } fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0) if err != nil { return false, nil, 0, nil } f := os.NewFile(uintptr(fd), "fd") defer f.Close() // check if the open file can be deduplicated with hard links if useHardLinks && !canDedupFileWithHardLink(file, fd, st) { continue } dstFile, written, err := copyFileContent(fd, file.Name, dirfd, 0, useHardLinks) if err != nil { return false, nil, 0, nil } return true, dstFile, written, nil } // If hard links deduplication was used and it has failed, try again without hard links. if useHardLinks { return findFileInOSTreeRepos(file, ostreeRepos, dirfd, false) } return false, nil, 0, nil } // findFileOnTheHost checks whether the requested file already exist on the host and copies the file content from there if possible. // It is currently implemented to look only at the file with the same path. Ideally it can detect the same content also at different // paths. // file is the file to look for. // dirfd is an open fd to the destination checkout. // useHardLinks defines whether the deduplication can be performed using hard links. func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool, buf []byte) (bool, *os.File, int64, error) { sourceFile := filepath.Clean(filepath.Join("/", file.Name)) if !strings.HasPrefix(sourceFile, "/usr/") { // limit host deduplication to files under /usr. 
		return false, nil, 0, nil
	}

	st, err := os.Stat(sourceFile)
	if err != nil || !st.Mode().IsRegular() {
		return false, nil, 0, nil
	}

	if st.Size() != file.Size {
		return false, nil, 0, nil
	}

	fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0)
	if err != nil {
		return false, nil, 0, nil
	}

	f := os.NewFile(uintptr(fd), "fd")
	defer f.Close()

	manifestChecksum, err := digest.Parse(file.Digest)
	if err != nil {
		return false, nil, 0, err
	}

	checksum, err := getFileDigest(f, buf)
	if err != nil {
		return false, nil, 0, err
	}

	if checksum != manifestChecksum {
		return false, nil, 0, nil
	}

	// check if the open file can be deduplicated with hard links
	useHardLinks = useHardLinks && canDedupFileWithHardLink(file, fd, st)

	dstFile, written, err := copyFileContent(fd, file.Name, dirfd, 0, useHardLinks)
	if err != nil {
		return false, nil, 0, nil
	}

	// calculate the checksum again to make sure the file wasn't modified while it was copied
	if _, err := f.Seek(0, 0); err != nil {
		dstFile.Close()
		return false, nil, 0, err
	}
	checksum, err = getFileDigest(f, buf)
	if err != nil {
		dstFile.Close()
		return false, nil, 0, err
	}
	if checksum != manifestChecksum {
		dstFile.Close()
		// Host file changed while it was being copied: reject the copy.
		return false, nil, 0, nil
	}
	return true, dstFile, written, nil
}

// findFileState accumulates the result of scanning other layers for a file;
// it is used as the visitor passed to layersCache.findFileInOtherLayers.
type findFileState struct {
	file         *internal.FileMetadata // file being looked for
	useHardLinks bool                   // whether hard-link dedup is allowed
	dirfd        int                    // destination checkout root fd
	found        bool                   // set once a usable copy was made
	dstFile      *os.File               // the created destination file
	written      int64                  // bytes written to dstFile
	retError     error                  // error from the successful visit (always nil here)
}

// VisitFile is called for each candidate file in another layer.  It returns
// false to stop the scan once a copy succeeded, true to keep looking.
func (v *findFileState) VisitFile(candidate *internal.FileMetadata, target string) (bool, error) {
	if v.useHardLinks && !canDedupMetadataWithHardLink(v.file, candidate) {
		// Metadata differs, so a hard link would be wrong; keep scanning.
		return true, nil
	}

	found, dstFile, written, err := copyFileFromOtherLayer(v.file, target, candidate, v.dirfd, v.useHardLinks)
	if found && err == nil {
		v.found = found
		v.dstFile = dstFile
		v.written = written
		v.retError = err
		return false, nil
	}
	// Copy failed: errors are swallowed on purpose, dedup is best-effort.
	return true, nil
}

// findFileInOtherLayers finds the specified file in other layers.
// cache is the layers cache to use.
// file is the file to look for.
// dirfd is an open file descriptor to the checkout root directory. // useHardLinks defines whether the deduplication can be performed using hard links. func findFileInOtherLayers(cache *layersCache, file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { visitor := &findFileState{ file: file, useHardLinks: useHardLinks, dirfd: dirfd, } if err := cache.findFileInOtherLayers(file, visitor); err != nil { return false, nil, 0, err } return visitor.found, visitor.dstFile, visitor.written, visitor.retError } func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOptions) error { if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 { return nil } idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) for i := range manifest { if options.ChownOpts != nil { manifest[i].UID = options.ChownOpts.UID manifest[i].GID = options.ChownOpts.GID } else { pair := idtools.IDPair{ UID: manifest[i].UID, GID: manifest[i].GID, } var err error manifest[i].UID, manifest[i].GID, err = idMappings.ToContainer(pair) if err != nil { return err } } } return nil } type originFile struct { Root string Path string Offset int64 } type missingFileChunk struct { Gap int64 File *internal.FileMetadata CompressedSize int64 UncompressedSize int64 } type missingPart struct { SourceChunk *ImageSourceChunk OriginFile *originFile Chunks []missingFileChunk } func (o *originFile) OpenFile() (io.ReadCloser, error) { srcDirfd, err := unix.Open(o.Root, unix.O_RDONLY, 0) if err != nil { return nil, fmt.Errorf("open source file %q: %w", o.Root, err) } defer unix.Close(srcDirfd) srcFile, err := openFileUnderRoot(o.Path, srcDirfd, unix.O_RDONLY, 0) if err != nil { return nil, fmt.Errorf("open source file %q under target rootfs: %w", o.Path, err) } if _, err := srcFile.Seek(o.Offset, 0); err != nil { srcFile.Close() return nil, err } return srcFile, nil } // setFileAttrs sets the file attributes for file given 
// metadata.
func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions, usePath bool) error {
	if file == nil || file.Fd() < 0 {
		return errors.Errorf("invalid file")
	}
	fd := int(file.Fd())

	t, err := typeToTarType(metadata.Type)
	if err != nil {
		return err
	}

	// If it is a symlink, force to use the path
	if t == tar.TypeSymlink {
		usePath = true
	}

	baseName := ""
	if usePath {
		// Operate via (parent dirfd, basename) instead of the open fd.
		dirName := filepath.Dir(metadata.Name)
		if dirName != "" {
			parentFd, err := openFileUnderRoot(dirName, dirfd, unix.O_PATH|unix.O_DIRECTORY, 0)
			if err != nil {
				return err
			}
			defer parentFd.Close()

			dirfd = int(parentFd.Fd())
		}
		baseName = filepath.Base(metadata.Name)
	}

	// Each helper picks the *at variant when usePath is set so that symlinks
	// are not followed (AT_SYMLINK_NOFOLLOW).
	doChown := func() error {
		if usePath {
			return unix.Fchownat(dirfd, baseName, metadata.UID, metadata.GID, unix.AT_SYMLINK_NOFOLLOW)
		}
		return unix.Fchown(fd, metadata.UID, metadata.GID)
	}

	doSetXattr := func(k string, v []byte) error {
		return unix.Fsetxattr(fd, k, v, 0)
	}

	doUtimes := func() error {
		ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)}
		if usePath {
			return unix.UtimesNanoAt(dirfd, baseName, ts, unix.AT_SYMLINK_NOFOLLOW)
		}
		return unix.UtimesNanoAt(unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", fd), ts, 0)
	}

	doChmod := func() error {
		if usePath {
			return unix.Fchmodat(dirfd, baseName, uint32(mode), unix.AT_SYMLINK_NOFOLLOW)
		}
		return unix.Fchmod(fd, uint32(mode))
	}

	// chown must happen before chmod: chown clears setuid/setgid bits.
	if err := doChown(); err != nil {
		if !options.IgnoreChownErrors {
			return fmt.Errorf("chown %q to %d:%d: %w", metadata.Name, metadata.UID, metadata.GID, err)
		}
	}

	// ENOSYS/ENOTSUP are tolerated: the filesystem may simply not support
	// xattrs or nanosecond timestamps.
	canIgnore := func(err error) bool {
		return err == nil || errors.Is(err, unix.ENOSYS) || errors.Is(err, unix.ENOTSUP)
	}

	for k, v := range metadata.Xattrs {
		if _, found := xattrsToIgnore[k]; found {
			continue
		}
		// Xattr values are stored base64-encoded in the manifest.
		data, err := base64.StdEncoding.DecodeString(v)
		if err != nil {
			return fmt.Errorf("decode xattr %q: %w", v, err)
		}
		if err := doSetXattr(k, data); !canIgnore(err) {
			return fmt.Errorf("set xattr %s=%q for %q: %w", k, data, metadata.Name, err)
		}
	}

	if err := doUtimes(); !canIgnore(err) {
		return fmt.Errorf("set utimes for %q: %w", metadata.Name, err)
	}

	if err := doChmod(); !canIgnore(err) {
		return fmt.Errorf("chmod %q: %w", metadata.Name, err)
	}
	return nil
}

// openFileUnderRootFallback is the userspace fallback used when openat2(2) is
// unavailable: it resolves name below dirfd with SecureJoin and then verifies
// via /proc/self/fd that the opened fd really is inside the root.
func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
	root := fmt.Sprintf("/proc/self/fd/%d", dirfd)

	targetRoot, err := os.Readlink(root)
	if err != nil {
		return -1, err
	}

	hasNoFollow := (flags & unix.O_NOFOLLOW) != 0

	fd := -1
	// If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the
	// last component as the path to openat().
	if hasNoFollow {
		dirName := filepath.Dir(name)
		if dirName != "" {
			newRoot, err := securejoin.SecureJoin(root, filepath.Dir(name))
			if err != nil {
				return -1, err
			}
			root = newRoot
		}

		parentDirfd, err := unix.Open(root, unix.O_PATH, 0)
		if err != nil {
			return -1, err
		}
		defer unix.Close(parentDirfd)

		fd, err = unix.Openat(parentDirfd, filepath.Base(name), int(flags), uint32(mode))
		if err != nil {
			return -1, err
		}
	} else {
		newPath, err := securejoin.SecureJoin(root, name)
		if err != nil {
			return -1, err
		}
		fd, err = unix.Openat(dirfd, newPath, int(flags), uint32(mode))
		if err != nil {
			return -1, err
		}
	}

	target, err := os.Readlink(fmt.Sprintf("/proc/self/fd/%d", fd))
	if err != nil {
		unix.Close(fd)
		return -1, err
	}

	// Add an additional check to make sure the opened fd is inside the rootfs
	// NOTE(review): a plain prefix check can accept siblings like "/rootX"
	// when the root is "/root" — confirm whether targetRoot always ends with
	// a path separator here.
	if !strings.HasPrefix(target, targetRoot) {
		unix.Close(fd)
		return -1, fmt.Errorf("error while resolving %q.  It resolves outside the root directory", name)
	}

	return fd, err
}

// openFileUnderRootOpenat2 opens name below dirfd using openat2(2) with
// RESOLVE_IN_ROOT, which makes the kernel itself confine the resolution.
func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
	how := unix.OpenHow{
		Flags:   flags,
		Mode:    uint64(mode & 07777),
		Resolve: unix.RESOLVE_IN_ROOT,
	}
	return unix.Openat2(dirfd, name, &how)
}

// skipOpenat2 is set when openat2 is not supported by the underlying kernel and avoid
// using it again.
var skipOpenat2 int32 // openFileUnderRootRaw tries to open a file using openat2 and if it is not supported fallbacks to a // userspace lookup. func openFileUnderRootRaw(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { var fd int var err error if atomic.LoadInt32(&skipOpenat2) > 0 { fd, err = openFileUnderRootFallback(dirfd, name, flags, mode) } else { fd, err = openFileUnderRootOpenat2(dirfd, name, flags, mode) // If the function failed with ENOSYS, switch off the support for openat2 // and fallback to using safejoin. if err != nil && errors.Is(err, unix.ENOSYS) { atomic.StoreInt32(&skipOpenat2, 1) fd, err = openFileUnderRootFallback(dirfd, name, flags, mode) } } return fd, err } // openFileUnderRoot safely opens a file under the specified root directory using openat2 // name is the path to open relative to dirfd. // dirfd is an open file descriptor to the target checkout directory. // flags are the flags to pass to the open syscall. // mode specifies the mode to use for newly created files. func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) { fd, err := openFileUnderRootRaw(dirfd, name, flags, mode) if err == nil { return os.NewFile(uintptr(fd), name), nil } hasCreate := (flags & unix.O_CREAT) != 0 if errors.Is(err, unix.ENOENT) && hasCreate { parent := filepath.Dir(name) if parent != "" { newDirfd, err2 := openOrCreateDirUnderRoot(parent, dirfd, 0) if err2 == nil { defer newDirfd.Close() fd, err := openFileUnderRootRaw(dirfd, name, flags, mode) if err == nil { return os.NewFile(uintptr(fd), name), nil } } } } return nil, fmt.Errorf("open %q under the rootfs: %w", name, err) } // openOrCreateDirUnderRoot safely opens a directory or create it if it is missing. // name is the path to open relative to dirfd. // dirfd is an open file descriptor to the target checkout directory. // mode specifies the mode to use for newly created files. 
func openOrCreateDirUnderRoot(name string, dirfd int, mode os.FileMode) (*os.File, error) {
	fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY, mode)
	if err == nil {
		return os.NewFile(uintptr(fd), name), nil
	}

	if errors.Is(err, unix.ENOENT) {
		parent := filepath.Dir(name)
		if parent != "" {
			// Recursively make sure the parent exists, then mkdir the leaf.
			pDir, err2 := openOrCreateDirUnderRoot(parent, dirfd, mode)
			if err2 != nil {
				// NOTE(review): the original ENOENT (err) is returned instead of
				// err2/mkdir errors throughout this branch — presumably intentional,
				// to report the path the caller asked for; confirm.
				return nil, err
			}
			defer pDir.Close()

			baseName := filepath.Base(name)

			if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0755); err2 != nil {
				return nil, err
			}

			fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY, mode)
			if err == nil {
				return os.NewFile(uintptr(fd), name), nil
			}
		}
	}
	return nil, err
}

// prepareCompressedStreamToFile (re)initializes the differ's decompressor for
// the next chunk read from "from", according to the chunk's compression type.
// The underlying reader is limited to the chunk's size so the decompressor
// cannot over-read into the next chunk.
func (c *chunkedDiffer) prepareCompressedStreamToFile(compression compressedFileType, from io.Reader, mf *missingFileChunk) error {
	switch compression {
	case fileTypeZstdChunked:
		c.rawReader = io.LimitReader(from, mf.CompressedSize)
		if c.zstdReader == nil {
			// Lazily create the zstd reader and reuse it for later chunks.
			r, err := zstd.NewReader(c.rawReader)
			if err != nil {
				return err
			}
			c.zstdReader = r
		} else {
			if err := c.zstdReader.Reset(c.rawReader); err != nil {
				return err
			}
		}
	case fileTypeEstargz:
		c.rawReader = io.LimitReader(from, mf.CompressedSize)
		if c.gzipReader == nil {
			r, err := pgzip.NewReader(c.rawReader)
			if err != nil {
				return err
			}
			c.gzipReader = r
		} else {
			if err := c.gzipReader.Reset(c.rawReader); err != nil {
				return err
			}
		}
	case fileTypeNoCompression:
		// Raw data: the limit is the uncompressed size.
		c.rawReader = io.LimitReader(from, mf.UncompressedSize)
	default:
		return fmt.Errorf("unknown file type %q", c.fileType)
	}
	return nil
}

// appendCompressedStreamToFile copies size decompressed bytes of the current
// chunk into "to", using the decompressor prepared by
// prepareCompressedStreamToFile.
func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileType, to io.Writer, size int64) error {
	switch compression {
	case fileTypeZstdChunked:
		defer c.zstdReader.Reset(nil)
		if _, err := io.CopyBuffer(to, io.LimitReader(c.zstdReader, size), c.copyBuffer); err != nil {
			return err
		}
	case fileTypeEstargz:
		defer c.gzipReader.Close()
		if _, err := io.CopyBuffer(to, io.LimitReader(c.gzipReader, size),
			c.copyBuffer); err != nil {
			return err
		}
	case fileTypeNoCompression:
		if _, err := io.CopyBuffer(to, io.LimitReader(c.rawReader, size), c.copyBuffer); err != nil {
			return err
		}
	default:
		return fmt.Errorf("unknown file type %q", c.fileType)
	}
	return nil
}

// destinationFile is an output file being filled from missing chunks; writes
// go through "to" so the digest is updated alongside the file content.
type destinationFile struct {
	dirfd    int
	file     *os.File
	digester digest.Digester
	to       io.Writer
	metadata *internal.FileMetadata
	options  *archive.TarOptions
}

// openDestinationFile creates the destination file under dirfd and wires the
// digester into the write path.
func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions) (*destinationFile, error) {
	file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0)
	if err != nil {
		return nil, err
	}

	digester := digest.Canonical.Digester()
	to := io.MultiWriter(file, digester.Hash())

	return &destinationFile{
		file:     file,
		digester: digester,
		to:       to,
		metadata: metadata,
		options:  options,
		dirfd:    dirfd,
	}, nil
}

// Close verifies the written content against the manifest digest and then
// applies the file's metadata (owner, mode, xattrs, times).
func (d *destinationFile) Close() error {
	manifestChecksum, err := digest.Parse(d.metadata.Digest)
	if err != nil {
		return err
	}
	if d.digester.Digest() != manifestChecksum {
		return fmt.Errorf("checksum mismatch for %q (got %q instead of %q)", d.file.Name(), d.digester.Digest(), manifestChecksum)
	}
	return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false)
}

// closeDestinationFiles drains files, closing each one in the background and
// reporting the results on errors; it closes errors when done.
func closeDestinationFiles(files chan *destinationFile, errors chan error) {
	for f := range files {
		errors <- f.Close()
	}
	close(errors)
}

// storeMissingFiles writes every chunk listed in missingParts to its
// destination file, reading either from a local origin file or from the
// remote streams channel.  File closing (and checksum verification) happens
// asynchronously via filesToClose; any close error is folded into Err by the
// deferred handler.
func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) {
	var destFile *destinationFile

	filesToClose := make(chan *destinationFile, 3)
	closeFilesErrors := make(chan error, 2)

	go closeDestinationFiles(filesToClose, closeFilesErrors)
	defer func() {
		// Drain the closer goroutine; the first close error wins unless a
		// more specific error was already recorded.
		close(filesToClose)
		for e := range closeFilesErrors {
			if e != nil && Err == nil {
				Err = e
			}
		}
	}()

	for _, missingPart := range missingParts {
		var part io.ReadCloser
		compression := c.fileType
		switch {
		case missingPart.OriginFile != nil:
			var err error
			// Serve the chunk from an already-present local file; its data
			// is stored uncompressed.
			part, err = missingPart.OriginFile.OpenFile()
			if err != nil {
				return err
			}
			compression = fileTypeNoCompression
		case missingPart.SourceChunk != nil:
			// Take the next stream from the remote multirange reply.
			select {
			case p := <-streams:
				part = p
			case err := <-errs:
				return err
			}
			if part == nil {
				return errors.Errorf("invalid stream returned")
			}
		default:
			return errors.Errorf("internal error: missing part misses both local and remote data stream")
		}

		for _, mf := range missingPart.Chunks {
			if mf.Gap > 0 {
				// Gap entries are filler between merged chunks: discard them.
				limitReader := io.LimitReader(part, mf.Gap)
				_, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer)
				if err != nil {
					Err = err
					goto exit
				}
				continue
			}

			if mf.File.Name == "" {
				Err = errors.Errorf("file name empty")
				goto exit
			}

			if err := c.prepareCompressedStreamToFile(compression, part, &mf); err != nil {
				Err = err
				goto exit
			}

			// Open the new file if it is different that what is already
			// opened
			if destFile == nil || destFile.metadata.Name != mf.File.Name {
				var err error
				if destFile != nil {
					// Surface any pending close errors before queuing another
					// file for the background closer.
				cleanup:
					for {
						select {
						case err = <-closeFilesErrors:
							if err != nil {
								Err = err
								goto exit
							}
						default:
							break cleanup
						}
					}
					filesToClose <- destFile
				}
				destFile, err = openDestinationFile(dirfd, mf.File, options)
				if err != nil {
					Err = err
					goto exit
				}
			}

			if err := c.appendCompressedStreamToFile(compression, destFile.to, mf.UncompressedSize); err != nil {
				Err = err
				goto exit
			}
			// Drain whatever the decompressor did not consume so the stream
			// stays aligned with the next chunk.
			if _, err := io.CopyBuffer(ioutil.Discard, c.rawReader, c.copyBuffer); err != nil {
				Err = err
				goto exit
			}
		}
	exit:
		part.Close()
		if Err != nil {
			break
		}
	}

	if destFile != nil {
		return destFile.Close()
	}

	return nil
}

// mergeMissingChunks reduces missingParts to at most roughly "target" entries
// by merging adjacent parts, preferring merges that waste the fewest bytes
// (gaps downloaded and discarded, or local-origin data re-downloaded).
func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
	// getGap returns the byte distance between part i-1 and part i in the blob.
	getGap := func(missingParts []missingPart, i int) int {
		prev := missingParts[i-1].SourceChunk.Offset + missingParts[i-1].SourceChunk.Length
		return int(missingParts[i].SourceChunk.Offset - prev)
	}
	// getCost estimates the bytes wasted by merging part i into part i-1:
	// the gap itself plus any locally-available data that would be fetched.
	getCost := func(missingParts []missingPart, i int) int {
		cost := getGap(missingParts, i)
		if missingParts[i-1].OriginFile != nil {
			cost += int(missingParts[i-1].SourceChunk.Length)
		}
		if missingParts[i].OriginFile != nil {
			cost += int(missingParts[i].SourceChunk.Length)
		}
		return cost
	}

	// simple case: merge chunks from the same file.
	newMissingParts := missingParts[0:1]
	prevIndex := 0
	for i := 1; i < len(missingParts); i++ {
		gap := getGap(missingParts, i)
		if gap == 0 && missingParts[prevIndex].OriginFile == nil &&
			missingParts[i].OriginFile == nil &&
			len(missingParts[prevIndex].Chunks) == 1 && len(missingParts[i].Chunks) == 1 &&
			missingParts[prevIndex].Chunks[0].File.Name == missingParts[i].Chunks[0].File.Name {
			missingParts[prevIndex].SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length
			missingParts[prevIndex].Chunks[0].CompressedSize += missingParts[i].Chunks[0].CompressedSize
			missingParts[prevIndex].Chunks[0].UncompressedSize += missingParts[i].Chunks[0].UncompressedSize
		} else {
			newMissingParts = append(newMissingParts, missingParts[i])
			prevIndex++
		}
	}
	missingParts = newMissingParts

	if len(missingParts) <= target {
		return missingParts
	}

	// this implementation doesn't account for duplicates, so it could merge
	// more than necessary to reach the specified target.  Since target itself
	// is a heuristic value, it doesn't matter.
	costs := make([]int, len(missingParts)-1)
	for i := 1; i < len(missingParts); i++ {
		costs[i-1] = getCost(missingParts, i)
	}
	sort.Ints(costs)

	toShrink := len(missingParts) - target
	// NOTE(review): if target == 1, toShrink equals len(costs) and this
	// indexes out of range — confirm callers always pass target > 1.
	targetValue := costs[toShrink]

	newMissingParts = missingParts[0:1]
	for i := 1; i < len(missingParts); i++ {
		if getCost(missingParts, i) > targetValue {
			newMissingParts = append(newMissingParts, missingParts[i])
		} else {
			// Merge part i into the previous kept part, recording the gap as
			// a discard-chunk so storeMissingFiles skips those bytes.
			gap := getGap(missingParts, i)
			prev := &newMissingParts[len(newMissingParts)-1]
			prev.SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length
			prev.OriginFile = nil
			if gap > 0 {
				gapFile := missingFileChunk{
					Gap: int64(gap),
				}
				prev.Chunks = append(prev.Chunks, gapFile)
			}
			prev.Chunks = append(prev.Chunks, missingParts[i].Chunks...)
} } return newMissingParts } func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error { var chunksToRequest []ImageSourceChunk for _, c := range missingParts { if c.OriginFile == nil { chunksToRequest = append(chunksToRequest, *c.SourceChunk) } } // There are some missing files. Prepare a multirange request for the missing chunks. var streams chan io.ReadCloser var err error var errs chan error for { streams, errs, err = c.stream.GetBlobAt(chunksToRequest) if err == nil { break } if _, ok := err.(ErrBadRequest); ok { requested := len(missingParts) // If the server cannot handle at least 64 chunks in a single request, just give up. if requested < 64 { return err } // Merge more chunks to request missingParts = mergeMissingChunks(missingParts, requested/2) continue } return err } if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingParts, options); err != nil { return err } return nil } func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *internal.FileMetadata, options *archive.TarOptions) error { parent := filepath.Dir(name) base := filepath.Base(name) parentFd := dirfd if parent != "." 
		parentFile, err := openOrCreateDirUnderRoot(parent, dirfd, 0)
		if err != nil {
			return err
		}
		defer parentFile.Close()
		parentFd = int(parentFile.Fd())
	}

	if err := unix.Mkdirat(parentFd, base, uint32(mode)); err != nil {
		// An already-existing directory is fine; attributes are fixed below.
		if !os.IsExist(err) {
			return fmt.Errorf("mkdir %q: %w", name, err)
		}
	}

	file, err := openFileUnderRoot(name, dirfd, unix.O_DIRECTORY|unix.O_RDONLY, 0)
	if err != nil {
		return err
	}
	defer file.Close()

	return setFileAttrs(dirfd, file, mode, metadata, options, false)
}

// safeLink creates the hard link described by metadata (Name -> Linkname)
// under dirfd and applies the metadata to the new link.
func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error {
	sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW, 0)
	if err != nil {
		return err
	}
	defer sourceFile.Close()

	destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name)
	destDirFd := dirfd
	if destDir != "." {
		f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0)
		if err != nil {
			return err
		}
		defer f.Close()
		destDirFd = int(f.Fd())
	}

	err = doHardLink(int(sourceFile.Fd()), destDirFd, destBase)
	if err != nil {
		return fmt.Errorf("create hardlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err)
	}

	newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_WRONLY|unix.O_NOFOLLOW, 0)
	if err != nil {
		// If the target is a symlink, open the file with O_PATH.
		if errors.Is(err, unix.ELOOP) {
			newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_PATH|unix.O_NOFOLLOW, 0)
			if err != nil {
				return err
			}
			defer newFile.Close()

			// usePath=true: an O_PATH fd cannot take fchmod/fsetxattr directly.
			return setFileAttrs(dirfd, newFile, mode, metadata, options, true)
		}
		return err
	}
	defer newFile.Close()

	return setFileAttrs(dirfd, newFile, mode, metadata, options, false)
}

// safeSymlink creates the symlink described by metadata (Name -> Linkname)
// under dirfd.
func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error {
	destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name)
	destDirFd := dirfd
	if destDir != "." {
		f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0)
		if err != nil {
			return err
		}
		defer f.Close()
		destDirFd = int(f.Fd())
	}

	if err := unix.Symlinkat(metadata.Linkname, destDirFd, destBase); err != nil {
		return fmt.Errorf("create symlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err)
	}
	return nil
}

// whiteoutHandler implements the whiteout-conversion callbacks (setxattr,
// mknod, chown) relative to the destination checkout root.
type whiteoutHandler struct {
	Dirfd int
	Root  string
}

// Setxattr sets an xattr on directory "path" under the checkout root.
func (d whiteoutHandler) Setxattr(path, name string, value []byte) error {
	file, err := openOrCreateDirUnderRoot(path, d.Dirfd, 0)
	if err != nil {
		return err
	}
	defer file.Close()

	if err := unix.Fsetxattr(int(file.Fd()), name, value, 0); err != nil {
		return fmt.Errorf("set xattr %s=%q for %q: %w", name, value, path, err)
	}
	return nil
}

// Mknod creates a device node at "path" under the checkout root.
func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error {
	dir := filepath.Dir(path)
	base := filepath.Base(path)

	dirfd := d.Dirfd
	if dir != "" {
		dir, err := openOrCreateDirUnderRoot(dir, d.Dirfd, 0)
		if err != nil {
			return err
		}
		defer dir.Close()

		dirfd = int(dir.Fd())
	}

	if err := unix.Mknodat(dirfd, base, mode, dev); err != nil {
		return fmt.Errorf("mknod %q: %w", path, err)
	}
	return nil
}

// checkChownErr turns an EINVAL from chown into a friendlier hint about
// missing sub-UID/GID ranges in the user namespace.
func checkChownErr(err error, name string, uid, gid int) error {
	if errors.Is(err, syscall.EINVAL) {
		return fmt.Errorf("potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally: %w", uid, gid, name, err)
	}
	return err
}

// Chown changes ownership of "path" under the checkout root.  If the chown
// fails but the file already has the requested owner, the error is ignored.
func (d whiteoutHandler) Chown(path string, uid, gid int) error {
	file, err := openFileUnderRoot(path, d.Dirfd, unix.O_PATH, 0)
	if err != nil {
		return err
	}
	defer file.Close()

	if err := unix.Fchownat(int(file.Fd()), "", uid, gid, unix.AT_EMPTY_PATH); err != nil {
		var stat unix.Stat_t
		if unix.Fstat(int(file.Fd()), &stat) == nil {
			if stat.Uid == uint32(uid) && stat.Gid == uint32(gid) {
				return nil
			}
		}
		return checkChownErr(err, path, uid, gid)
	}
	return nil
}

// hardLinkToCreate records a hard link whose creation is postponed until all
// regular files are in place (the link target might itself be missing).
type hardLinkToCreate struct {
	dest     string
	dirfd    int
	mode     os.FileMode
	metadata *internal.FileMetadata
}
// parseBooleanPullOption reads a boolean pull option from the store options,
// returning def when the option is unset.
func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bool) bool {
	if value, ok := storeOpts.PullOptions[name]; ok {
		return strings.ToLower(value) == "true"
	}
	return def
}

// ApplyDiff applies the chunked layer described by c.manifest to the checkout
// at dest.  Each file is satisfied, in order of preference, from other local
// layers, configured OSTree repositories, the host filesystem (if enabled),
// and finally by fetching the missing chunks from the remote source.
func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) {
	defer c.layersCache.release()
	defer func() {
		if c.zstdReader != nil {
			c.zstdReader.Close()
		}
	}()

	bigData := map[string][]byte{
		bigDataKey: c.manifest,
	}
	output := graphdriver.DriverWithDifferOutput{
		Differ:  c,
		BigData: bigData,
	}

	storeOpts, err := types.DefaultStoreOptionsAutoDetectUID()
	if err != nil {
		return output, err
	}

	// Partial pulls are opt-in.
	if !parseBooleanPullOption(&storeOpts, "enable_partial_images", false) {
		return output, errors.New("enable_partial_images not configured")
	}

	enableHostDedup := parseBooleanPullOption(&storeOpts, "enable_host_deduplication", false)

	// When the hard links deduplication is used, file attributes are ignored because setting them
	// modifies the source file as well.
	useHardLinks := parseBooleanPullOption(&storeOpts, "use_hard_links", false)

	// List of OSTree repositories to use for deduplication
	ostreeRepos := strings.Split(storeOpts.PullOptions["ostree_repos"], ":")

	// Generate the manifest
	var toc internal.TOC
	if err := json.Unmarshal(c.manifest, &toc); err != nil {
		return output, err
	}

	whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)

	var missingParts []missingPart

	mergedEntries, err := c.mergeTocEntries(c.fileType, toc.Entries)
	if err != nil {
		return output, err
	}
	if err := maybeDoIDRemap(mergedEntries, options); err != nil {
		return output, err
	}

	if options.ForceMask != nil {
		// Record the real owner/mode in an override xattr on the root.
		uid, gid, mode, err := archive.GetFileOwner(dest)
		if err == nil {
			value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
			if err := unix.Setxattr(dest, containersOverrideXattr, []byte(value), 0); err != nil {
				return output, err
			}
		}
	}

	dirfd, err := unix.Open(dest, unix.O_RDONLY|unix.O_PATH, 0)
	if err != nil {
		return output, fmt.Errorf("cannot open %q: %w", dest, err)
	}
	defer unix.Close(dirfd)

	// hardlinks can point to missing files.  So create them after all files
	// are retrieved
	var hardLinks []hardLinkToCreate

	missingPartsSize, totalChunksSize := int64(0), int64(0)
	for i, r := range mergedEntries {
		if options.ForceMask != nil {
			value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&07777)
			// NOTE(review): this assumes r.Xattrs is non-nil for every entry
			// when ForceMask is set — confirm the manifest always provides it.
			r.Xattrs[containersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value))
			r.Mode = int64(*options.ForceMask)
		}

		mode := os.FileMode(r.Mode)

		r.Name = filepath.Clean(r.Name)
		r.Linkname = filepath.Clean(r.Linkname)

		t, err := typeToTarType(r.Type)
		if err != nil {
			return output, err
		}
		if whiteoutConverter != nil {
			hdr := archivetar.Header{
				Typeflag: t,
				Name:     r.Name,
				Linkname: r.Linkname,
				Size:     r.Size,
				Mode:     r.Mode,
				Uid:      r.UID,
				Gid:      r.GID,
			}
			handler := whiteoutHandler{
				Dirfd: dirfd,
				Root:  dest,
			}
			writeFile, err := whiteoutConverter.ConvertReadWithHandler(&hdr, r.Name, &handler)
			if err != nil {
				return output, err
			}
			if !writeFile {
				continue
			}
		}
		switch t {
		case tar.TypeReg:
			// Create directly empty files.
			if r.Size == 0 {
				// Used to have a scope for cleanup.
				createEmptyFile := func() error {
					file, err := openFileUnderRoot(r.Name, dirfd, newFileFlags, 0)
					if err != nil {
						return err
					}
					defer file.Close()
					if err := setFileAttrs(dirfd, file, mode, &r, options, false); err != nil {
						return err
					}
					return nil
				}
				if err := createEmptyFile(); err != nil {
					return output, err
				}
				continue
			}

		case tar.TypeDir:
			if err := safeMkdir(dirfd, mode, r.Name, &r, options); err != nil {
				return output, err
			}
			continue

		case tar.TypeLink:
			// Copy the loop variables: the hard link is created later.
			dest := dest
			dirfd := dirfd
			mode := mode
			r := r
			hardLinks = append(hardLinks, hardLinkToCreate{
				dest:     dest,
				dirfd:    dirfd,
				mode:     mode,
				metadata: &r,
			})
			continue

		case tar.TypeSymlink:
			if err := safeSymlink(dirfd, mode, &r, options); err != nil {
				return output, err
			}
			continue

		// Char/block/fifo entries are deliberately skipped (empty case
		// bodies; Go does not fall through).
		case tar.TypeChar:
		case tar.TypeBlock:
		case tar.TypeFifo:
			/* Ignore.  */
		default:
			return output, fmt.Errorf("invalid type %q", t)
		}

		totalChunksSize += r.Size

		finalizeFile := func(dstFile *os.File) error {
			if dstFile != nil {
				defer dstFile.Close()
				if err := setFileAttrs(dirfd, dstFile, mode, &r, options, false); err != nil {
					return err
				}
			}
			return nil
		}

		// Dedup sources, in order: other layers, OSTree repos, host files.
		found, dstFile, _, err := findFileInOtherLayers(c.layersCache, &r, dirfd, useHardLinks)
		if err != nil {
			return output, err
		}
		if found {
			if err := finalizeFile(dstFile); err != nil {
				return output, err
			}
			continue
		}

		found, dstFile, _, err = findFileInOSTreeRepos(&r, ostreeRepos, dirfd, useHardLinks)
		if err != nil {
			return output, err
		}
		if found {
			if err := finalizeFile(dstFile); err != nil {
				return output, err
			}
			continue
		}

		if enableHostDedup {
			found, dstFile, _, err = findFileOnTheHost(&r, dirfd, useHardLinks, c.copyBuffer)
			if err != nil {
				return output, err
			}
			if found {
				if err := finalizeFile(dstFile); err != nil {
					return output, err
				}
				continue
			}
		}

		// Nothing local matched: queue the file's chunks for download,
		// unless a single chunk can be found in another layer's checkout.
		missingPartsSize += r.Size
		if t == tar.TypeReg {
			remainingSize := r.Size
			for _, chunk := range r.Chunks {
				compressedSize := int64(chunk.EndOffset - chunk.Offset)
				size := remainingSize
				if chunk.ChunkSize > 0 {
					size = chunk.ChunkSize
				}
				remainingSize = remainingSize - size

				rawChunk := ImageSourceChunk{
					Offset: uint64(chunk.Offset),
					Length: uint64(compressedSize),
				}
				file := missingFileChunk{
					File:             &mergedEntries[i],
					CompressedSize:   compressedSize,
					UncompressedSize: size,
				}
				mp := missingPart{
					SourceChunk: &rawChunk,
					Chunks: []missingFileChunk{
						file,
					},
				}

				root, path, offset := c.layersCache.findChunkInOtherLayers(chunk)
				if offset >= 0 {
					missingPartsSize -= size
					mp.OriginFile = &originFile{
						Root:   root,
						Path:   path,
						Offset: offset,
					}
				}
				missingParts = append(missingParts, mp)
			}
		}
	}
	// There are some missing files.  Prepare a multirange request for the missing chunks.
	if len(missingParts) > 0 {
		missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks)
		if err := c.retrieveMissingFiles(dest, dirfd, missingParts, options); err != nil {
			return output, err
		}
	}

	// Hard links were postponed until all their targets exist.
	for _, m := range hardLinks {
		if err := safeLink(m.dirfd, m.mode, m.metadata, options); err != nil {
			return output, err
		}
	}

	if totalChunksSize > 0 {
		logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize))
	}
	return output, nil
}

// mustSkipFile reports whether a TOC entry is an estargz metadata file that
// must not be materialized in the checkout.
func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool {
	// ignore the metadata files for the estargz format.
	if fileType != fileTypeEstargz {
		return false
	}
	switch e.Name {
	// ignore the metadata files for the estargz format.
	case estargz.PrefetchLandmark, estargz.NoPrefetchLandmark, estargz.TOCTarName:
		return true
	}
	return false
}

// mergeTocEntries collapses per-chunk TOC entries into one entry per file:
// each regular file absorbs its following TypeChunk entries into e.Chunks.
// It also back-fills missing EndOffset values (estargz does not store them)
// by scanning the entries backwards from the TOC offset.
func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]internal.FileMetadata, error) {
	// countNextChunks returns how many consecutive chunk entries follow start.
	countNextChunks := func(start int) int {
		count := 0
		for _, e := range entries[start:] {
			if e.Type != TypeChunk {
				return count
			}
			count++
		}
		return count
	}

	// First pass: how many merged (non-chunk, non-skipped) entries there are.
	size := 0
	for _, entry := range entries {
		if mustSkipFile(fileType, entry) {
			continue
		}
		if entry.Type != TypeChunk {
			size++
		}
	}

	mergedEntries := make([]internal.FileMetadata, size)
	m := 0
	for i := 0; i < len(entries); i++ {
		e := entries[i]
		if mustSkipFile(fileType, e) {
			continue
		}
		if e.Type == TypeChunk {
			// A chunk must always follow a regular-file entry.
			return nil, fmt.Errorf("chunk type without a regular file")
		}

		if e.Type == TypeReg {
			nChunks := countNextChunks(i + 1)

			// Chunks[0] is the file entry itself, followed by its chunk entries.
			e.Chunks = make([]*internal.FileMetadata, nChunks+1)
			for j := 0; j <= nChunks; j++ {
				e.Chunks[j] = &entries[i+j]
				e.EndOffset = entries[i+j].EndOffset
			}
			i += nChunks
		}
		mergedEntries[m] = e
		m++
	}
	// stargz/estargz doesn't store EndOffset so let's calculate it here
	lastOffset := c.tocOffset
	for i := len(mergedEntries) - 1; i >= 0; i-- {
		if mergedEntries[i].EndOffset == 0 {
			// A file's data ends where the next file's data (or the TOC) begins.
			mergedEntries[i].EndOffset = lastOffset
		}
		if mergedEntries[i].Offset != 0 {
			lastOffset = mergedEntries[i].Offset
		}

		lastChunkOffset := mergedEntries[i].EndOffset
		for j := len(mergedEntries[i].Chunks) - 1; j >= 0; j-- {
			mergedEntries[i].Chunks[j].EndOffset = lastChunkOffset
			mergedEntries[i].Chunks[j].Size = mergedEntries[i].Chunks[j].EndOffset - mergedEntries[i].Chunks[j].Offset
			lastChunkOffset = mergedEntries[i].Chunks[j].Offset
		}
	}
	return mergedEntries, nil
}
// NOTE(review): this span is a dump of TWO revisions of the same
// go-swagger generated model file, separated by the embedded commit
// message "fix wrong model". The code is machine-generated ("DO NOT
// EDIT"); any change must be made in the swagger spec and regenerated.
// Revision 1 (pre-fix) follows.

// Code generated by go-swagger; DO NOT EDIT.

package models

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

import (
	"strconv"

	strfmt "github.com/go-openapi/strfmt"

	"github.com/go-openapi/errors"
	"github.com/go-openapi/swag"
	"github.com/go-openapi/validate"
)

// Cluster cluster
// swagger:model Cluster
type Cluster struct {
	// name of the cluster
	// Required: true
	// Pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$
	Name *string `json:"name"`

	// spec
	Spec *ClusterSpec `json:"spec,omitempty"`

	// status
	Status *ClusterStatus `json:"status,omitempty"`
}

// Validate validates this cluster
func (m *Cluster) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateName(formats); err != nil {
		// prop
		res = append(res, err)
	}
	if err := m.validateSpec(formats); err != nil {
		// prop
		res = append(res, err)
	}
	if err := m.validateStatus(formats); err != nil {
		// prop
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

func (m *Cluster) validateName(formats strfmt.Registry) error {
	if err := validate.Required("name", "body", m.Name); err != nil {
		return err
	}
	if err := validate.Pattern("name", "body", string(*m.Name), `^[a-z]([-a-z0-9]*[a-z0-9])?$`); err != nil {
		return err
	}
	return nil
}

func (m *Cluster) validateSpec(formats strfmt.Registry) error {
	if swag.IsZero(m.Spec) { // not required
		return nil
	}
	if m.Spec != nil {
		if err := m.Spec.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("spec")
			}
			return err
		}
	}
	return nil
}

func (m *Cluster) validateStatus(formats strfmt.Registry) error {
	if swag.IsZero(m.Status) { // not required
		return nil
	}
	if m.Status != nil {
		if err := m.Status.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("status")
			}
			return err
		}
	}
	return nil
}

// MarshalBinary interface implementation
func (m *Cluster) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *Cluster) UnmarshalBinary(b []byte) error {
	var res Cluster
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}

// ClusterSpec cluster spec
// swagger:model ClusterSpec
type ClusterSpec struct {
	// node pools
	NodePools []*ClusterSpecNodePoolsItems0 `json:"nodePools"`
}

// Validate validates this cluster spec
func (m *ClusterSpec) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateNodePools(formats); err != nil {
		// prop
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

func (m *ClusterSpec) validateNodePools(formats strfmt.Registry) error {
	if swag.IsZero(m.NodePools) { // not required
		return nil
	}
	for i := 0; i < len(m.NodePools); i++ {
		if swag.IsZero(m.NodePools[i]) { // not required
			continue
		}
		if m.NodePools[i] != nil {
			if err := m.NodePools[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("spec" + "." + "nodePools" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}

// MarshalBinary interface implementation
func (m *ClusterSpec) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *ClusterSpec) UnmarshalBinary(b []byte) error {
	var res ClusterSpec
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}

// ClusterSpecNodePoolsItems0 cluster spec node pools items0
// swagger:model ClusterSpecNodePoolsItems0
type ClusterSpecNodePoolsItems0 struct {
	// flavor
	Flavor string `json:"flavor,omitempty"`

	// image
	Image string `json:"image,omitempty"`

	// name
	// Pattern: ^[a-z]([a-z0-9]*)?$
	Name string `json:"name,omitempty"`

	// size
	Size int64 `json:"size,omitempty"`
}

// Validate validates this cluster spec node pools items0
func (m *ClusterSpecNodePoolsItems0) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateName(formats); err != nil {
		// prop
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

func (m *ClusterSpecNodePoolsItems0) validateName(formats strfmt.Registry) error {
	if swag.IsZero(m.Name) { // not required
		return nil
	}
	if err := validate.Pattern("name", "body", string(m.Name), `^[a-z]([a-z0-9]*)?$`); err != nil {
		return err
	}
	return nil
}

// MarshalBinary interface implementation
func (m *ClusterSpecNodePoolsItems0) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *ClusterSpecNodePoolsItems0) UnmarshalBinary(b []byte) error {
	var res ClusterSpecNodePoolsItems0
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}

// ClusterStatus cluster status
// swagger:model ClusterStatus
type ClusterStatus struct {
	// status of the cluster
	Kluster string `json:"kluster,omitempty"`

	// node pools
	NodePools []*ClusterStatusNodePoolsItems0 `json:"nodePools"`
}

// Validate validates this cluster status
func (m *ClusterStatus) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateNodePools(formats); err != nil {
		// prop
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

func (m *ClusterStatus) validateNodePools(formats strfmt.Registry) error {
	if swag.IsZero(m.NodePools) { // not required
		return nil
	}
	for i := 0; i < len(m.NodePools); i++ {
		if swag.IsZero(m.NodePools[i]) { // not required
			continue
		}
		if m.NodePools[i] != nil {
			if err := m.NodePools[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("status" + "." + "nodePools" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}

// MarshalBinary interface implementation
func (m *ClusterStatus) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *ClusterStatus) UnmarshalBinary(b []byte) error {
	var res ClusterStatus
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}

// ClusterStatusNodePoolsItems0 cluster status node pools items0
// swagger:model ClusterStatusNodePoolsItems0
type ClusterStatusNodePoolsItems0 struct {
	// name
	Name string `json:"name,omitempty"`

	// ready
	Ready int64 `json:"ready,omitempty"`

	// size
	Size int64 `json:"size,omitempty"`
}

// Validate validates this cluster status node pools items0
func (m *ClusterStatusNodePoolsItems0) Validate(formats strfmt.Registry) error {
	var res []error

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// MarshalBinary interface implementation
func (m *ClusterStatusNodePoolsItems0) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *ClusterStatusNodePoolsItems0) UnmarshalBinary(b []byte) error {
	var res ClusterStatusNodePoolsItems0
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}

fix wrong model

// NOTE(review): the stray text above is an embedded commit message, not Go
// code. Revision 2 (post-fix) follows: node-pool fields become required
// pointer types with min/max validation, and the cluster status gains the
// structured ClusterStatusKluster type.

// Code generated by go-swagger; DO NOT EDIT.

package models

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

import (
	"strconv"

	strfmt "github.com/go-openapi/strfmt"

	"github.com/go-openapi/errors"
	"github.com/go-openapi/swag"
	"github.com/go-openapi/validate"
)

// Cluster cluster
// swagger:model Cluster
type Cluster struct {
	// name of the cluster
	// Required: true
	// Pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$
	Name *string `json:"name"`

	// spec
	Spec *ClusterSpec `json:"spec,omitempty"`

	// status
	Status *ClusterStatus `json:"status,omitempty"`
}

// Validate validates this cluster
func (m *Cluster) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateName(formats); err != nil {
		// prop
		res = append(res, err)
	}
	if err := m.validateSpec(formats); err != nil {
		// prop
		res = append(res, err)
	}
	if err := m.validateStatus(formats); err != nil {
		// prop
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

func (m *Cluster) validateName(formats strfmt.Registry) error {
	if err := validate.Required("name", "body", m.Name); err != nil {
		return err
	}
	if err := validate.Pattern("name", "body", string(*m.Name), `^[a-z]([-a-z0-9]*[a-z0-9])?$`); err != nil {
		return err
	}
	return nil
}

func (m *Cluster) validateSpec(formats strfmt.Registry) error {
	if swag.IsZero(m.Spec) { // not required
		return nil
	}
	if m.Spec != nil {
		if err := m.Spec.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("spec")
			}
			return err
		}
	}
	return nil
}

func (m *Cluster) validateStatus(formats strfmt.Registry) error {
	if swag.IsZero(m.Status) { // not required
		return nil
	}
	if m.Status != nil {
		if err := m.Status.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("status")
			}
			return err
		}
	}
	return nil
}

// MarshalBinary interface implementation
func (m *Cluster) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *Cluster) UnmarshalBinary(b []byte) error {
	var res Cluster
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}

// ClusterSpec cluster spec
// swagger:model ClusterSpec
type ClusterSpec struct {
	// node pools
	NodePools []*ClusterSpecNodePoolsItems0 `json:"nodePools"`
}

// Validate validates this cluster spec
func (m *ClusterSpec) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateNodePools(formats); err != nil {
		// prop
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

func (m *ClusterSpec) validateNodePools(formats strfmt.Registry) error {
	if swag.IsZero(m.NodePools) { // not required
		return nil
	}
	for i := 0; i < len(m.NodePools); i++ {
		if swag.IsZero(m.NodePools[i]) { // not required
			continue
		}
		if m.NodePools[i] != nil {
			if err := m.NodePools[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("spec" + "." + "nodePools" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}

// MarshalBinary interface implementation
func (m *ClusterSpec) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *ClusterSpec) UnmarshalBinary(b []byte) error {
	var res ClusterSpec
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}

// ClusterSpecNodePoolsItems0 cluster spec node pools items0
// swagger:model ClusterSpecNodePoolsItems0
type ClusterSpecNodePoolsItems0 struct {
	// flavor
	// Required: true
	Flavor *string `json:"flavor"`

	// image
	Image string `json:"image,omitempty"`

	// name
	// Required: true
	// Pattern: ^[a-z]([a-z0-9]*)?$
	Name *string `json:"name"`

	// size
	// Required: true
	// Maximum: 127
	// Minimum: 0
	Size *int64 `json:"size"`
}

// Validate validates this cluster spec node pools items0
func (m *ClusterSpecNodePoolsItems0) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateFlavor(formats); err != nil {
		// prop
		res = append(res, err)
	}
	if err := m.validateName(formats); err != nil {
		// prop
		res = append(res, err)
	}
	if err := m.validateSize(formats); err != nil {
		// prop
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

func (m *ClusterSpecNodePoolsItems0) validateFlavor(formats strfmt.Registry) error {
	if err := validate.Required("flavor", "body", m.Flavor); err != nil {
		return err
	}
	return nil
}

func (m *ClusterSpecNodePoolsItems0) validateName(formats strfmt.Registry) error {
	if err := validate.Required("name", "body", m.Name); err != nil {
		return err
	}
	if err := validate.Pattern("name", "body", string(*m.Name), `^[a-z]([a-z0-9]*)?$`); err != nil {
		return err
	}
	return nil
}

func (m *ClusterSpecNodePoolsItems0) validateSize(formats strfmt.Registry) error {
	if err := validate.Required("size", "body", m.Size); err != nil {
		return err
	}
	if err := validate.MinimumInt("size", "body", int64(*m.Size), 0, false); err != nil {
		return err
	}
	if err := validate.MaximumInt("size", "body", int64(*m.Size), 127, false); err != nil {
		return err
	}
	return nil
}

// MarshalBinary interface implementation
func (m *ClusterSpecNodePoolsItems0) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *ClusterSpecNodePoolsItems0) UnmarshalBinary(b []byte) error {
	var res ClusterSpecNodePoolsItems0
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}

// ClusterStatus cluster status
// swagger:model ClusterStatus
type ClusterStatus struct {
	// kluster
	Kluster *ClusterStatusKluster `json:"kluster,omitempty"`

	// node pools
	NodePools []*ClusterStatusNodePoolsItems0 `json:"nodePools"`
}

// Validate validates this cluster status
func (m *ClusterStatus) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateKluster(formats); err != nil {
		// prop
		res = append(res, err)
	}
	if err := m.validateNodePools(formats); err != nil {
		// prop
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

func (m *ClusterStatus) validateKluster(formats strfmt.Registry) error {
	if swag.IsZero(m.Kluster) { // not required
		return nil
	}
	if m.Kluster != nil {
		if err := m.Kluster.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("status" + "." + "kluster")
			}
			return err
		}
	}
	return nil
}

func (m *ClusterStatus) validateNodePools(formats strfmt.Registry) error {
	if swag.IsZero(m.NodePools) { // not required
		return nil
	}
	for i := 0; i < len(m.NodePools); i++ {
		if swag.IsZero(m.NodePools[i]) { // not required
			continue
		}
		if m.NodePools[i] != nil {
			if err := m.NodePools[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("status" + "." + "nodePools" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}

// MarshalBinary interface implementation
func (m *ClusterStatus) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *ClusterStatus) UnmarshalBinary(b []byte) error {
	var res ClusterStatus
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}

// ClusterStatusKluster cluster status kluster
// swagger:model ClusterStatusKluster
type ClusterStatusKluster struct {
	// message
	Message string `json:"message,omitempty"`

	// status of the cluster
	State string `json:"state,omitempty"`
}

// Validate validates this cluster status kluster
func (m *ClusterStatusKluster) Validate(formats strfmt.Registry) error {
	var res []error

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// MarshalBinary interface implementation
func (m *ClusterStatusKluster) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *ClusterStatusKluster) UnmarshalBinary(b []byte) error {
	var res ClusterStatusKluster
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}

// ClusterStatusNodePoolsItems0 cluster status node pools items0
// swagger:model ClusterStatusNodePoolsItems0
type ClusterStatusNodePoolsItems0 struct {
	// healthy
	// Required: true
	Healthy *int64 `json:"healthy"`

	// name
	// Required: true
	Name *string `json:"name"`

	// running
	// Required: true
	Running *int64 `json:"running"`

	// schedulable
	// Required: true
	Schedulable *int64 `json:"schedulable"`

	// size
	// Required: true
	Size *int64 `json:"size"`
}

// Validate validates this cluster status node pools items0
func (m *ClusterStatusNodePoolsItems0) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateHealthy(formats); err != nil {
		// prop
		res = append(res, err)
	}
	if err := m.validateName(formats); err != nil {
		// prop
		res = append(res, err)
	}
	if err := m.validateRunning(formats); err != nil {
		// prop
		res = append(res, err)
	}
	if err := m.validateSchedulable(formats); err != nil {
		// prop
		res = append(res, err)
	}
	if err := m.validateSize(formats); err != nil {
		// prop
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

func (m *ClusterStatusNodePoolsItems0) validateHealthy(formats strfmt.Registry) error {
	if err := validate.Required("healthy", "body", m.Healthy); err != nil {
		return err
	}
	return nil
}

func (m *ClusterStatusNodePoolsItems0) validateName(formats strfmt.Registry) error {
	if err := validate.Required("name", "body", m.Name); err != nil {
		return err
	}
	return nil
}

func (m *ClusterStatusNodePoolsItems0) validateRunning(formats strfmt.Registry) error {
	if err := validate.Required("running", "body", m.Running); err != nil {
		return err
	}
	return nil
}

func (m *ClusterStatusNodePoolsItems0) validateSchedulable(formats strfmt.Registry) error {
	if err := validate.Required("schedulable", "body", m.Schedulable); err != nil {
		return err
	}
	return nil
}

func (m *ClusterStatusNodePoolsItems0) validateSize(formats strfmt.Registry) error {
	if err := validate.Required("size", "body", m.Size); err != nil {
		return err
	}
	return nil
}

// MarshalBinary interface implementation
func (m *ClusterStatusNodePoolsItems0) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *ClusterStatusNodePoolsItems0) UnmarshalBinary(b []byte) error {
	var res ClusterStatusNodePoolsItems0
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// NOTE(review): this span is a dump of TWO revisions of Heptio Ark's
// `backup create` command, separated by the embedded commit message
// "add --volume-snapshot-locations flag to ark backup create".
// Revision 1 (pre-flag) follows.

/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package backup

import (
	"fmt"
	"time"

	"github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"

	api "github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/client"
	"github.com/heptio/ark/pkg/cmd"
	"github.com/heptio/ark/pkg/cmd/util/flag"
	"github.com/heptio/ark/pkg/cmd/util/output"
	arkclient "github.com/heptio/ark/pkg/generated/clientset/versioned"
)

// NewCreateCommand builds the cobra command for creating a backup,
// wiring the option flags and the Complete/Validate/Run lifecycle.
func NewCreateCommand(f client.Factory, use string) *cobra.Command {
	o := NewCreateOptions()

	c := &cobra.Command{
		Use:   use + " NAME",
		Short: "Create a backup",
		Args:  cobra.ExactArgs(1),
		Run: func(c *cobra.Command, args []string) {
			cmd.CheckError(o.Complete(args, f))
			cmd.CheckError(o.Validate(c, args, f))
			cmd.CheckError(o.Run(c, f))
		},
	}

	o.BindFlags(c.Flags())
	o.BindWait(c.Flags())
	output.BindFlags(c.Flags())
	output.ClearOutputFlagDefault(c)

	return c
}

// CreateOptions holds the user-supplied settings for one backup request.
type CreateOptions struct {
	Name                    string
	TTL                     time.Duration
	SnapshotVolumes         flag.OptionalBool
	IncludeNamespaces       flag.StringArray
	ExcludeNamespaces       flag.StringArray
	IncludeResources        flag.StringArray
	ExcludeResources        flag.StringArray
	Labels                  flag.Map
	Selector                flag.LabelSelector
	IncludeClusterResources flag.OptionalBool
	Wait                    bool
	StorageLocation         string

	client arkclient.Interface
}

// NewCreateOptions returns CreateOptions with the defaults: a 30-day TTL,
// all namespaces included, and tri-state bools left unset.
func NewCreateOptions() *CreateOptions {
	return &CreateOptions{
		TTL:                     30 * 24 * time.Hour,
		IncludeNamespaces:       flag.NewStringArray("*"),
		Labels:                  flag.NewMap(),
		SnapshotVolumes:         flag.NewOptionalBool(nil),
		IncludeClusterResources: flag.NewOptionalBool(nil),
	}
}

// BindFlags registers the backup-creation flags on the given flag set.
func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
	flags.DurationVar(&o.TTL, "ttl", o.TTL, "how long before the backup can be garbage collected")
	flags.Var(&o.IncludeNamespaces, "include-namespaces", "namespaces to include in the backup (use '*' for all namespaces)")
	flags.Var(&o.ExcludeNamespaces, "exclude-namespaces", "namespaces to exclude from the backup")
	flags.Var(&o.IncludeResources, "include-resources", "resources to include in the backup, formatted as resource.group, such as storageclasses.storage.k8s.io (use '*' for all resources)")
	flags.Var(&o.ExcludeResources, "exclude-resources", "resources to exclude from the backup, formatted as resource.group, such as storageclasses.storage.k8s.io")
	flags.Var(&o.Labels, "labels", "labels to apply to the backup")
	flags.StringVar(&o.StorageLocation, "storage-location", "", "location in which to store the backup")
	flags.VarP(&o.Selector, "selector", "l", "only back up resources matching this label selector")
	f := flags.VarPF(&o.SnapshotVolumes, "snapshot-volumes", "", "take snapshots of PersistentVolumes as part of the backup")
	// this allows the user to just specify "--snapshot-volumes" as shorthand for "--snapshot-volumes=true"
	// like a normal bool flag
	f.NoOptDefVal = "true"

	f = flags.VarPF(&o.IncludeClusterResources, "include-cluster-resources", "", "include cluster-scoped resources in the backup")
	f.NoOptDefVal = "true"
}

// BindWait binds the wait flag separately so it is not called by other create
// commands that reuse CreateOptions's BindFlags method.
func (o *CreateOptions) BindWait(flags *pflag.FlagSet) {
	flags.BoolVarP(&o.Wait, "wait", "w", o.Wait, "wait for the operation to complete")
}

// Validate checks output flags and, when a storage location was given,
// verifies that it exists on the server.
func (o *CreateOptions) Validate(c *cobra.Command, args []string, f client.Factory) error {
	if err := output.ValidateFlags(c); err != nil {
		return err
	}

	if o.StorageLocation != "" {
		if _, err := o.client.ArkV1().BackupStorageLocations(f.Namespace()).Get(o.StorageLocation, metav1.GetOptions{}); err != nil {
			return err
		}
	}

	return nil
}

// Complete fills in the backup name from args and builds the API client.
func (o *CreateOptions) Complete(args []string, f client.Factory) error {
	o.Name = args[0]
	client, err := f.Client()
	if err != nil {
		return err
	}
	o.client = client
	return nil
}

// Run submits the Backup resource and, when --wait was given, watches it
// via an informer until the backup leaves the New/InProgress phases.
func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
	backup := &api.Backup{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: f.Namespace(),
			Name:      o.Name,
			Labels:    o.Labels.Data(),
		},
		Spec: api.BackupSpec{
			IncludedNamespaces:      o.IncludeNamespaces,
			ExcludedNamespaces:      o.ExcludeNamespaces,
			IncludedResources:       o.IncludeResources,
			ExcludedResources:       o.ExcludeResources,
			LabelSelector:           o.Selector.LabelSelector,
			SnapshotVolumes:         o.SnapshotVolumes.Value,
			TTL:                     metav1.Duration{Duration: o.TTL},
			IncludeClusterResources: o.IncludeClusterResources.Value,
			StorageLocation:         o.StorageLocation,
		},
	}

	if printed, err := output.PrintWithFormat(c, backup); printed || err != nil {
		return err
	}

	var backupInformer cache.SharedIndexInformer
	var updates chan *api.Backup
	if o.Wait {
		// The informer is started BEFORE the backup is created so no phase
		// transition can be missed.
		stop := make(chan struct{})
		defer close(stop)

		updates = make(chan *api.Backup)

		backupInformer = v1.NewBackupInformer(o.client, f.Namespace(), 0, nil)

		backupInformer.AddEventHandler(
			cache.FilteringResourceEventHandler{
				FilterFunc: func(obj interface{}) bool {
					backup, ok := obj.(*api.Backup)
					if !ok {
						return false
					}
					return backup.Name == o.Name
				},
				Handler: cache.ResourceEventHandlerFuncs{
					UpdateFunc: func(_, obj interface{}) {
						backup, ok := obj.(*api.Backup)
						if !ok {
							return
						}
						updates <- backup
					},
					DeleteFunc: func(obj interface{}) {
						backup, ok := obj.(*api.Backup)
						if !ok {
							return
						}
						updates <- backup
					},
				},
			},
		)
		go backupInformer.Run(stop)
	}

	_, err := o.client.ArkV1().Backups(backup.Namespace).Create(backup)
	if err != nil {
		return err
	}

	fmt.Printf("Backup request %q submitted successfully.\n", backup.Name)
	if o.Wait {
		fmt.Println("Waiting for backup to complete. You may safely press ctrl-c to stop waiting - your backup will continue in the background.")
		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				fmt.Print(".")
			case backup, ok := <-updates:
				if !ok {
					fmt.Println("\nError waiting: unable to watch backups.")
					return nil
				}

				if backup.Status.Phase != api.BackupPhaseNew && backup.Status.Phase != api.BackupPhaseInProgress {
					fmt.Printf("\nBackup completed with status: %s. You may check for more information using the commands `ark backup describe %s` and `ark backup logs %s`.\n", backup.Status.Phase, backup.Name, backup.Name)
					return nil
				}
			}
		}
	}

	// Not waiting

	fmt.Printf("Run `ark backup describe %s` or `ark backup logs %s` for more details.\n", backup.Name, backup.Name)

	return nil
}

add --volume-snapshot-locations flag to ark backup create

Signed-off-by: Steve Kriss <9ce5770b3bb4b2a1d59be2d97e34379cd192299f@heptio.com>

// NOTE(review): the stray text above is an embedded commit message, not Go
// code. Revision 2 (post-flag) follows: it adds SnapshotLocations, the
// --volume-snapshot-locations flag, its validation, and passes the value
// through to BackupSpec.VolumeSnapshotLocations.

/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package backup

import (
	"fmt"
	"time"

	"github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"

	api "github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/client"
	"github.com/heptio/ark/pkg/cmd"
	"github.com/heptio/ark/pkg/cmd/util/flag"
	"github.com/heptio/ark/pkg/cmd/util/output"
	arkclient "github.com/heptio/ark/pkg/generated/clientset/versioned"
)

// NewCreateCommand builds the cobra command for creating a backup,
// wiring the option flags and the Complete/Validate/Run lifecycle.
func NewCreateCommand(f client.Factory, use string) *cobra.Command {
	o := NewCreateOptions()

	c := &cobra.Command{
		Use:   use + " NAME",
		Short: "Create a backup",
		Args:  cobra.ExactArgs(1),
		Run: func(c *cobra.Command, args []string) {
			cmd.CheckError(o.Complete(args, f))
			cmd.CheckError(o.Validate(c, args, f))
			cmd.CheckError(o.Run(c, f))
		},
	}

	o.BindFlags(c.Flags())
	o.BindWait(c.Flags())
	output.BindFlags(c.Flags())
	output.ClearOutputFlagDefault(c)

	return c
}

// CreateOptions holds the user-supplied settings for one backup request.
type CreateOptions struct {
	Name                    string
	TTL                     time.Duration
	SnapshotVolumes         flag.OptionalBool
	IncludeNamespaces       flag.StringArray
	ExcludeNamespaces       flag.StringArray
	IncludeResources        flag.StringArray
	ExcludeResources        flag.StringArray
	Labels                  flag.Map
	Selector                flag.LabelSelector
	IncludeClusterResources flag.OptionalBool
	Wait                    bool
	StorageLocation         string
	SnapshotLocations       []string

	client arkclient.Interface
}

// NewCreateOptions returns CreateOptions with the defaults: a 30-day TTL,
// all namespaces included, and tri-state bools left unset.
func NewCreateOptions() *CreateOptions {
	return &CreateOptions{
		TTL:                     30 * 24 * time.Hour,
		IncludeNamespaces:       flag.NewStringArray("*"),
		Labels:                  flag.NewMap(),
		SnapshotVolumes:         flag.NewOptionalBool(nil),
		IncludeClusterResources: flag.NewOptionalBool(nil),
	}
}

// BindFlags registers the backup-creation flags on the given flag set.
func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
	flags.DurationVar(&o.TTL, "ttl", o.TTL, "how long before the backup can be garbage collected")
	flags.Var(&o.IncludeNamespaces, "include-namespaces", "namespaces to include in the backup (use '*' for all namespaces)")
	flags.Var(&o.ExcludeNamespaces, "exclude-namespaces", "namespaces to exclude from the backup")
	flags.Var(&o.IncludeResources, "include-resources", "resources to include in the backup, formatted as resource.group, such as storageclasses.storage.k8s.io (use '*' for all resources)")
	flags.Var(&o.ExcludeResources, "exclude-resources", "resources to exclude from the backup, formatted as resource.group, such as storageclasses.storage.k8s.io")
	flags.Var(&o.Labels, "labels", "labels to apply to the backup")
	flags.StringVar(&o.StorageLocation, "storage-location", "", "location in which to store the backup")
	flags.StringSliceVar(&o.SnapshotLocations, "volume-snapshot-locations", o.SnapshotLocations, "list of locations (at most one per provider) where volume snapshots should be stored")
	flags.VarP(&o.Selector, "selector", "l", "only back up resources matching this label selector")
	f := flags.VarPF(&o.SnapshotVolumes, "snapshot-volumes", "", "take snapshots of PersistentVolumes as part of the backup")
	// this allows the user to just specify "--snapshot-volumes" as shorthand for "--snapshot-volumes=true"
	// like a normal bool flag
	f.NoOptDefVal = "true"

	f = flags.VarPF(&o.IncludeClusterResources, "include-cluster-resources", "", "include cluster-scoped resources in the backup")
	f.NoOptDefVal = "true"
}

// BindWait binds the wait flag separately so it is not called by other create
// commands that reuse CreateOptions's BindFlags method.
func (o *CreateOptions) BindWait(flags *pflag.FlagSet) {
	flags.BoolVarP(&o.Wait, "wait", "w", o.Wait, "wait for the operation to complete")
}

// Validate checks output flags and verifies that the requested storage
// location and every volume snapshot location exist on the server.
func (o *CreateOptions) Validate(c *cobra.Command, args []string, f client.Factory) error {
	if err := output.ValidateFlags(c); err != nil {
		return err
	}

	if o.StorageLocation != "" {
		if _, err := o.client.ArkV1().BackupStorageLocations(f.Namespace()).Get(o.StorageLocation, metav1.GetOptions{}); err != nil {
			return err
		}
	}

	for _, loc := range o.SnapshotLocations {
		if _, err := o.client.ArkV1().VolumeSnapshotLocations(f.Namespace()).Get(loc, metav1.GetOptions{}); err != nil {
			return err
		}
	}

	return nil
}

// Complete fills in the backup name from args and builds the API client.
func (o *CreateOptions) Complete(args []string, f client.Factory) error {
	o.Name = args[0]
	client, err := f.Client()
	if err != nil {
		return err
	}
	o.client = client
	return nil
}

// Run submits the Backup resource and, when --wait was given, watches it
// via an informer until the backup leaves the New/InProgress phases.
func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
	backup := &api.Backup{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: f.Namespace(),
			Name:      o.Name,
			Labels:    o.Labels.Data(),
		},
		Spec: api.BackupSpec{
			IncludedNamespaces:      o.IncludeNamespaces,
			ExcludedNamespaces:      o.ExcludeNamespaces,
			IncludedResources:       o.IncludeResources,
			ExcludedResources:       o.ExcludeResources,
			LabelSelector:           o.Selector.LabelSelector,
			SnapshotVolumes:         o.SnapshotVolumes.Value,
			TTL:                     metav1.Duration{Duration: o.TTL},
			IncludeClusterResources: o.IncludeClusterResources.Value,
			StorageLocation:         o.StorageLocation,
			VolumeSnapshotLocations: o.SnapshotLocations,
		},
	}

	if printed, err := output.PrintWithFormat(c, backup); printed || err != nil {
		return err
	}

	var backupInformer cache.SharedIndexInformer
	var updates chan *api.Backup
	if o.Wait {
		// The informer is started BEFORE the backup is created so no phase
		// transition can be missed.
		stop := make(chan struct{})
		defer close(stop)

		updates = make(chan *api.Backup)

		backupInformer = v1.NewBackupInformer(o.client, f.Namespace(), 0, nil)

		backupInformer.AddEventHandler(
			cache.FilteringResourceEventHandler{
				FilterFunc: func(obj interface{}) bool {
					backup, ok := obj.(*api.Backup)
					if !ok {
						return false
					}
					return backup.Name == o.Name
				},
				Handler: cache.ResourceEventHandlerFuncs{
					UpdateFunc: func(_, obj interface{}) {
						backup, ok := obj.(*api.Backup)
						if !ok {
							return
						}
						updates <- backup
					},
					DeleteFunc: func(obj interface{}) {
						backup, ok := obj.(*api.Backup)
						if !ok {
							return
						}
						updates <- backup
					},
				},
			},
		)
		go backupInformer.Run(stop)
	}

	_, err := o.client.ArkV1().Backups(backup.Namespace).Create(backup)
	if err != nil {
		return err
	}

	fmt.Printf("Backup request %q submitted successfully.\n", backup.Name)
	if o.Wait {
		fmt.Println("Waiting for backup to complete. You may safely press ctrl-c to stop waiting - your backup will continue in the background.")
		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				fmt.Print(".")
			case backup, ok := <-updates:
				if !ok {
					fmt.Println("\nError waiting: unable to watch backups.")
					return nil
				}

				if backup.Status.Phase != api.BackupPhaseNew && backup.Status.Phase != api.BackupPhaseInProgress {
					fmt.Printf("\nBackup completed with status: %s. You may check for more information using the commands `ark backup describe %s` and `ark backup logs %s`.\n", backup.Status.Phase, backup.Name, backup.Name)
					return nil
				}
			}
		}
	}

	// Not waiting

	fmt.Printf("Run `ark backup describe %s` or `ark backup logs %s` for more details.\n", backup.Name, backup.Name)

	return nil
}
package null

import (
	"database/sql"
	"encoding/json"
	"fmt"
	"reflect"
	"strconv"
)

const (
	// nullString is the textual representation of a null value.
	nullString = "null"
)

// Float is a nullable float64.
// It does not consider zero values to be null.
// It will decode to null, not zero, if null.
type Float struct {
	sql.NullFloat64
}

// NewFloat creates a new Float
func NewFloat(f float64, valid bool) Float {
	return Float{
		NullFloat64: sql.NullFloat64{
			Float64: f,
			Valid:   valid,
		},
	}
}

// FloatFrom creates a new Float that will always be valid.
func FloatFrom(f float64) Float {
	return NewFloat(f, true)
}

// FloatFromPtr creates a new Float that be null if f is nil.
func FloatFromPtr(f *float64) Float {
	if f == nil {
		return NewFloat(0, false)
	}
	return NewFloat(*f, true)
}

// FloatFromString creates a new Float from string f.
// If the string is equal to the value of nullString then the Float will be null.
// An empty string f will return an error.
// NOTE(review): the nullString parameter shadows the package-level constant of
// the same name — the caller supplies the null sentinel here.
func FloatFromString(f string, nullString string) (Float, error) {
	if f == nullString {
		return FloatFromPtr(nil), nil
	}
	fV, err := strconv.ParseFloat(f, 64)
	if err != nil {
		return Float{}, err
	}
	return FloatFrom(fV), nil
}

// UnmarshalJSON implements json.Unmarshaler.
// It supports number and null input.
// 0 will not be considered a null Float.
// It also supports unmarshalling a sql.NullFloat64.
func (f *Float) UnmarshalJSON(data []byte) error {
	var err error
	var v interface{}
	if err = json.Unmarshal(data, &v); err != nil {
		return err
	}
	switch x := v.(type) {
	case float64:
		f.Float64 = x
	case map[string]interface{}:
		// Object form: decode as the embedded sql.NullFloat64.
		err = json.Unmarshal(data, &f.NullFloat64)
	case nil:
		f.Valid = false
		return nil
	default:
		err = fmt.Errorf("json: cannot unmarshal %v into Go value of type null.Float", reflect.TypeOf(v).Name())
	}
	f.Valid = err == nil
	return err
}

// UnmarshalText implements encoding.TextUnmarshaler.
// It will unmarshal to a null Float if the input is a blank or not an integer.
// It will return an error if the input is not an integer, blank, or "null".
func (f *Float) UnmarshalText(text []byte) error { str := string(text) if str == "" || str == nullString { f.Valid = false return nil } var err error f.Float64, err = strconv.ParseFloat(string(text), 64) f.Valid = err == nil return err } // MarshalJSON implements json.Marshaler. // It will encode null if this Float is null. func (f Float) MarshalJSON() ([]byte, error) { if !f.Valid { return []byte(nullString), nil } return []byte(strconv.FormatFloat(f.Float64, 'f', -1, 64)), nil } // MarshalText implements encoding.TextMarshaler. // It will encode a blank string if this Float is null. func (f Float) MarshalText() ([]byte, error) { if !f.Valid { return []byte{}, nil } return []byte(strconv.FormatFloat(f.Float64, 'f', -1, 64)), nil } // MarshalText implements encoding.TextMarshaler. // It will encode a blank string if this Float is null. func (f Float) String() string { if !f.Valid { return nullString } return fmt.Sprintf("%1.3f", f.Float64) } // FullString returns float as string in full precision func (f Float) FullString() string { if !f.Valid { return nullString } return fmt.Sprintf("%f", f.Float64) } // SetValid changes this Float's value and also sets it to be non-null. func (f *Float) SetValid(n float64) { f.Float64 = n f.Valid = true } // Ptr returns a pointer to this Float's value, or a nil pointer if this Float is null. func (f Float) Ptr() *float64 { if !f.Valid { return nil } return &f.Float64 } // IsZero returns true for invalid Floats, for future omitempty support (Go 1.4?) // A non-null Float with a 0 value will not be considered zero. func (f Float) IsZero() bool { return !f.Valid } backend: null.Float NaN -> null for json marshal (#18284) package null import ( "database/sql" "encoding/json" "fmt" "math" "reflect" "strconv" ) const ( nullString = "null" ) // Float is a nullable float64. // It does not consider zero values to be null. // It will decode to null, not zero, if null. 
// Float is a nullable float64 wrapping sql.NullFloat64; a zero value is a
// valid (non-null) 0 only when Valid is set.
type Float struct {
	sql.NullFloat64
}

// NewFloat creates a new Float
func NewFloat(f float64, valid bool) Float {
	return Float{
		NullFloat64: sql.NullFloat64{
			Float64: f,
			Valid:   valid,
		},
	}
}

// FloatFrom creates a new Float that will always be valid.
func FloatFrom(f float64) Float {
	return NewFloat(f, true)
}

// FloatFromPtr creates a new Float that be null if f is nil.
func FloatFromPtr(f *float64) Float {
	if f == nil {
		return NewFloat(0, false)
	}
	return NewFloat(*f, true)
}

// FloatFromString creates a new Float from string f.
// If the string is equal to the value of nullString then the Float will be null.
// An empty string f will return an error.
// NOTE(review): the nullString parameter shadows the package-level constant of
// the same name — the caller supplies the null sentinel here.
func FloatFromString(f string, nullString string) (Float, error) {
	if f == nullString {
		return FloatFromPtr(nil), nil
	}
	fV, err := strconv.ParseFloat(f, 64)
	if err != nil {
		return Float{}, err
	}
	return FloatFrom(fV), nil
}

// UnmarshalJSON implements json.Unmarshaler.
// It supports number and null input.
// 0 will not be considered a null Float.
// It also supports unmarshalling a sql.NullFloat64.
func (f *Float) UnmarshalJSON(data []byte) error {
	var err error
	var v interface{}
	if err = json.Unmarshal(data, &v); err != nil {
		return err
	}
	switch x := v.(type) {
	case float64:
		f.Float64 = x
	case map[string]interface{}:
		// Object form: decode as the embedded sql.NullFloat64.
		err = json.Unmarshal(data, &f.NullFloat64)
	case nil:
		f.Valid = false
		return nil
	default:
		err = fmt.Errorf("json: cannot unmarshal %v into Go value of type null.Float", reflect.TypeOf(v).Name())
	}
	f.Valid = err == nil
	return err
}

// UnmarshalText implements encoding.TextUnmarshaler.
// It will unmarshal to a null Float if the input is a blank or not an integer.
// It will return an error if the input is not an integer, blank, or "null".
func (f *Float) UnmarshalText(text []byte) error { str := string(text) if str == "" || str == nullString { f.Valid = false return nil } var err error f.Float64, err = strconv.ParseFloat(string(text), 64) f.Valid = err == nil return err } // MarshalJSON implements json.Marshaler. // It will encode null if this Float is null. func (f Float) MarshalJSON() ([]byte, error) { if !f.Valid || math.IsNaN(f.Float64) { return []byte(nullString), nil } return []byte(strconv.FormatFloat(f.Float64, 'f', -1, 64)), nil } // MarshalText implements encoding.TextMarshaler. // It will encode a blank string if this Float is null. func (f Float) MarshalText() ([]byte, error) { if !f.Valid { return []byte{}, nil } return []byte(strconv.FormatFloat(f.Float64, 'f', -1, 64)), nil } // MarshalText implements encoding.TextMarshaler. // It will encode a blank string if this Float is null. func (f Float) String() string { if !f.Valid { return nullString } return fmt.Sprintf("%1.3f", f.Float64) } // FullString returns float as string in full precision func (f Float) FullString() string { if !f.Valid { return nullString } return fmt.Sprintf("%f", f.Float64) } // SetValid changes this Float's value and also sets it to be non-null. func (f *Float) SetValid(n float64) { f.Float64 = n f.Valid = true } // Ptr returns a pointer to this Float's value, or a nil pointer if this Float is null. func (f Float) Ptr() *float64 { if !f.Valid { return nil } return &f.Float64 } // IsZero returns true for invalid Floats, for future omitempty support (Go 1.4?) // A non-null Float with a 0 value will not be considered zero. func (f Float) IsZero() bool { return !f.Valid }
package helm

import (
	"bytes"
	"fmt"
	"strings"

	"gopkg.in/yaml.v2"

	"github.com/flant/helm/pkg/chartutil"
	"github.com/flant/helm/pkg/engine"
	"github.com/flant/helm/pkg/proto/hapi/chart"
	"github.com/flant/helm/pkg/releaseutil"

	"github.com/flant/werf/pkg/util"
)

// ChartTemplates is the list of manifests parsed from a chart or release.
type ChartTemplates []Template

// Pods returns the templates of kind Pod.
func (templates ChartTemplates) Pods() []Template {
	return templates.ByKind("Pod")
}

// Jobs returns the templates of kind Job.
func (templates ChartTemplates) Jobs() []Template {
	return templates.ByKind("Job")
}

// Deployments returns the templates of kind Deployment.
func (templates ChartTemplates) Deployments() []Template {
	return templates.ByKind("Deployment")
}

// StatefulSets returns the templates of kind StatefulSet.
func (templates ChartTemplates) StatefulSets() []Template {
	return templates.ByKind("StatefulSet")
}

// DaemonSets returns the templates of kind DaemonSet.
func (templates ChartTemplates) DaemonSets() []Template {
	return templates.ByKind("DaemonSet")
}

// ByKind returns the templates whose kind matches, case-insensitively.
func (templates ChartTemplates) ByKind(kind string) []Template {
	var resultTemplates []Template
	for _, template := range templates {
		if strings.ToLower(template.Kind) == strings.ToLower(kind) {
			resultTemplates = append(resultTemplates, template)
		}
	}
	return resultTemplates
}

// Template is a partially decoded Kubernetes manifest; fields not modeled here
// are preserved via the inline OtherFields maps so re-marshalling is lossless.
type Template struct {
	Version  string `yaml:"apiVersion,omitempty"`
	Kind     string `yaml:"kind,omitempty"`
	Metadata struct {
		Name        string                 `yaml:"name,omitempty"`
		Namespace   string                 `yaml:"namespace,omitempty"`
		Annotations map[string]string      `yaml:"annotations,omitempty"`
		Labels      map[string]string      `yaml:"labels,omitempty"`
		UID         string                 `yaml:"uid,omitempty"`
		OtherFields map[string]interface{} `yaml:",inline"`
	} `yaml:"metadata,omitempty"`
	Status      string                 `yaml:"status,omitempty"`
	OtherFields map[string]interface{} `yaml:",inline"`
}

// Namespace returns the template's own namespace, falling back to the given default.
func (t Template) Namespace(namespace string) string {
	if t.Metadata.Namespace != "" {
		return t.Metadata.Namespace
	}
	return namespace
}

// GetTemplatesFromRevision parses the manifests of a deployed release revision.
func GetTemplatesFromRevision(releaseName string, revision int32) (ChartTemplates, error) {
	rawTemplates, err := getRawTemplatesFromRevision(releaseName, revision)
	if err != nil {
		return nil, err
	}

	chartTemplates, err := parseTemplates(rawTemplates)
	if err != nil {
		return nil, fmt.Errorf("unable to parse revision templates: %s", err)
	}

	return chartTemplates, nil
}

// GetTemplatesFromChart renders the chart locally and parses the result.
func GetTemplatesFromChart(chartPath, releaseName, namespace string, values, set, setString []string) (ChartTemplates, error) {
	rawTemplates, err := getRawTemplatesFromChart(chartPath, releaseName, namespace, values, set, setString)
	if err != nil {
		return nil, err
	}

	chartTemplates, err := parseTemplates(rawTemplates)
	if err != nil {
		return nil, fmt.Errorf("unable to parse chart templates: %s", err)
	}

	return chartTemplates, nil
}

// getRawTemplatesFromChart renders the chart into a single YAML stream.
func getRawTemplatesFromChart(chartPath, releaseName, namespace string, values, set, setString []string) (string, error) {
	out := &bytes.Buffer{}
	renderOptions := RenderOptions{
		ShowNotes: false,
	}
	if err := Render(out, chartPath, releaseName, namespace, values, set, setString, renderOptions); err != nil {
		return "", err
	}
	return out.String(), nil
}

// getRawTemplatesFromRevision fetches the manifests of a deployed release
// revision (hooks first, then the main manifest) as one YAML stream.
func getRawTemplatesFromRevision(releaseName string, revision int32) (string, error) {
	var result string
	resp, err := releaseContent(releaseName, releaseContentOptions{Version: revision})
	if err != nil {
		return "", err
	}
	for _, hook := range resp.Release.Hooks {
		result += fmt.Sprintf("---\n# %s\n%s\n", hook.Name, hook.Manifest)
	}
	result += "\n"
	result += resp.Release.Manifest
	return result, nil
}

// parseTemplates splits a YAML stream into documents and decodes each into a
// Template; documents without metadata.name are dropped.
func parseTemplates(rawTemplates string) (ChartTemplates, error) {
	var templates ChartTemplates
	for _, doc := range releaseutil.SplitManifests(rawTemplates) {
		var t Template
		err := yaml.Unmarshal([]byte(doc), &t)
		if err != nil {
			return nil, fmt.Errorf("%s\n\n%s\n", err, util.NumerateLines(doc, 1))
		}
		if t.Metadata.Name != "" {
			templates = append(templates, t)
		}
	}
	return templates, nil
}

// WerfEngine wraps the helm render engine and injects extra annotations and
// labels into every rendered manifest.
type WerfEngine struct {
	*engine.Engine

	ExtraAnnotations map[string]string
	ExtraLabels      map[string]string
}

// Render renders the chart, then rewrites each manifest document with the
// configured extra annotations and labels (NOTES.txt and empty files are
// passed through untouched).
func (e *WerfEngine) Render(chrt *chart.Chart, values chartutil.Values) (map[string]string, error) {
	templates, err := e.Engine.Render(chrt, values)
	if err != nil {
		return nil, err
	}

	for fileName, fileContent := range templates {
		if fileContent == "" {
			continue
		}

		if strings.HasSuffix(fileName, "/NOTES.txt") {
			continue
		}

		var resultManifests []string
		for _, manifest := range releaseutil.SplitManifests(fileContent) {
			var t Template
			err := yaml.Unmarshal([]byte(manifest), &t)
			if err != nil {
				return nil, err
			}

			if len(t.Metadata.Annotations) == 0 {
				t.Metadata.Annotations = map[string]string{}
			}
			for annoName, annoValue := range e.ExtraAnnotations {
				t.Metadata.Annotations[annoName] = annoValue
			}

			if len(t.Metadata.Labels) == 0 {
				t.Metadata.Labels = map[string]string{}
			}
			for labelName, labelValue := range e.ExtraLabels {
				t.Metadata.Labels[labelName] = labelValue
			}

			// Re-marshal the (possibly modified) manifest.
			res, err := yaml.Marshal(t)
			if err != nil {
				return nil, err
			}

			resultManifests = append(resultManifests, string(res))
		}

		templates[fileName] = strings.Join(resultManifests, "---\n")
	}

	return templates, nil
}

// NewWerfEngine returns a WerfEngine with no extra annotations or labels.
func NewWerfEngine() *WerfEngine {
	return &WerfEngine{
		Engine:           engine.New(),
		ExtraAnnotations: map[string]string{},
		ExtraLabels:      map[string]string{},
	}
}

// WithExtra installs the given extra annotations/labels on the global
// WerfTemplateEngine for the duration of f, resetting them afterwards.
func WithExtra(extraAnnotations, extraLabels map[string]string, f func() error) error {
	WerfTemplateEngine.ExtraAnnotations = extraAnnotations
	WerfTemplateEngine.ExtraLabels = extraLabels

	err := f()

	WerfTemplateEngine.ExtraAnnotations = map[string]string{}
	WerfTemplateEngine.ExtraLabels = map[string]string{}

	return err
}

[deploy] Ignore empty helm manifests by werfGoTpl engine

package helm

import (
	"bytes"
	"fmt"
	"strings"

	"gopkg.in/yaml.v2"

	"github.com/flant/helm/pkg/chartutil"
	"github.com/flant/helm/pkg/engine"
	"github.com/flant/helm/pkg/proto/hapi/chart"
	"github.com/flant/helm/pkg/releaseutil"

	"github.com/flant/werf/pkg/util"
)

// ChartTemplates is the list of manifests parsed from a chart or release.
type ChartTemplates []Template

// Pods returns the templates of kind Pod.
func (templates ChartTemplates) Pods() []Template {
	return templates.ByKind("Pod")
}

// Jobs returns the templates of kind Job.
func (templates ChartTemplates) Jobs() []Template {
	return templates.ByKind("Job")
}

// Deployments returns the templates of kind Deployment.
func (templates ChartTemplates) Deployments() []Template {
	return templates.ByKind("Deployment")
}

// StatefulSets returns the templates of kind StatefulSet.
func (templates ChartTemplates) StatefulSets() []Template {
	return templates.ByKind("StatefulSet")
}

// DaemonSets returns the templates of kind DaemonSet.
func (templates ChartTemplates) DaemonSets() []Template {
	return templates.ByKind("DaemonSet")
}

// ByKind returns the templates whose kind matches, case-insensitively.
func (templates ChartTemplates) ByKind(kind string) []Template {
	var resultTemplates []Template
	for _, template := range templates {
		if strings.ToLower(template.Kind) == strings.ToLower(kind) {
			resultTemplates = append(resultTemplates, template)
		}
	}
	return resultTemplates
}

// Template is a partially decoded Kubernetes manifest; fields not modeled here
// are preserved via the inline OtherFields maps so re-marshalling is lossless.
type Template struct {
	Version  string `yaml:"apiVersion,omitempty"`
	Kind     string `yaml:"kind,omitempty"`
	Metadata struct {
		Name        string                 `yaml:"name,omitempty"`
		Namespace   string                 `yaml:"namespace,omitempty"`
		Annotations map[string]string      `yaml:"annotations,omitempty"`
		Labels      map[string]string      `yaml:"labels,omitempty"`
		UID         string                 `yaml:"uid,omitempty"`
		OtherFields map[string]interface{} `yaml:",inline"`
	} `yaml:"metadata,omitempty"`
	Status      string                 `yaml:"status,omitempty"`
	OtherFields map[string]interface{} `yaml:",inline"`
}

// Namespace returns the template's own namespace, falling back to the given default.
func (t Template) Namespace(namespace string) string {
	if t.Metadata.Namespace != "" {
		return t.Metadata.Namespace
	}
	return namespace
}

// IsEmpty reports whether the decoded document lacks a resource header
// (apiVersion/kind) and so should be passed through unmodified.
func (t Template) IsEmpty() bool {
	if t.Version == "" || t.Kind == "" {
		return true
	}
	return false
}

// GetTemplatesFromRevision parses the manifests of a deployed release revision.
func GetTemplatesFromRevision(releaseName string, revision int32) (ChartTemplates, error) {
	rawTemplates, err := getRawTemplatesFromRevision(releaseName, revision)
	if err != nil {
		return nil, err
	}

	chartTemplates, err := parseTemplates(rawTemplates)
	if err != nil {
		return nil, fmt.Errorf("unable to parse revision templates: %s", err)
	}

	return chartTemplates, nil
}

// GetTemplatesFromChart renders the chart locally and parses the result.
func GetTemplatesFromChart(chartPath, releaseName, namespace string, values, set, setString []string) (ChartTemplates, error) {
	rawTemplates, err := getRawTemplatesFromChart(chartPath, releaseName, namespace, values, set, setString)
	if err != nil {
		return nil, err
	}

	chartTemplates, err := parseTemplates(rawTemplates)
	if err != nil {
		return nil, fmt.Errorf("unable to parse chart templates: %s", err)
	}

	return chartTemplates, nil
}

// getRawTemplatesFromChart renders the chart into a single YAML stream.
func getRawTemplatesFromChart(chartPath, releaseName, namespace string, values, set, setString []string) (string, error) {
	out := &bytes.Buffer{}
	renderOptions := RenderOptions{
		ShowNotes: false,
	}
	if err := Render(out, chartPath, releaseName, namespace, values, set, setString, renderOptions); err != nil {
		return "", err
	}
	return out.String(), nil
}

// getRawTemplatesFromRevision fetches the manifests of a deployed release
// revision (hooks first, then the main manifest) as one YAML stream.
func getRawTemplatesFromRevision(releaseName string, revision int32) (string, error) {
	var result string
	resp, err := releaseContent(releaseName, releaseContentOptions{Version: revision})
	if err != nil {
		return "", err
	}
	for _, hook := range resp.Release.Hooks {
		result += fmt.Sprintf("---\n# %s\n%s\n", hook.Name, hook.Manifest)
	}
	result += "\n"
	result += resp.Release.Manifest
	return result, nil
}

// parseTemplates splits a YAML stream into documents and decodes each into a
// Template; documents without metadata.name are dropped.
func parseTemplates(rawTemplates string) (ChartTemplates, error) {
	var templates ChartTemplates
	for _, doc := range releaseutil.SplitManifests(rawTemplates) {
		var t Template
		err := yaml.Unmarshal([]byte(doc), &t)
		if err != nil {
			return nil, fmt.Errorf("%s\n\n%s\n", err, util.NumerateLines(doc, 1))
		}
		if t.Metadata.Name != "" {
			templates = append(templates, t)
		}
	}
	return templates, nil
}

// WerfEngine wraps the helm render engine and injects extra annotations and
// labels into every rendered manifest.
type WerfEngine struct {
	*engine.Engine

	ExtraAnnotations map[string]string
	ExtraLabels      map[string]string
}

// Render renders the chart, then rewrites each non-empty manifest document
// with the configured extra annotations and labels; empty documents (no
// apiVersion/kind) and NOTES.txt files are passed through unmodified.
func (e *WerfEngine) Render(chrt *chart.Chart, values chartutil.Values) (map[string]string, error) {
	templates, err := e.Engine.Render(chrt, values)
	if err != nil {
		return nil, err
	}

	for fileName, fileContent := range templates {
		if fileContent == "" {
			continue
		}

		if strings.HasSuffix(fileName, "/NOTES.txt") {
			continue
		}

		var resultManifests []string
		for _, manifest := range releaseutil.SplitManifests(fileContent) {
			var t Template
			err := yaml.Unmarshal([]byte(manifest), &t)
			if err != nil {
				return nil, err
			}

			var resultManifestContent string
			if t.IsEmpty() {
				// Not a resource manifest: keep the original text as-is.
				resultManifestContent = manifest
			} else {
				if len(t.Metadata.Annotations) == 0 {
					t.Metadata.Annotations = map[string]string{}
				}
				for annoName, annoValue := range e.ExtraAnnotations {
					t.Metadata.Annotations[annoName] = annoValue
				}

				if len(t.Metadata.Labels) == 0 {
					t.Metadata.Labels = map[string]string{}
				}
				for labelName, labelValue := range e.ExtraLabels {
					t.Metadata.Labels[labelName] = labelValue
				}

				res, err := yaml.Marshal(t)
				if err != nil {
					return nil, err
				}

				resultManifestContent = string(res)
			}

			resultManifests = append(resultManifests, resultManifestContent)
		}

		templates[fileName] = strings.Join(resultManifests, "\n---\n")
	}

	return templates, nil
}

// NewWerfEngine returns a WerfEngine with no extra annotations or labels.
func NewWerfEngine() *WerfEngine {
	return &WerfEngine{
		Engine:           engine.New(),
		ExtraAnnotations: map[string]string{},
		ExtraLabels:      map[string]string{},
	}
}

// WithExtra installs the given extra annotations/labels on the global
// WerfTemplateEngine for the duration of f, resetting them afterwards.
func WithExtra(extraAnnotations, extraLabels map[string]string, f func() error) error {
	WerfTemplateEngine.ExtraAnnotations = extraAnnotations
	WerfTemplateEngine.ExtraLabels = extraLabels

	err := f()

	WerfTemplateEngine.ExtraAnnotations = map[string]string{}
	WerfTemplateEngine.ExtraLabels = map[string]string{}

	return err
}
// Copyright 2019 Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package istioagent import ( "context" "io/ioutil" "net" "os" "strings" "time" "istio.io/istio/pkg/kube" caClientInterface "istio.io/istio/security/pkg/nodeagent/caclient/interface" citadel "istio.io/istio/security/pkg/nodeagent/caclient/providers/citadel" gca "istio.io/istio/security/pkg/nodeagent/caclient/providers/google" "istio.io/istio/security/pkg/nodeagent/plugin/providers/google/stsclient" "istio.io/istio/security/pkg/nodeagent/cache" "istio.io/istio/security/pkg/nodeagent/sds" "istio.io/istio/security/pkg/nodeagent/secretfetcher" "istio.io/pkg/env" "istio.io/pkg/log" ) // To debug: // curl -X POST localhost:15000/logging?config=trace - to see SendingDiscoveryRequest // Breakpoints in secretcache.go GenerateSecret.. // Note that istiod currently can't validate the JWT token unless it runs on k8s // Main problem is the JWT validation check which hardcodes the k8s server address and token location. 
//
// To test on a local machine, for debugging:
//
// kis exec $POD -- cat /run/secrets/istio-token/istio-token > var/run/secrets/tokens/istio-token
// kis port-forward $POD 15010:15010 &
//
// You can also copy the K8S CA and a token to be used to connect to k8s - but will need removing the hardcoded addr
// kis exec $POD -- cat /run/secrets/kubernetes.io/serviceaccount/{ca.crt,token} > var/run/secrets/kubernetes.io/serviceaccount/
//
// Or disable the jwt validation while debugging SDS problems.

var (
	caProviderEnv = env.RegisterStringVar(caProvider, "Citadel", "").Get()
	// TODO: default to same as discovery address
	caEndpointEnv                      = env.RegisterStringVar(caEndpoint, "", "").Get()
	pluginNamesEnv                     = env.RegisterStringVar(pluginNames, "", "").Get()
	enableIngressGatewaySDSEnv         = env.RegisterBoolVar(enableIngressGatewaySDS, false, "").Get()
	trustDomainEnv                     = env.RegisterStringVar(trustDomain, "", "").Get()
	secretTTLEnv                       = env.RegisterDurationVar(secretTTL, 24*time.Hour, "").Get()
	secretRefreshGraceDurationEnv      = env.RegisterDurationVar(SecretRefreshGraceDuration, 1*time.Hour, "").Get()
	secretRotationIntervalEnv          = env.RegisterDurationVar(SecretRotationInterval, 10*time.Minute, "").Get()
	staledConnectionRecycleIntervalEnv = env.RegisterDurationVar(staledConnectionRecycleInterval, 5*time.Minute, "").Get()
	initialBackoffEnv                  = env.RegisterIntVar(InitialBackoff, 10, "").Get()
	pkcs8KeysEnv                       = env.RegisterBoolVar(pkcs8Key, false, "Whether to generate PKCS#8 private keys").Get()

	// Location of a custom-mounted root (for example using Secret)
	mountedRoot = "/etc/certs/root-cert.pem"

	// Location of K8S CA root.
	k8sCAPath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
)

const (
	// name of authentication provider.
	caProvider = "CA_PROVIDER"

	// CA endpoint.
	caEndpoint = "CA_ADDR"

	// names of authentication provider's plugins.
	pluginNames = "PLUGINS"

	// The trust domain corresponds to the trust root of a system.
	// Refer to https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md#21-trust-domain
	trustDomain = "TRUST_DOMAIN"

	// The ingress gateway SDS mode allows node agent to provision credentials to ingress gateway
	// proxy by watching kubernetes secrets.
	enableIngressGatewaySDS = "ENABLE_INGRESS_GATEWAY_SDS"

	// The environmental variable name for secret TTL, node agent decides whether a secret
	// is expired if time.now - secret.createtime >= secretTTL.
	// example value format like "90m"
	secretTTL = "SECRET_TTL"

	// The environmental variable name for grace duration that secret is re-generated
	// before it's expired time.
	// example value format like "10m"
	SecretRefreshGraceDuration = "SECRET_GRACE_DURATION"

	// The environmental variable name for key rotation job running interval.
	// example value format like "20m"
	SecretRotationInterval = "SECRET_JOB_RUN_INTERVAL"

	// The environmental variable name for staled connection recycle job running interval.
	// example value format like "5m"
	staledConnectionRecycleInterval = "STALED_CONNECTION_RECYCLE_RUN_INTERVAL"

	// The environmental variable name for the initial backoff in milliseconds.
	// example value format like "10"
	InitialBackoff = "INITIAL_BACKOFF_MSEC"

	pkcs8Key = "PKCS8_KEY"
)

var (
	// JWTPath is the default location of a JWT token to be used to authenticate with XDS and CA servers.
	// If the file is missing, the agent will fallback to using mounted certificates if XDS address is secure.
	JWTPath = "./var/run/secrets/tokens/istio-token"

	// LocalSDS is the location of the in-process SDS server - must be in a writeable dir.
	LocalSDS = "/etc/istio/proxy/SDS"

	// Shared, package-level state configured by applyEnvVars and used by the
	// cache constructors below.
	workloadSdsCacheOptions cache.Options
	gatewaySdsCacheOptions  cache.Options
	serverOptions           sds.Options
	gatewaySecretChan       chan struct{}
)

// SDSAgent contains the configuration of the agent, based on the injected
// environment:
// - SDS hostPath if node-agent was used
// - /etc/certs/key if Citadel or other mounted Secrets are used
// - root cert to use for connecting to XDS server
// - CA address, with proper defaults and detection
type SDSAgent struct {
	// Location of JWTPath to connect to CA. If empty, SDS is not possible.
	// If set SDS will be used - either local or via hostPath.
	JWTPath string

	// SDSAddress is the address of the SDS server. Starts with unix: for hostpath mount or built-in
	// May also be a https address.
	SDSAddress string

	// CertPath is set with the location of the certs, or empty if mounted certs are not present.
	CertsPath string

	// RequireCerts is set if the agent requires certificates:
	// - if controlPlaneAuthEnabled is set
	// - port of discovery server is not 15010 (the plain text default).
	RequireCerts bool

	// Expected SAN
	SAN string
}

// NewSDSAgent wraps the logic for a local SDS. It will check if the JWT token required for local SDS is
// present, and set additional config options for the in-process SDS agent.
//
// The JWT token is currently using a pre-defined audience (istio-ca) or it must match the trust domain (WIP).
// If the JWT token is not present - the local SDS agent can't authenticate.
//
// If node agent and JWT are mounted: it indicates user injected a config using hostPath, and will be used.
//
func NewSDSAgent(discAddr string, tlsRequired bool) *SDSAgent {
	ac := &SDSAgent{}

	discHost, discPort, err := net.SplitHostPort(discAddr)
	if err != nil {
		log.Fatala("Invalid discovery address", discAddr, err)
	}

	if _, err := os.Stat(JWTPath); err == nil {
		ac.JWTPath = JWTPath
	} else {
		// Can't use in-process SDS.
		log.Warna("Missing JWT token, can't use in process SDS ", JWTPath, err)
		if discPort == "15012" {
			log.Fatala("Missing JWT, can't authenticate with control plane. Try using plain text (15010)")
		}
		return ac
	}

	ac.SDSAddress = "unix:" + LocalSDS

	if _, err := os.Stat("/etc/certs/key.pem"); err == nil {
		ac.CertsPath = "/etc/certs"
	}
	if tlsRequired {
		ac.RequireCerts = true
	}

	// Istiod uses a fixed, defined port for K8S-signed certificates.
	if discPort == "15012" {
		ac.RequireCerts = true
		// For local debugging - the discoveryAddress is set to localhost, but the cert issued for normal SA.
		if discHost == "localhost" {
			discHost = "istiod.istio-system.svc"
		}
		ac.SAN = discHost
	}

	return ac
}

// Simplified SDS setup. This is called if and only if user has explicitly mounted a K8S JWT token, and is not
// using a hostPath mounted or external SDS server.
//
// 1. External CA: requires authenticating the trusted JWT AND validating the SAN against the JWT.
//    For example Google CA
//
// 2. Indirect, using istiod: using K8S cert.
//
// 3. Monitor mode - watching secret in same namespace ( Ingress)
//
// 4. TODO: File watching, for backward compat/migration from mounted secrets.
func (conf *SDSAgent) Start(isSidecar bool, podNamespace string) (*sds.Server, error) {
	applyEnvVars()

	gatewaySdsCacheOptions = workloadSdsCacheOptions

	// Next to the envoy config, writeable dir (mounted as mem)
	serverOptions.WorkloadUDSPath = LocalSDS
	serverOptions.UseLocalJWT = true

	// TODO: remove the caching, workload has a single cert
	workloadSecretCache, _ := newSecretCache(serverOptions)

	var gatewaySecretCache *cache.SecretCache
	if !isSidecar {
		serverOptions.EnableIngressGatewaySDS = true
		// TODO: what is the setting for ingress ?
		serverOptions.IngressGatewayUDSPath = serverOptions.WorkloadUDSPath + "_ROUTER"
		gatewaySecretCache = newIngressSecretCache(podNamespace)
	}

	// For sidecar and ingress we need to first get the certificates for the workload.
	// We'll also save them in files, for backward compat with servers generating files
	// TODO: use caClient.CSRSign() directly

	// fail hard if we need certs ( control plane security enabled ) and we don't have mounted certs and
	// we fail to load SDS
	fail := conf.RequireCerts && conf.CertsPath == ""

	tok, err := ioutil.ReadFile(conf.JWTPath)
	if err != nil && fail {
		log.Fatala("Failed to read token", err)
	} else {
		si, err := workloadSecretCache.GenerateSecret(context.Background(), "bootstrap", "default", string(tok))
		if err != nil {
			if fail {
				log.Fatala("Failed to get certificates", err)
			} else {
				log.Warna("Failed to get certificate from CA", err)
			}
		} else {
			log.Infoa("Got initial certificate valid until ", si.ExpireTime)
		}
		if si != nil {
			// For debugging and backward compat - we may not need it long term
			// The files can be used if an Pilot configured with SDS disabled is used, will generate
			// file based XDS config instead of SDS.
			err = ioutil.WriteFile("/etc/istio/proxy/key.pem", si.PrivateKey, 0700)
			if err != nil {
				log.Fatalf("Failed to write certs: %v", err)
			}
			err = ioutil.WriteFile("/etc/istio/proxy/cert-chain.pem", si.CertificateChain, 0700)
			if err != nil {
				log.Fatalf("Failed to write certs: %v", err)
			}
		}
		sir, err := workloadSecretCache.GenerateSecret(context.Background(), "bootstrap", "ROOTCA", string(tok))
		if err != nil {
			if fail {
				log.Fatala("Failed to get certificates", err)
			} else {
				log.Warna("Failed to get certificate from CA", err)
			}
		}
		if sir != nil {
			// For debugging and backward compat - we may not need it long term
			// TODO: we should concatenate this file with the existing root-cert and possibly pilot-generated roots, for
			// smooth transition across CAs.
			err = ioutil.WriteFile("/etc/istio/proxy/root-cert.pem", sir.RootCert, 0700)
			if err != nil {
				log.Fatalf("Failed to write certs: %v", err)
			}
		}
	}

	server, err := sds.NewServer(serverOptions, workloadSecretCache, gatewaySecretCache)
	if err != nil {
		return nil, err
	}

	return server, nil
}

// newSecretCache creates the cache for workload secrets and/or gateway secrets.
func newSecretCache(serverOptions sds.Options) (workloadSecretCache *cache.SecretCache, caClient caClientInterface.Client) {
	ret := &secretfetcher.SecretFetcher{}

	// TODO: get the MC public keys from pilot.
	// TODO: root cert for Istiod from the K8S file or local override
	// In node agent, a controller is used getting 'istio-security.istio-system' config map
	// Single caTLSRootCert inside.

	var err error

	// TODO: this should all be packaged in a plugin, possibly with optional compilation.
	if (serverOptions.CAProviderName == "GoogleCA" || strings.Contains(serverOptions.CAEndpoint, "googleapis.com")) &&
		stsclient.GKEClusterURL != "" {
		// Use a plugin to an external CA - this has direct support for the K8S JWT token
		// This is only used if the proper env variables are injected - otherwise the existing Citadel or Istiod will be
		// used.
		caClient, err = gca.NewGoogleCAClient(serverOptions.CAEndpoint, true)
		serverOptions.PluginNames = []string{"GoogleTokenExchange"}
	} else {
		// Determine the default CA.
		// If /etc/certs exists - it means Citadel is used (possibly in a mode to only provision the root-cert, not keys)
		// Otherwise: default to istiod
		//
		// If an explicit CA is configured, assume it is mounting /etc/certs
		var rootCert []byte

		// explicitSecret is true if a /etc/certs/root-cert file has been mounted. Will be used
		// to authenticate the certificate of the SDS server (istiod or custom).
		explicitSecret := false
		if _, err := os.Stat(mountedRoot); err == nil {
			rootCert, err = ioutil.ReadFile(mountedRoot)
			if err != nil {
				log.Warna("Failed to load existing citadel root", err)
			} else {
				explicitSecret = true
			}
		}

		tls := true
		if serverOptions.CAEndpoint == "" {
			// Determine the default address, based on the presence of Citadel secrets
			if explicitSecret {
				log.Info("Using citadel CA for SDS")
				serverOptions.CAEndpoint = "istio-citadel.istio-system:8060"
			} else {
				rootCert, err = ioutil.ReadFile(k8sCAPath)
				if err != nil {
					log.Warna("Failed to load K8S cert, assume IP secure network ", err)
					serverOptions.CAEndpoint = "istiod.istio-system.svc:15010"
				} else {
					log.Info("Using default istiod CA, with K8S certificates for SDS")
					serverOptions.CAEndpoint = "istiod.istio-system.svc:15012"
				}
			}
		} else {
			// Explicitly configured CA
			log.Infoa("Using user-configured CA", serverOptions.CAEndpoint)
			if strings.HasSuffix(serverOptions.CAEndpoint, ":15010") {
				log.Warna("Debug mode or IP-secure network")
				tls = false
			}
			if strings.HasSuffix(serverOptions.CAEndpoint, ":15012") {
				rootCert, err = ioutil.ReadFile(k8sCAPath)
				if err != nil {
					log.Fatala("Invalid config - port 15012 expects a K8S-signed certificate but certs missing", err)
				}
			}
		}

		// Will use TLS unless the reserved 15010 port is used ( istiod on an ipsec/secure VPC)
		// rootCert may be nil - in which case the system roots are used, and the CA is expected to have public key
		// Otherwise assume the injection has mounted /etc/certs/root-cert.pem
		caClient, err = citadel.NewCitadelClient(serverOptions.CAEndpoint, tls, rootCert)
	}

	if err != nil {
		log.Errorf("failed to create secretFetcher for workload proxy: %v", err)
		os.Exit(1)
	}
	ret.UseCaClient = true
	ret.CaClient = caClient

	workloadSdsCacheOptions.TrustDomain = serverOptions.TrustDomain
	workloadSdsCacheOptions.Pkcs8Keys = serverOptions.Pkcs8Keys
	workloadSdsCacheOptions.Plugins = sds.NewPlugins(serverOptions.PluginNames)
	workloadSecretCache = cache.NewSecretCache(ret, sds.NotifyProxy, workloadSdsCacheOptions)

	return
}

// TODO: use existing 'sidecar/router' config to enable loading Secrets
// newIngressSecretCache builds a secret cache that watches kubernetes secrets
// in the given namespace (gateway mode, no CA client).
func newIngressSecretCache(namespace string) (gatewaySecretCache *cache.SecretCache) {
	gSecretFetcher := &secretfetcher.SecretFetcher{
		UseCaClient: false,
	}
	cs, err := kube.CreateClientset("", "")
	if err != nil {
		log.Errorf("failed to create secretFetcher for gateway proxy: %v", err)
		os.Exit(1)
	}
	gSecretFetcher.FallbackSecretName = "gateway-fallback"
	gSecretFetcher.InitWithKubeClientAndNs(cs.CoreV1(), namespace)

	gatewaySecretChan = make(chan struct{})
	gSecretFetcher.Run(gatewaySecretChan)
	gatewaySecretCache = cache.NewSecretCache(gSecretFetcher, sds.NotifyProxy, gatewaySdsCacheOptions)
	return gatewaySecretCache
}

// applyEnvVars copies the registered environment values into the package-level
// server and cache option structs.
func applyEnvVars() {
	serverOptions.PluginNames = strings.Split(pluginNamesEnv, ",")
	serverOptions.EnableWorkloadSDS = true
	serverOptions.EnableIngressGatewaySDS = enableIngressGatewaySDSEnv
	serverOptions.CAProviderName = caProviderEnv
	serverOptions.CAEndpoint = caEndpointEnv
	serverOptions.TrustDomain = trustDomainEnv
	serverOptions.Pkcs8Keys = pkcs8KeysEnv
	workloadSdsCacheOptions.SecretTTL = secretTTLEnv
	workloadSdsCacheOptions.SecretRefreshGraceDuration = secretRefreshGraceDurationEnv
	workloadSdsCacheOptions.RotationInterval = secretRotationIntervalEnv
	serverOptions.RecycleInterval = staledConnectionRecycleIntervalEnv
	workloadSdsCacheOptions.InitialBackoff = int64(initialBackoffEnv)
}

Fix logs without spaces (#19960)

* Fix logs without spaces

This results in logs that have two words with no space between them. Minor cleanup to fix that

* Update pkg/istio-agent/sds-agent.go

Co-Authored-By: Romain Lenglet <1d1ad0fbb64cc2325d34701cdbc7695144a1eb59@berabera.info>

Co-authored-by: Romain Lenglet <1d1ad0fbb64cc2325d34701cdbc7695144a1eb59@berabera.info>

// Copyright 2019 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package istioagent import ( "context" "io/ioutil" "net" "os" "strings" "time" "istio.io/istio/pkg/kube" caClientInterface "istio.io/istio/security/pkg/nodeagent/caclient/interface" citadel "istio.io/istio/security/pkg/nodeagent/caclient/providers/citadel" gca "istio.io/istio/security/pkg/nodeagent/caclient/providers/google" "istio.io/istio/security/pkg/nodeagent/plugin/providers/google/stsclient" "istio.io/istio/security/pkg/nodeagent/cache" "istio.io/istio/security/pkg/nodeagent/sds" "istio.io/istio/security/pkg/nodeagent/secretfetcher" "istio.io/pkg/env" "istio.io/pkg/log" ) // To debug: // curl -X POST localhost:15000/logging?config=trace - to see SendingDiscoveryRequest // Breakpoints in secretcache.go GenerateSecret.. // Note that istiod currently can't validate the JWT token unless it runs on k8s // Main problem is the JWT validation check which hardcodes the k8s server address and token location. // // To test on a local machine, for debugging: // // kis exec $POD -- cat /run/secrets/istio-token/istio-token > var/run/secrets/tokens/istio-token // kis port-forward $POD 15010:15010 & // // You can also copy the K8S CA and a token to be used to connect to k8s - but will need removing the hardcoded addr // kis exec $POD -- cat /run/secrets/kubernetes.io/serviceaccount/{ca.crt,token} > var/run/secrets/kubernetes.io/serviceaccount/ // // Or disable the jwt validation while debugging SDS problems. 
var ( caProviderEnv = env.RegisterStringVar(caProvider, "Citadel", "").Get() // TODO: default to same as discovery address caEndpointEnv = env.RegisterStringVar(caEndpoint, "", "").Get() pluginNamesEnv = env.RegisterStringVar(pluginNames, "", "").Get() enableIngressGatewaySDSEnv = env.RegisterBoolVar(enableIngressGatewaySDS, false, "").Get() trustDomainEnv = env.RegisterStringVar(trustDomain, "", "").Get() secretTTLEnv = env.RegisterDurationVar(secretTTL, 24*time.Hour, "").Get() secretRefreshGraceDurationEnv = env.RegisterDurationVar(SecretRefreshGraceDuration, 1*time.Hour, "").Get() secretRotationIntervalEnv = env.RegisterDurationVar(SecretRotationInterval, 10*time.Minute, "").Get() staledConnectionRecycleIntervalEnv = env.RegisterDurationVar(staledConnectionRecycleInterval, 5*time.Minute, "").Get() initialBackoffEnv = env.RegisterIntVar(InitialBackoff, 10, "").Get() pkcs8KeysEnv = env.RegisterBoolVar(pkcs8Key, false, "Whether to generate PKCS#8 private keys").Get() // Location of a custom-mounted root (for example using Secret) mountedRoot = "/etc/certs/root-cert.pem" // Location of K8S CA root. k8sCAPath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" ) const ( // name of authentication provider. caProvider = "CA_PROVIDER" // CA endpoint. caEndpoint = "CA_ADDR" // names of authentication provider's plugins. pluginNames = "PLUGINS" // The trust domain corresponds to the trust root of a system. // Refer to https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md#21-trust-domain trustDomain = "TRUST_DOMAIN" // The ingress gateway SDS mode allows node agent to provision credentials to ingress gateway // proxy by watching kubernetes secrets. enableIngressGatewaySDS = "ENABLE_INGRESS_GATEWAY_SDS" // The environmental variable name for secret TTL, node agent decides whether a secret // is expired if time.now - secret.createtime >= secretTTL. 
// example value format like "90m" secretTTL = "SECRET_TTL" // The environmental variable name for grace duration that secret is re-generated // before it's expired time. // example value format like "10m" SecretRefreshGraceDuration = "SECRET_GRACE_DURATION" // The environmental variable name for key rotation job running interval. // example value format like "20m" SecretRotationInterval = "SECRET_JOB_RUN_INTERVAL" // The environmental variable name for staled connection recycle job running interval. // example value format like "5m" staledConnectionRecycleInterval = "STALED_CONNECTION_RECYCLE_RUN_INTERVAL" // The environmental variable name for the initial backoff in milliseconds. // example value format like "10" InitialBackoff = "INITIAL_BACKOFF_MSEC" pkcs8Key = "PKCS8_KEY" ) var ( // JWTPath is the default location of a JWT token to be used to authenticate with XDS and CA servers. // If the file is missing, the agent will fallback to using mounted certificates if XDS address is secure. JWTPath = "./var/run/secrets/tokens/istio-token" // LocalSDS is the location of the in-process SDS server - must be in a writeable dir. LocalSDS = "/etc/istio/proxy/SDS" workloadSdsCacheOptions cache.Options gatewaySdsCacheOptions cache.Options serverOptions sds.Options gatewaySecretChan chan struct{} ) // SDSAgent contains the configuration of the agent, based on the injected // environment: // - SDS hostPath if node-agent was used // - /etc/certs/key if Citadel or other mounted Secrets are used // - root cert to use for connecting to XDS server // - CA address, with proper defaults and detection type SDSAgent struct { // Location of JWTPath to connect to CA. If empty, SDS is not possible. // If set SDS will be used - either local or via hostPath. JWTPath string // SDSAddress is the address of the SDS server. Starts with unix: for hostpath mount or built-in // May also be a https address. 
SDSAddress string // CertPath is set with the location of the certs, or empty if mounted certs are not present. CertsPath string // RequireCerts is set if the agent requires certificates: // - if controlPlaneAuthEnabled is set // - port of discovery server is not 15010 (the plain text default). RequireCerts bool // Expected SAN SAN string } // NewSDSAgent wraps the logic for a local SDS. It will check if the JWT token required for local SDS is // present, and set additional config options for the in-process SDS agent. // // The JWT token is currently using a pre-defined audience (istio-ca) or it must match the trust domain (WIP). // If the JWT token is not present - the local SDS agent can't authenticate. // // If node agent and JWT are mounted: it indicates user injected a config using hostPath, and will be used. // func NewSDSAgent(discAddr string, tlsRequired bool) *SDSAgent { ac := &SDSAgent{} discHost, discPort, err := net.SplitHostPort(discAddr) if err != nil { log.Fatala("Invalid discovery address", discAddr, err) } if _, err := os.Stat(JWTPath); err == nil { ac.JWTPath = JWTPath } else { // Can't use in-process SDS. log.Warna("Missing JWT token, can't use in process SDS ", JWTPath, err) if discPort == "15012" { log.Fatala("Missing JWT, can't authenticate with control plane. Try using plain text (15010)") } return ac } ac.SDSAddress = "unix:" + LocalSDS if _, err := os.Stat("/etc/certs/key.pem"); err == nil { ac.CertsPath = "/etc/certs" } if tlsRequired { ac.RequireCerts = true } // Istiod uses a fixed, defined port for K8S-signed certificates. if discPort == "15012" { ac.RequireCerts = true // For local debugging - the discoveryAddress is set to localhost, but the cert issued for normal SA. if discHost == "localhost" { discHost = "istiod.istio-system.svc" } ac.SAN = discHost } return ac } // Simplified SDS setup. This is called if and only if user has explicitly mounted a K8S JWT token, and is not // using a hostPath mounted or external SDS server. // // 1. 
External CA: requires authenticating the trusted JWT AND validating the SAN against the JWT. // For example Google CA // // 2. Indirect, using istiod: using K8S cert. // // 3. Monitor mode - watching secret in same namespace ( Ingress) // // 4. TODO: File watching, for backward compat/migration from mounted secrets. func (conf *SDSAgent) Start(isSidecar bool, podNamespace string) (*sds.Server, error) { applyEnvVars() gatewaySdsCacheOptions = workloadSdsCacheOptions // Next to the envoy config, writeable dir (mounted as mem) serverOptions.WorkloadUDSPath = LocalSDS serverOptions.UseLocalJWT = true // TODO: remove the caching, workload has a single cert workloadSecretCache, _ := newSecretCache(serverOptions) var gatewaySecretCache *cache.SecretCache if !isSidecar { serverOptions.EnableIngressGatewaySDS = true // TODO: what is the setting for ingress ? serverOptions.IngressGatewayUDSPath = serverOptions.WorkloadUDSPath + "_ROUTER" gatewaySecretCache = newIngressSecretCache(podNamespace) } // For sidecar and ingress we need to first get the certificates for the workload. 
// We'll also save them in files, for backward compat with servers generating files // TODO: use caClient.CSRSign() directly // fail hard if we need certs ( control plane security enabled ) and we don't have mounted certs and // we fail to load SDS fail := conf.RequireCerts && conf.CertsPath == "" tok, err := ioutil.ReadFile(conf.JWTPath) if err != nil && fail { log.Fatala("Failed to read token", err) } else { si, err := workloadSecretCache.GenerateSecret(context.Background(), "bootstrap", "default", string(tok)) if err != nil { if fail { log.Fatalf("Failed to get certificates: %v", err) } else { log.Warnf("Failed to get certificate from CA: %v", err) } } else { log.Infoa("Got initial certificate valid until ", si.ExpireTime) } if si != nil { // For debugging and backward compat - we may not need it long term // The files can be used if an Pilot configured with SDS disabled is used, will generate // file based XDS config instead of SDS. err = ioutil.WriteFile("/etc/istio/proxy/key.pem", si.PrivateKey, 0700) if err != nil { log.Fatalf("Failed to write certs: %v", err) } err = ioutil.WriteFile("/etc/istio/proxy/cert-chain.pem", si.CertificateChain, 0700) if err != nil { log.Fatalf("Failed to write certs: %v", err) } } sir, err := workloadSecretCache.GenerateSecret(context.Background(), "bootstrap", "ROOTCA", string(tok)) if err != nil { if fail { log.Fatalf("Failed to get certificates: %v", err) } else { log.Warnf("Failed to get certificate from CA: %v ", err) } } if sir != nil { // For debugging and backward compat - we may not need it long term // TODO: we should concatenate this file with the existing root-cert and possibly pilot-generated roots, for // smooth transition across CAs. 
err = ioutil.WriteFile("/etc/istio/proxy/root-cert.pem", sir.RootCert, 0700) if err != nil { log.Fatalf("Failed to write certs: %v", err) } } } server, err := sds.NewServer(serverOptions, workloadSecretCache, gatewaySecretCache) if err != nil { return nil, err } return server, nil } // newSecretCache creates the cache for workload secrets and/or gateway secrets. func newSecretCache(serverOptions sds.Options) (workloadSecretCache *cache.SecretCache, caClient caClientInterface.Client) { ret := &secretfetcher.SecretFetcher{} // TODO: get the MC public keys from pilot. // TODO: root cert for Istiod from the K8S file or local override // In node agent, a controller is used getting 'istio-security.istio-system' config map // Single caTLSRootCert inside. var err error // TODO: this should all be packaged in a plugin, possibly with optional compilation. if (serverOptions.CAProviderName == "GoogleCA" || strings.Contains(serverOptions.CAEndpoint, "googleapis.com")) && stsclient.GKEClusterURL != "" { // Use a plugin to an external CA - this has direct support for the K8S JWT token // This is only used if the proper env variables are injected - otherwise the existing Citadel or Istiod will be // used. caClient, err = gca.NewGoogleCAClient(serverOptions.CAEndpoint, true) serverOptions.PluginNames = []string{"GoogleTokenExchange"} } else { // Determine the default CA. // If /etc/certs exists - it means Citadel is used (possibly in a mode to only provision the root-cert, not keys) // Otherwise: default to istiod // // If an explicit CA is configured, assume it is mounting /etc/certs var rootCert []byte // explicitSecret is true if a /etc/certs/root-cert file has been mounted. Will be used // to authenticate the certificate of the SDS server (istiod or custom). 
explicitSecret := false if _, err := os.Stat(mountedRoot); err == nil { rootCert, err = ioutil.ReadFile(mountedRoot) if err != nil { log.Warnf("Failed to load existing citadel root: %v", err) } else { explicitSecret = true } } tls := true if serverOptions.CAEndpoint == "" { // Determine the default address, based on the presence of Citadel secrets if explicitSecret { log.Info("Using citadel CA for SDS") serverOptions.CAEndpoint = "istio-citadel.istio-system:8060" } else { rootCert, err = ioutil.ReadFile(k8sCAPath) if err != nil { log.Warnf("Failed to load K8S cert, assume IP secure network: %v", err) serverOptions.CAEndpoint = "istiod.istio-system.svc:15010" } else { log.Info("Using default istiod CA, with K8S certificates for SDS") serverOptions.CAEndpoint = "istiod.istio-system.svc:15012" } } } else { // Explicitly configured CA log.Infoa("Using user-configured CA ", serverOptions.CAEndpoint) if strings.HasSuffix(serverOptions.CAEndpoint, ":15010") { log.Warna("Debug mode or IP-secure network") tls = false } if strings.HasSuffix(serverOptions.CAEndpoint, ":15012") { rootCert, err = ioutil.ReadFile(k8sCAPath) if err != nil { log.Fatalf("Invalid config - port 15012 expects a K8S-signed certificate but certs missing: %v", err) } } } // Will use TLS unless the reserved 15010 port is used ( istiod on an ipsec/secure VPC) // rootCert may be nil - in which case the system roots are used, and the CA is expected to have public key // Otherwise assume the injection has mounted /etc/certs/root-cert.pem caClient, err = citadel.NewCitadelClient(serverOptions.CAEndpoint, tls, rootCert) } if err != nil { log.Errorf("failed to create secretFetcher for workload proxy: %v", err) os.Exit(1) } ret.UseCaClient = true ret.CaClient = caClient workloadSdsCacheOptions.TrustDomain = serverOptions.TrustDomain workloadSdsCacheOptions.Pkcs8Keys = serverOptions.Pkcs8Keys workloadSdsCacheOptions.Plugins = sds.NewPlugins(serverOptions.PluginNames) workloadSecretCache = cache.NewSecretCache(ret, 
sds.NotifyProxy, workloadSdsCacheOptions) return } // TODO: use existing 'sidecar/router' config to enable loading Secrets func newIngressSecretCache(namespace string) (gatewaySecretCache *cache.SecretCache) { gSecretFetcher := &secretfetcher.SecretFetcher{ UseCaClient: false, } cs, err := kube.CreateClientset("", "") if err != nil { log.Errorf("failed to create secretFetcher for gateway proxy: %v", err) os.Exit(1) } gSecretFetcher.FallbackSecretName = "gateway-fallback" gSecretFetcher.InitWithKubeClientAndNs(cs.CoreV1(), namespace) gatewaySecretChan = make(chan struct{}) gSecretFetcher.Run(gatewaySecretChan) gatewaySecretCache = cache.NewSecretCache(gSecretFetcher, sds.NotifyProxy, gatewaySdsCacheOptions) return gatewaySecretCache } func applyEnvVars() { serverOptions.PluginNames = strings.Split(pluginNamesEnv, ",") serverOptions.EnableWorkloadSDS = true serverOptions.EnableIngressGatewaySDS = enableIngressGatewaySDSEnv serverOptions.CAProviderName = caProviderEnv serverOptions.CAEndpoint = caEndpointEnv serverOptions.TrustDomain = trustDomainEnv serverOptions.Pkcs8Keys = pkcs8KeysEnv workloadSdsCacheOptions.SecretTTL = secretTTLEnv workloadSdsCacheOptions.SecretRefreshGraceDuration = secretRefreshGraceDurationEnv workloadSdsCacheOptions.RotationInterval = secretRotationIntervalEnv serverOptions.RecycleInterval = staledConnectionRecycleIntervalEnv workloadSdsCacheOptions.InitialBackoff = int64(initialBackoffEnv) }
// Copyright 2018-2020 Authors of Cilium // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package k8s import ( "github.com/cilium/cilium/pkg/comparator" "github.com/cilium/cilium/pkg/datapath" cilium_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/core/v1" slim_discover_v1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/discovery/v1beta1" slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" slim_networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/networking/v1" "github.com/cilium/cilium/pkg/k8s/types" "github.com/cilium/cilium/pkg/logging/logfields" v1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" ) func ObjToV1NetworkPolicy(obj interface{}) *slim_networkingv1.NetworkPolicy { k8sNP, ok := obj.(*slim_networkingv1.NetworkPolicy) if ok { return k8sNP } deletedObj, ok := obj.(cache.DeletedFinalStateUnknown) if ok { // Delete was not observed by the watcher but is // removed from kube-apiserver. This is the last // known state and the object no longer exists. k8sNP, ok := deletedObj.Obj.(*slim_networkingv1.NetworkPolicy) if ok { return k8sNP } } log.WithField(logfields.Object, logfields.Repr(obj)). 
Warn("Ignoring invalid k8s v1 NetworkPolicy") return nil } func ObjToV1Services(obj interface{}) *slim_corev1.Service { svc, ok := obj.(*slim_corev1.Service) if ok { return svc } deletedObj, ok := obj.(cache.DeletedFinalStateUnknown) if ok { // Delete was not observed by the watcher but is // removed from kube-apiserver. This is the last // known state and the object no longer exists. svc, ok := deletedObj.Obj.(*slim_corev1.Service) if ok { return svc } } log.WithField(logfields.Object, logfields.Repr(obj)). Warn("Ignoring invalid k8s v1 Service") return nil } func ObjToV1Endpoints(obj interface{}) *slim_corev1.Endpoints { ep, ok := obj.(*slim_corev1.Endpoints) if ok { return ep } deletedObj, ok := obj.(cache.DeletedFinalStateUnknown) if ok { // Delete was not observed by the watcher but is // removed from kube-apiserver. This is the last // known state and the object no longer exists. ep, ok := deletedObj.Obj.(*slim_corev1.Endpoints) if ok { return ep } } log.WithField(logfields.Object, logfields.Repr(obj)). Warn("Ignoring invalid k8s v1 Endpoints") return nil } func ObjToV1EndpointSlice(obj interface{}) *slim_discover_v1beta1.EndpointSlice { ep, ok := obj.(*slim_discover_v1beta1.EndpointSlice) if ok { return ep } deletedObj, ok := obj.(cache.DeletedFinalStateUnknown) if ok { // Delete was not observed by the watcher but is // removed from kube-apiserver. This is the last // known state and the object no longer exists. ep, ok := deletedObj.Obj.(*slim_discover_v1beta1.EndpointSlice) if ok { return ep } } log.WithField(logfields.Object, logfields.Repr(obj)). Warn("Ignoring invalid k8s v1beta1 EndpointSlice") return nil } func ObjToSlimCNP(obj interface{}) *types.SlimCNP { cnp, ok := obj.(*types.SlimCNP) if ok { return cnp } deletedObj, ok := obj.(cache.DeletedFinalStateUnknown) if ok { // Delete was not observed by the watcher but is // removed from kube-apiserver. This is the last // known state and the object no longer exists. 
cnp, ok := deletedObj.Obj.(*types.SlimCNP) if ok { return cnp } } log.WithField(logfields.Object, logfields.Repr(obj)). Warn("Ignoring invalid k8s v2 CiliumNetworkPolicy") return nil } func ObjTov1Pod(obj interface{}) *slim_corev1.Pod { pod, ok := obj.(*slim_corev1.Pod) if ok { return pod } deletedObj, ok := obj.(cache.DeletedFinalStateUnknown) if ok { // Delete was not observed by the watcher but is // removed from kube-apiserver. This is the last // known state and the object no longer exists. pod, ok := deletedObj.Obj.(*slim_corev1.Pod) if ok { return pod } } log.WithField(logfields.Object, logfields.Repr(obj)). Warn("Ignoring invalid k8s v1 Pod") return nil } func ObjToV1Node(obj interface{}) *slim_corev1.Node { node, ok := obj.(*slim_corev1.Node) if ok { return node } deletedObj, ok := obj.(cache.DeletedFinalStateUnknown) if ok { // Delete was not observed by the watcher but is // removed from kube-apiserver. This is the last // known state and the object no longer exists. node, ok := deletedObj.Obj.(*slim_corev1.Node) if ok { return node } } log.WithField(logfields.Object, logfields.Repr(obj)). Warn("Ignoring invalid k8s v1 Node") return nil } func ObjToV1Namespace(obj interface{}) *slim_corev1.Namespace { ns, ok := obj.(*slim_corev1.Namespace) if ok { return ns } deletedObj, ok := obj.(cache.DeletedFinalStateUnknown) if ok { // Delete was not observed by the watcher but is // removed from kube-apiserver. This is the last // known state and the object no longer exists. ns, ok := deletedObj.Obj.(*slim_corev1.Namespace) if ok { return ns } } log.WithField(logfields.Object, logfields.Repr(obj)). Warn("Ignoring invalid k8s v1 Namespace") return nil } func EqualV1Services(k8sSVC1, k8sSVC2 *slim_corev1.Service, nodeAddressing datapath.NodeAddressing) bool { // Service annotations are used to mark services as global, shared, etc. 
if !comparator.MapStringEquals(k8sSVC1.GetAnnotations(), k8sSVC2.GetAnnotations()) { return false } svcID1, svc1 := ParseService(k8sSVC1, nodeAddressing) svcID2, svc2 := ParseService(k8sSVC2, nodeAddressing) if svcID1 != svcID2 { return false } // Please write all the equalness logic inside the K8sServiceInfo.Equals() // method. return svc1.DeepEquals(svc2) } // AnnotationsEqual returns whether the annotation with any key in // relevantAnnotations is equal in anno1 and anno2. func AnnotationsEqual(relevantAnnotations []string, anno1, anno2 map[string]string) bool { for _, an := range relevantAnnotations { if anno1[an] != anno2[an] { return false } } return true } func convertToK8sServicePorts(ports []v1.ServicePort) []slim_corev1.ServicePort { if ports == nil { return nil } slimPorts := make([]slim_corev1.ServicePort, 0, len(ports)) for _, v1Port := range ports { slimPorts = append(slimPorts, slim_corev1.ServicePort{ Name: v1Port.Name, Protocol: slim_corev1.Protocol(v1Port.Protocol), Port: v1Port.Port, NodePort: v1Port.NodePort, }, ) } return slimPorts } func convertToK8sServiceAffinityConfig(saCfg *v1.SessionAffinityConfig) *slim_corev1.SessionAffinityConfig { if saCfg == nil { return nil } if saCfg.ClientIP == nil { return &slim_corev1.SessionAffinityConfig{} } return &slim_corev1.SessionAffinityConfig{ ClientIP: &slim_corev1.ClientIPConfig{ TimeoutSeconds: saCfg.ClientIP.TimeoutSeconds, }, } } func convertToK8sLoadBalancerIngress(lbIngs []v1.LoadBalancerIngress) []slim_corev1.LoadBalancerIngress { if lbIngs == nil { return nil } slimLBIngs := make([]slim_corev1.LoadBalancerIngress, 0, len(lbIngs)) for _, lbIng := range lbIngs { slimLBIngs = append(slimLBIngs, slim_corev1.LoadBalancerIngress{ IP: lbIng.IP, }, ) } return slimLBIngs } // ConvertToK8sService converts a *v1.Service into a // *slim_corev1.Service or a cache.DeletedFinalStateUnknown into // a cache.DeletedFinalStateUnknown with a *slim_corev1.Service in its Obj. 
// If the given obj can't be cast into either *slim_corev1.Service // nor cache.DeletedFinalStateUnknown, the original obj is returned. func ConvertToK8sService(obj interface{}) interface{} { switch concreteObj := obj.(type) { case *v1.Service: return &slim_corev1.Service{ TypeMeta: slim_metav1.TypeMeta{ Kind: concreteObj.TypeMeta.Kind, APIVersion: concreteObj.TypeMeta.APIVersion, }, ObjectMeta: slim_metav1.ObjectMeta{ Name: concreteObj.ObjectMeta.Name, Namespace: concreteObj.ObjectMeta.Namespace, UID: concreteObj.ObjectMeta.UID, Labels: concreteObj.ObjectMeta.Labels, Annotations: concreteObj.ObjectMeta.Annotations, }, Spec: slim_corev1.ServiceSpec{ Ports: convertToK8sServicePorts(concreteObj.Spec.Ports), Selector: concreteObj.Spec.Selector, ClusterIP: concreteObj.Spec.ClusterIP, Type: slim_corev1.ServiceType(concreteObj.Spec.Type), ExternalIPs: concreteObj.Spec.ExternalIPs, SessionAffinity: slim_corev1.ServiceAffinity(concreteObj.Spec.SessionAffinity), ExternalTrafficPolicy: slim_corev1.ServiceExternalTrafficPolicyType(concreteObj.Spec.ExternalTrafficPolicy), HealthCheckNodePort: concreteObj.Spec.HealthCheckNodePort, SessionAffinityConfig: convertToK8sServiceAffinityConfig(concreteObj.Spec.SessionAffinityConfig), }, Status: slim_corev1.ServiceStatus{ LoadBalancer: slim_corev1.LoadBalancerStatus{ Ingress: convertToK8sLoadBalancerIngress(concreteObj.Status.LoadBalancer.Ingress), }, }, } case cache.DeletedFinalStateUnknown: svc, ok := concreteObj.Obj.(*v1.Service) if !ok { return obj } return cache.DeletedFinalStateUnknown{ Key: concreteObj.Key, Obj: &slim_corev1.Service{ TypeMeta: slim_metav1.TypeMeta{ Kind: svc.TypeMeta.Kind, APIVersion: svc.TypeMeta.APIVersion, }, ObjectMeta: slim_metav1.ObjectMeta{ Name: svc.ObjectMeta.Name, Namespace: svc.ObjectMeta.Namespace, UID: svc.ObjectMeta.UID, Labels: svc.ObjectMeta.Labels, Annotations: svc.ObjectMeta.Annotations, }, Spec: slim_corev1.ServiceSpec{ Ports: convertToK8sServicePorts(svc.Spec.Ports), Selector: 
svc.Spec.Selector, ClusterIP: svc.Spec.ClusterIP, Type: slim_corev1.ServiceType(svc.Spec.Type), ExternalIPs: svc.Spec.ExternalIPs, SessionAffinity: slim_corev1.ServiceAffinity(svc.Spec.SessionAffinity), ExternalTrafficPolicy: slim_corev1.ServiceExternalTrafficPolicyType(svc.Spec.ExternalTrafficPolicy), HealthCheckNodePort: svc.Spec.HealthCheckNodePort, SessionAffinityConfig: convertToK8sServiceAffinityConfig(svc.Spec.SessionAffinityConfig), }, Status: slim_corev1.ServiceStatus{ LoadBalancer: slim_corev1.LoadBalancerStatus{ Ingress: convertToK8sLoadBalancerIngress(svc.Status.LoadBalancer.Ingress), }, }, }, } default: return obj } } // ConvertToCCNPWithStatus converts a *cilium_v2.CiliumClusterwideNetworkPolicy // into *types.SlimCNP or a cache.DeletedFinalStateUnknown into // a cache.DeletedFinalStateUnknown with a *types.SlimCNP in its Obj. // If the given obj can't be cast into either *cilium_v2.CiliumClusterwideNetworkPolicy // nor cache.DeletedFinalStateUnknown, the original obj is returned. func ConvertToCCNPWithStatus(obj interface{}) interface{} { switch concreteObj := obj.(type) { case *cilium_v2.CiliumClusterwideNetworkPolicy: t := &types.SlimCNP{ CiliumNetworkPolicy: concreteObj.CiliumNetworkPolicy, } t.Status = concreteObj.Status return t case cache.DeletedFinalStateUnknown: cnp, ok := concreteObj.Obj.(*cilium_v2.CiliumClusterwideNetworkPolicy) if !ok { return obj } t := &types.SlimCNP{ CiliumNetworkPolicy: cnp.CiliumNetworkPolicy, } t.Status = cnp.Status return cache.DeletedFinalStateUnknown{ Key: concreteObj.Key, Obj: t, } default: return obj } } // ConvertToCNPWithStatus converts a *cilium_v2.CiliumNetworkPolicy or a // *cilium_v2.CiliumClusterwideNetworkPolicy into a // *types.SlimCNP or a cache.DeletedFinalStateUnknown into // a cache.DeletedFinalStateUnknown with a *types.SlimCNP in its Obj. // If the given obj can't be cast into either *cilium_v2.CiliumNetworkPolicy // nor cache.DeletedFinalStateUnknown, the original obj is returned. 
func ConvertToCNPWithStatus(obj interface{}) interface{} { switch concreteObj := obj.(type) { case *cilium_v2.CiliumNetworkPolicy: return &types.SlimCNP{ CiliumNetworkPolicy: concreteObj, } case cache.DeletedFinalStateUnknown: cnp, ok := concreteObj.Obj.(*cilium_v2.CiliumNetworkPolicy) if !ok { return obj } return cache.DeletedFinalStateUnknown{ Key: concreteObj.Key, Obj: &types.SlimCNP{ CiliumNetworkPolicy: cnp, }, } default: return obj } } // ConvertToCCNP converts a *cilium_v2.CiliumClusterwideNetworkPolicy into a // *types.SlimCNP without the Status field of the given CNP, or a // cache.DeletedFinalStateUnknown into a cache.DeletedFinalStateUnknown with a // *types.SlimCNP, also without the Status field of the given CNP, in its Obj. // If the given obj can't be cast into either *cilium_v2.CiliumClusterwideNetworkPolicy // nor cache.DeletedFinalStateUnknown, the original obj is returned. // WARNING calling this function will set *all* fields of the given CNP as // empty. func ConvertToCCNP(obj interface{}) interface{} { switch concreteObj := obj.(type) { case *cilium_v2.CiliumClusterwideNetworkPolicy: cnp := &types.SlimCNP{ CiliumNetworkPolicy: &cilium_v2.CiliumNetworkPolicy{ TypeMeta: concreteObj.TypeMeta, ObjectMeta: concreteObj.ObjectMeta, Spec: concreteObj.Spec, Specs: concreteObj.Specs, }, } *concreteObj = cilium_v2.CiliumClusterwideNetworkPolicy{} return cnp case cache.DeletedFinalStateUnknown: cnp, ok := concreteObj.Obj.(*cilium_v2.CiliumClusterwideNetworkPolicy) if !ok { return obj } dfsu := cache.DeletedFinalStateUnknown{ Key: concreteObj.Key, Obj: &types.SlimCNP{ CiliumNetworkPolicy: &cilium_v2.CiliumNetworkPolicy{ TypeMeta: cnp.TypeMeta, ObjectMeta: cnp.ObjectMeta, Spec: cnp.Spec, Specs: cnp.Specs, }, }, } *cnp = cilium_v2.CiliumClusterwideNetworkPolicy{} return dfsu default: return obj } } // ConvertToCNP converts a *cilium_v2.CiliumNetworkPolicy into a // *types.SlimCNP without the Status field of the given CNP, or a // 
cache.DeletedFinalStateUnknown into a cache.DeletedFinalStateUnknown with a // *types.SlimCNP, also without the Status field of the given CNP, in its Obj. // If the given obj can't be cast into either *cilium_v2.CiliumNetworkPolicy // nor cache.DeletedFinalStateUnknown, the original obj is returned. // WARNING calling this function will set *all* fields of the given CNP as // empty. func ConvertToCNP(obj interface{}) interface{} { switch concreteObj := obj.(type) { case *cilium_v2.CiliumNetworkPolicy: cnp := &types.SlimCNP{ CiliumNetworkPolicy: &cilium_v2.CiliumNetworkPolicy{ TypeMeta: concreteObj.TypeMeta, ObjectMeta: concreteObj.ObjectMeta, Spec: concreteObj.Spec, Specs: concreteObj.Specs, }, } *concreteObj = cilium_v2.CiliumNetworkPolicy{} return cnp case cache.DeletedFinalStateUnknown: cnp, ok := concreteObj.Obj.(*cilium_v2.CiliumNetworkPolicy) if !ok { return obj } dfsu := cache.DeletedFinalStateUnknown{ Key: concreteObj.Key, Obj: &types.SlimCNP{ CiliumNetworkPolicy: &cilium_v2.CiliumNetworkPolicy{ TypeMeta: cnp.TypeMeta, ObjectMeta: cnp.ObjectMeta, Spec: cnp.Spec, Specs: cnp.Specs, }, }, } *cnp = cilium_v2.CiliumNetworkPolicy{} return dfsu default: return obj } } func convertToAddress(v1Addrs []v1.NodeAddress) []slim_corev1.NodeAddress { if v1Addrs == nil { return nil } addrs := make([]slim_corev1.NodeAddress, 0, len(v1Addrs)) for _, addr := range v1Addrs { addrs = append( addrs, slim_corev1.NodeAddress{ Type: slim_corev1.NodeAddressType(addr.Type), Address: addr.Address, }, ) } return addrs } func convertToTaints(v1Taints []v1.Taint) []slim_corev1.Taint { if v1Taints == nil { return nil } taints := make([]slim_corev1.Taint, 0, len(v1Taints)) for _, taint := range v1Taints { var ta *slim_metav1.Time if taint.TimeAdded != nil { t := slim_metav1.NewTime(taint.TimeAdded.Time) ta = &t } taints = append( taints, slim_corev1.Taint{ Key: taint.Key, Value: taint.Value, Effect: slim_corev1.TaintEffect(taint.Effect), TimeAdded: ta, }, ) } return taints } // 
ConvertToNode converts a *v1.Node into a // *types.Node or a cache.DeletedFinalStateUnknown into // a cache.DeletedFinalStateUnknown with a *types.Node in its Obj. // If the given obj can't be cast into either *v1.Node // nor cache.DeletedFinalStateUnknown, the original obj is returned. // WARNING calling this function will set *all* fields of the given Node as // empty. func ConvertToNode(obj interface{}) interface{} { switch concreteObj := obj.(type) { case *v1.Node: p := &slim_corev1.Node{ TypeMeta: slim_metav1.TypeMeta{ Kind: concreteObj.TypeMeta.Kind, APIVersion: concreteObj.TypeMeta.APIVersion, }, ObjectMeta: slim_metav1.ObjectMeta{ Name: concreteObj.ObjectMeta.Name, Namespace: concreteObj.ObjectMeta.Namespace, UID: concreteObj.ObjectMeta.UID, Labels: concreteObj.ObjectMeta.Labels, Annotations: concreteObj.ObjectMeta.Annotations, }, Spec: slim_corev1.NodeSpec{ PodCIDR: concreteObj.Spec.PodCIDR, PodCIDRs: concreteObj.Spec.PodCIDRs, Taints: convertToTaints(concreteObj.Spec.Taints), }, Status: slim_corev1.NodeStatus{ Addresses: convertToAddress(concreteObj.Status.Addresses), }, } *concreteObj = v1.Node{} return p case cache.DeletedFinalStateUnknown: node, ok := concreteObj.Obj.(*v1.Node) if !ok { return obj } dfsu := cache.DeletedFinalStateUnknown{ Key: concreteObj.Key, Obj: &slim_corev1.Node{ TypeMeta: slim_metav1.TypeMeta{ Kind: node.TypeMeta.Kind, APIVersion: node.TypeMeta.APIVersion, }, ObjectMeta: slim_metav1.ObjectMeta{ Name: node.ObjectMeta.Name, Namespace: node.ObjectMeta.Namespace, UID: node.ObjectMeta.UID, Labels: node.ObjectMeta.Labels, Annotations: node.ObjectMeta.Annotations, }, Spec: slim_corev1.NodeSpec{ PodCIDR: node.Spec.PodCIDR, PodCIDRs: node.Spec.PodCIDRs, Taints: convertToTaints(node.Spec.Taints), }, Status: slim_corev1.NodeStatus{ Addresses: convertToAddress(node.Status.Addresses), }, }, } *node = v1.Node{} return dfsu default: return obj } } // ConvertToCiliumNode converts a *cilium_v2.CiliumNode into a // *cilium_v2.CiliumNode or a 
cache.DeletedFinalStateUnknown into // a cache.DeletedFinalStateUnknown with a *cilium_v2.CiliumNode in its Obj. // If the given obj can't be cast into either *cilium_v2.CiliumNode // nor cache.DeletedFinalStateUnknown, the original obj is returned. func ConvertToCiliumNode(obj interface{}) interface{} { // TODO create a slim type of the CiliumNode switch concreteObj := obj.(type) { case *cilium_v2.CiliumNode: return concreteObj case cache.DeletedFinalStateUnknown: ciliumNode, ok := concreteObj.Obj.(*cilium_v2.CiliumNode) if !ok { return obj } return cache.DeletedFinalStateUnknown{ Key: concreteObj.Key, Obj: ciliumNode, } default: return obj } } // ObjToCiliumNode attempts to cast object to a CiliumNode object and // returns a deep copy if the castin succeeds. Otherwise, nil is returned. func ObjToCiliumNode(obj interface{}) *cilium_v2.CiliumNode { cn, ok := obj.(*cilium_v2.CiliumNode) if ok { return cn } deletedObj, ok := obj.(cache.DeletedFinalStateUnknown) if ok { // Delete was not observed by the watcher but is // removed from kube-apiserver. This is the last // known state and the object no longer exists. cn, ok := deletedObj.Obj.(*cilium_v2.CiliumNode) if ok { return cn } } log.WithField(logfields.Object, logfields.Repr(obj)). Warn("Ignoring invalid v2 CiliumNode") return nil } // ConvertToCiliumEndpoint converts a *cilium_v2.CiliumEndpoint into a // *types.CiliumEndpoint or a cache.DeletedFinalStateUnknown into a // cache.DeletedFinalStateUnknown with a *types.CiliumEndpoint in its Obj. // If the given obj can't be cast into either *cilium_v2.CiliumEndpoint nor // cache.DeletedFinalStateUnknown, the original obj is returned. 
func ConvertToCiliumEndpoint(obj interface{}) interface{} { switch concreteObj := obj.(type) { case *cilium_v2.CiliumEndpoint: p := &types.CiliumEndpoint{ TypeMeta: slim_metav1.TypeMeta{ Kind: concreteObj.TypeMeta.Kind, APIVersion: concreteObj.TypeMeta.APIVersion, }, ObjectMeta: slim_metav1.ObjectMeta{ Name: concreteObj.ObjectMeta.Name, Namespace: concreteObj.ObjectMeta.Namespace, UID: concreteObj.ObjectMeta.UID, ResourceVersion: concreteObj.ObjectMeta.ResourceVersion, // We don't need to store labels nor annotations because // they are not used by the CEP handlers. Labels: nil, Annotations: nil, }, Encryption: &concreteObj.Status.Encryption, Identity: concreteObj.Status.Identity, Networking: concreteObj.Status.Networking, NamedPorts: concreteObj.Status.NamedPorts, } *concreteObj = cilium_v2.CiliumEndpoint{} return p case cache.DeletedFinalStateUnknown: ciliumEndpoint, ok := concreteObj.Obj.(*cilium_v2.CiliumEndpoint) if !ok { return obj } dfsu := cache.DeletedFinalStateUnknown{ Key: concreteObj.Key, Obj: &types.CiliumEndpoint{TypeMeta: slim_metav1.TypeMeta{ Kind: ciliumEndpoint.TypeMeta.Kind, APIVersion: ciliumEndpoint.TypeMeta.APIVersion, }, ObjectMeta: slim_metav1.ObjectMeta{ Name: ciliumEndpoint.ObjectMeta.Name, Namespace: ciliumEndpoint.ObjectMeta.Namespace, UID: ciliumEndpoint.ObjectMeta.UID, ResourceVersion: ciliumEndpoint.ObjectMeta.ResourceVersion, // We don't need to store labels nor annotations because // they are not used by the CEP handlers. Labels: nil, Annotations: nil, }, Encryption: &ciliumEndpoint.Status.Encryption, Identity: ciliumEndpoint.Status.Identity, Networking: ciliumEndpoint.Status.Networking, NamedPorts: ciliumEndpoint.Status.NamedPorts, }, } *ciliumEndpoint = cilium_v2.CiliumEndpoint{} return dfsu default: return obj } } // ObjToCiliumEndpoint attempts to cast object to a CiliumEndpoint object // and returns a deep copy if the castin succeeds. Otherwise, nil is returned. 
func ObjToCiliumEndpoint(obj interface{}) *types.CiliumEndpoint {
	ce, ok := obj.(*types.CiliumEndpoint)
	if ok {
		return ce
	}
	deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
	if ok {
		// Delete was not observed by the watcher but is
		// removed from kube-apiserver. This is the last
		// known state and the object no longer exists.
		ce, ok := deletedObj.Obj.(*types.CiliumEndpoint)
		if ok {
			return ce
		}
	}
	// Neither a CiliumEndpoint nor a tombstone wrapping one: log and drop.
	log.WithField(logfields.Object, logfields.Repr(obj)).
		Warn("Ignoring invalid v2 CiliumEndpoint")
	return nil
}
pkg/k8s: add missing ResourceVersion field when parsing k8s structures Fixes: d7cbb6a823bc ("slim/k8s: add missing resourceVersion field") Signed-off-by: André Martins <bc9800b9d52a24cce72a73dd528afed53f10e5fc@cilium.io> // Copyright 2018-2020 Authors of Cilium // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License.
package k8s

import (
	"github.com/cilium/cilium/pkg/comparator"
	"github.com/cilium/cilium/pkg/datapath"
	cilium_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
	slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/core/v1"
	slim_discover_v1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/discovery/v1beta1"
	slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
	slim_networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/networking/v1"
	"github.com/cilium/cilium/pkg/k8s/types"
	"github.com/cilium/cilium/pkg/logging/logfields"
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// ObjToV1NetworkPolicy casts obj to a slim NetworkPolicy, unwrapping a
// cache.DeletedFinalStateUnknown tombstone if needed. Returns nil (and logs
// a warning) when obj is neither.
func ObjToV1NetworkPolicy(obj interface{}) *slim_networkingv1.NetworkPolicy {
	k8sNP, ok := obj.(*slim_networkingv1.NetworkPolicy)
	if ok {
		return k8sNP
	}
	deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
	if ok {
		// Delete was not observed by the watcher but is
		// removed from kube-apiserver. This is the last
		// known state and the object no longer exists.
		k8sNP, ok := deletedObj.Obj.(*slim_networkingv1.NetworkPolicy)
		if ok {
			return k8sNP
		}
	}
	log.WithField(logfields.Object, logfields.Repr(obj)).
		Warn("Ignoring invalid k8s v1 NetworkPolicy")
	return nil
}

// ObjToV1Services casts obj to a slim Service, unwrapping a
// cache.DeletedFinalStateUnknown tombstone if needed. Returns nil (and logs
// a warning) when obj is neither.
func ObjToV1Services(obj interface{}) *slim_corev1.Service {
	svc, ok := obj.(*slim_corev1.Service)
	if ok {
		return svc
	}
	deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
	if ok {
		// Delete was not observed by the watcher but is
		// removed from kube-apiserver. This is the last
		// known state and the object no longer exists.
		svc, ok := deletedObj.Obj.(*slim_corev1.Service)
		if ok {
			return svc
		}
	}
	log.WithField(logfields.Object, logfields.Repr(obj)).
		Warn("Ignoring invalid k8s v1 Service")
	return nil
}

// ObjToV1Endpoints casts obj to slim Endpoints, unwrapping a
// cache.DeletedFinalStateUnknown tombstone if needed. Returns nil (and logs
// a warning) when obj is neither.
func ObjToV1Endpoints(obj interface{}) *slim_corev1.Endpoints {
	ep, ok := obj.(*slim_corev1.Endpoints)
	if ok {
		return ep
	}
	deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
	if ok {
		// Delete was not observed by the watcher but is
		// removed from kube-apiserver. This is the last
		// known state and the object no longer exists.
		ep, ok := deletedObj.Obj.(*slim_corev1.Endpoints)
		if ok {
			return ep
		}
	}
	log.WithField(logfields.Object, logfields.Repr(obj)).
		Warn("Ignoring invalid k8s v1 Endpoints")
	return nil
}

// ObjToV1EndpointSlice casts obj to a slim EndpointSlice, unwrapping a
// cache.DeletedFinalStateUnknown tombstone if needed. Returns nil (and logs
// a warning) when obj is neither.
func ObjToV1EndpointSlice(obj interface{}) *slim_discover_v1beta1.EndpointSlice {
	ep, ok := obj.(*slim_discover_v1beta1.EndpointSlice)
	if ok {
		return ep
	}
	deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
	if ok {
		// Delete was not observed by the watcher but is
		// removed from kube-apiserver. This is the last
		// known state and the object no longer exists.
		ep, ok := deletedObj.Obj.(*slim_discover_v1beta1.EndpointSlice)
		if ok {
			return ep
		}
	}
	log.WithField(logfields.Object, logfields.Repr(obj)).
		Warn("Ignoring invalid k8s v1beta1 EndpointSlice")
	return nil
}

// ObjToSlimCNP casts obj to a *types.SlimCNP, unwrapping a
// cache.DeletedFinalStateUnknown tombstone if needed. Returns nil (and logs
// a warning) when obj is neither.
func ObjToSlimCNP(obj interface{}) *types.SlimCNP {
	cnp, ok := obj.(*types.SlimCNP)
	if ok {
		return cnp
	}
	deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
	if ok {
		// Delete was not observed by the watcher but is
		// removed from kube-apiserver. This is the last
		// known state and the object no longer exists.
		cnp, ok := deletedObj.Obj.(*types.SlimCNP)
		if ok {
			return cnp
		}
	}
	log.WithField(logfields.Object, logfields.Repr(obj)).
		Warn("Ignoring invalid k8s v2 CiliumNetworkPolicy")
	return nil
}

// ObjTov1Pod casts obj to a slim Pod, unwrapping a
// cache.DeletedFinalStateUnknown tombstone if needed. Returns nil (and logs
// a warning) when obj is neither.
func ObjTov1Pod(obj interface{}) *slim_corev1.Pod {
	pod, ok := obj.(*slim_corev1.Pod)
	if ok {
		return pod
	}
	deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
	if ok {
		// Delete was not observed by the watcher but is
		// removed from kube-apiserver. This is the last
		// known state and the object no longer exists.
		pod, ok := deletedObj.Obj.(*slim_corev1.Pod)
		if ok {
			return pod
		}
	}
	log.WithField(logfields.Object, logfields.Repr(obj)).
		Warn("Ignoring invalid k8s v1 Pod")
	return nil
}

// ObjToV1Node casts obj to a slim Node, unwrapping a
// cache.DeletedFinalStateUnknown tombstone if needed. Returns nil (and logs
// a warning) when obj is neither.
func ObjToV1Node(obj interface{}) *slim_corev1.Node {
	node, ok := obj.(*slim_corev1.Node)
	if ok {
		return node
	}
	deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
	if ok {
		// Delete was not observed by the watcher but is
		// removed from kube-apiserver. This is the last
		// known state and the object no longer exists.
		node, ok := deletedObj.Obj.(*slim_corev1.Node)
		if ok {
			return node
		}
	}
	log.WithField(logfields.Object, logfields.Repr(obj)).
		Warn("Ignoring invalid k8s v1 Node")
	return nil
}

// ObjToV1Namespace casts obj to a slim Namespace, unwrapping a
// cache.DeletedFinalStateUnknown tombstone if needed. Returns nil (and logs
// a warning) when obj is neither.
func ObjToV1Namespace(obj interface{}) *slim_corev1.Namespace {
	ns, ok := obj.(*slim_corev1.Namespace)
	if ok {
		return ns
	}
	deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
	if ok {
		// Delete was not observed by the watcher but is
		// removed from kube-apiserver. This is the last
		// known state and the object no longer exists.
		ns, ok := deletedObj.Obj.(*slim_corev1.Namespace)
		if ok {
			return ns
		}
	}
	log.WithField(logfields.Object, logfields.Repr(obj)).
		Warn("Ignoring invalid k8s v1 Namespace")
	return nil
}

// EqualV1Services reports whether the two services parse to the same Cilium
// service representation (same annotations, same service ID, deep-equal
// parsed content).
func EqualV1Services(k8sSVC1, k8sSVC2 *slim_corev1.Service, nodeAddressing datapath.NodeAddressing) bool {
	// Service annotations are used to mark services as global, shared, etc.
	if !comparator.MapStringEquals(k8sSVC1.GetAnnotations(), k8sSVC2.GetAnnotations()) {
		return false
	}

	svcID1, svc1 := ParseService(k8sSVC1, nodeAddressing)
	svcID2, svc2 := ParseService(k8sSVC2, nodeAddressing)

	if svcID1 != svcID2 {
		return false
	}

	// Please write all the equalness logic inside the K8sServiceInfo.Equals()
	// method.
	return svc1.DeepEquals(svc2)
}

// AnnotationsEqual returns whether the annotation with any key in
// relevantAnnotations is equal in anno1 and anno2.
func AnnotationsEqual(relevantAnnotations []string, anno1, anno2 map[string]string) bool { for _, an := range relevantAnnotations { if anno1[an] != anno2[an] { return false } } return true } func convertToK8sServicePorts(ports []v1.ServicePort) []slim_corev1.ServicePort { if ports == nil { return nil } slimPorts := make([]slim_corev1.ServicePort, 0, len(ports)) for _, v1Port := range ports { slimPorts = append(slimPorts, slim_corev1.ServicePort{ Name: v1Port.Name, Protocol: slim_corev1.Protocol(v1Port.Protocol), Port: v1Port.Port, NodePort: v1Port.NodePort, }, ) } return slimPorts } func convertToK8sServiceAffinityConfig(saCfg *v1.SessionAffinityConfig) *slim_corev1.SessionAffinityConfig { if saCfg == nil { return nil } if saCfg.ClientIP == nil { return &slim_corev1.SessionAffinityConfig{} } return &slim_corev1.SessionAffinityConfig{ ClientIP: &slim_corev1.ClientIPConfig{ TimeoutSeconds: saCfg.ClientIP.TimeoutSeconds, }, } } func convertToK8sLoadBalancerIngress(lbIngs []v1.LoadBalancerIngress) []slim_corev1.LoadBalancerIngress { if lbIngs == nil { return nil } slimLBIngs := make([]slim_corev1.LoadBalancerIngress, 0, len(lbIngs)) for _, lbIng := range lbIngs { slimLBIngs = append(slimLBIngs, slim_corev1.LoadBalancerIngress{ IP: lbIng.IP, }, ) } return slimLBIngs } // ConvertToK8sService converts a *v1.Service into a // *slim_corev1.Service or a cache.DeletedFinalStateUnknown into // a cache.DeletedFinalStateUnknown with a *slim_corev1.Service in its Obj. // If the given obj can't be cast into either *slim_corev1.Service // nor cache.DeletedFinalStateUnknown, the original obj is returned. 
func ConvertToK8sService(obj interface{}) interface{} {
	switch concreteObj := obj.(type) {
	case *v1.Service:
		// Copy only the fields the slim type carries; everything else in
		// the full v1.Service is dropped.
		return &slim_corev1.Service{
			TypeMeta: slim_metav1.TypeMeta{
				Kind:       concreteObj.TypeMeta.Kind,
				APIVersion: concreteObj.TypeMeta.APIVersion,
			},
			ObjectMeta: slim_metav1.ObjectMeta{
				Name:            concreteObj.ObjectMeta.Name,
				Namespace:       concreteObj.ObjectMeta.Namespace,
				ResourceVersion: concreteObj.ObjectMeta.ResourceVersion,
				UID:             concreteObj.ObjectMeta.UID,
				Labels:          concreteObj.ObjectMeta.Labels,
				Annotations:     concreteObj.ObjectMeta.Annotations,
			},
			Spec: slim_corev1.ServiceSpec{
				Ports:                 convertToK8sServicePorts(concreteObj.Spec.Ports),
				Selector:              concreteObj.Spec.Selector,
				ClusterIP:             concreteObj.Spec.ClusterIP,
				Type:                  slim_corev1.ServiceType(concreteObj.Spec.Type),
				ExternalIPs:           concreteObj.Spec.ExternalIPs,
				SessionAffinity:       slim_corev1.ServiceAffinity(concreteObj.Spec.SessionAffinity),
				ExternalTrafficPolicy: slim_corev1.ServiceExternalTrafficPolicyType(concreteObj.Spec.ExternalTrafficPolicy),
				HealthCheckNodePort:   concreteObj.Spec.HealthCheckNodePort,
				SessionAffinityConfig: convertToK8sServiceAffinityConfig(concreteObj.Spec.SessionAffinityConfig),
			},
			Status: slim_corev1.ServiceStatus{
				LoadBalancer: slim_corev1.LoadBalancerStatus{
					Ingress: convertToK8sLoadBalancerIngress(concreteObj.Status.LoadBalancer.Ingress),
				},
			},
		}
	case cache.DeletedFinalStateUnknown:
		svc, ok := concreteObj.Obj.(*v1.Service)
		if !ok {
			return obj
		}
		// Re-wrap the tombstone with a slim Service in its Obj.
		return cache.DeletedFinalStateUnknown{
			Key: concreteObj.Key,
			Obj: &slim_corev1.Service{
				TypeMeta: slim_metav1.TypeMeta{
					Kind:       svc.TypeMeta.Kind,
					APIVersion: svc.TypeMeta.APIVersion,
				},
				ObjectMeta: slim_metav1.ObjectMeta{
					Name:            svc.ObjectMeta.Name,
					Namespace:       svc.ObjectMeta.Namespace,
					ResourceVersion: svc.ObjectMeta.ResourceVersion,
					UID:             svc.ObjectMeta.UID,
					Labels:          svc.ObjectMeta.Labels,
					Annotations:     svc.ObjectMeta.Annotations,
				},
				Spec: slim_corev1.ServiceSpec{
					Ports:                 convertToK8sServicePorts(svc.Spec.Ports),
					Selector:              svc.Spec.Selector,
					ClusterIP:             svc.Spec.ClusterIP,
					Type:                  slim_corev1.ServiceType(svc.Spec.Type),
					ExternalIPs:           svc.Spec.ExternalIPs,
					SessionAffinity:       slim_corev1.ServiceAffinity(svc.Spec.SessionAffinity),
					ExternalTrafficPolicy: slim_corev1.ServiceExternalTrafficPolicyType(svc.Spec.ExternalTrafficPolicy),
					HealthCheckNodePort:   svc.Spec.HealthCheckNodePort,
					SessionAffinityConfig: convertToK8sServiceAffinityConfig(svc.Spec.SessionAffinityConfig),
				},
				Status: slim_corev1.ServiceStatus{
					LoadBalancer: slim_corev1.LoadBalancerStatus{
						Ingress: convertToK8sLoadBalancerIngress(svc.Status.LoadBalancer.Ingress),
					},
				},
			},
		}
	default:
		return obj
	}
}

// ConvertToCCNPWithStatus converts a *cilium_v2.CiliumClusterwideNetworkPolicy
// into *types.SlimCNP or a cache.DeletedFinalStateUnknown into
// a cache.DeletedFinalStateUnknown with a *types.SlimCNP in its Obj.
// If the given obj can't be cast into either *cilium_v2.CiliumClusterwideNetworkPolicy
// nor cache.DeletedFinalStateUnknown, the original obj is returned.
func ConvertToCCNPWithStatus(obj interface{}) interface{} {
	switch concreteObj := obj.(type) {
	case *cilium_v2.CiliumClusterwideNetworkPolicy:
		t := &types.SlimCNP{
			CiliumNetworkPolicy: concreteObj.CiliumNetworkPolicy,
		}
		// Status is kept (unlike ConvertToCCNP, which strips it).
		t.Status = concreteObj.Status
		return t

	case cache.DeletedFinalStateUnknown:
		cnp, ok := concreteObj.Obj.(*cilium_v2.CiliumClusterwideNetworkPolicy)
		if !ok {
			return obj
		}
		t := &types.SlimCNP{
			CiliumNetworkPolicy: cnp.CiliumNetworkPolicy,
		}
		t.Status = cnp.Status
		return cache.DeletedFinalStateUnknown{
			Key: concreteObj.Key,
			Obj: t,
		}

	default:
		return obj
	}
}

// ConvertToCNPWithStatus converts a *cilium_v2.CiliumNetworkPolicy or a
// *cilium_v2.CiliumClusterwideNetworkPolicy into a
// *types.SlimCNP or a cache.DeletedFinalStateUnknown into
// a cache.DeletedFinalStateUnknown with a *types.SlimCNP in its Obj.
// If the given obj can't be cast into either *cilium_v2.CiliumNetworkPolicy
// nor cache.DeletedFinalStateUnknown, the original obj is returned.
func ConvertToCNPWithStatus(obj interface{}) interface{} {
	switch concreteObj := obj.(type) {
	case *cilium_v2.CiliumNetworkPolicy:
		// Wrap the policy as-is; Status is retained.
		return &types.SlimCNP{
			CiliumNetworkPolicy: concreteObj,
		}
	case cache.DeletedFinalStateUnknown:
		cnp, ok := concreteObj.Obj.(*cilium_v2.CiliumNetworkPolicy)
		if !ok {
			return obj
		}
		return cache.DeletedFinalStateUnknown{
			Key: concreteObj.Key,
			Obj: &types.SlimCNP{
				CiliumNetworkPolicy: cnp,
			},
		}
	default:
		return obj
	}
}

// ConvertToCCNP converts a *cilium_v2.CiliumClusterwideNetworkPolicy into a
// *types.SlimCNP without the Status field of the given CNP, or a
// cache.DeletedFinalStateUnknown into a cache.DeletedFinalStateUnknown with a
// *types.SlimCNP, also without the Status field of the given CNP, in its Obj.
// If the given obj can't be cast into either *cilium_v2.CiliumClusterwideNetworkPolicy
// nor cache.DeletedFinalStateUnknown, the original obj is returned.
// WARNING calling this function will set *all* fields of the given CNP as
// empty.
func ConvertToCCNP(obj interface{}) interface{} {
	switch concreteObj := obj.(type) {
	case *cilium_v2.CiliumClusterwideNetworkPolicy:
		// Copy everything except Status, then zero the original to
		// release its memory (see WARNING above).
		cnp := &types.SlimCNP{
			CiliumNetworkPolicy: &cilium_v2.CiliumNetworkPolicy{
				TypeMeta:   concreteObj.TypeMeta,
				ObjectMeta: concreteObj.ObjectMeta,
				Spec:       concreteObj.Spec,
				Specs:      concreteObj.Specs,
			},
		}
		*concreteObj = cilium_v2.CiliumClusterwideNetworkPolicy{}
		return cnp

	case cache.DeletedFinalStateUnknown:
		cnp, ok := concreteObj.Obj.(*cilium_v2.CiliumClusterwideNetworkPolicy)
		if !ok {
			return obj
		}
		dfsu := cache.DeletedFinalStateUnknown{
			Key: concreteObj.Key,
			Obj: &types.SlimCNP{
				CiliumNetworkPolicy: &cilium_v2.CiliumNetworkPolicy{
					TypeMeta:   cnp.TypeMeta,
					ObjectMeta: cnp.ObjectMeta,
					Spec:       cnp.Spec,
					Specs:      cnp.Specs,
				},
			},
		}
		*cnp = cilium_v2.CiliumClusterwideNetworkPolicy{}
		return dfsu

	default:
		return obj
	}
}

// ConvertToCNP converts a *cilium_v2.CiliumNetworkPolicy into a
// *types.SlimCNP without the Status field of the given CNP, or a
// cache.DeletedFinalStateUnknown into a cache.DeletedFinalStateUnknown with a
// *types.SlimCNP, also without the Status field of the given CNP, in its Obj.
// If the given obj can't be cast into either *cilium_v2.CiliumNetworkPolicy
// nor cache.DeletedFinalStateUnknown, the original obj is returned.
// WARNING calling this function will set *all* fields of the given CNP as
// empty.
func ConvertToCNP(obj interface{}) interface{} {
	switch concreteObj := obj.(type) {
	case *cilium_v2.CiliumNetworkPolicy:
		// Copy everything except Status, then zero the original to
		// release its memory (see WARNING above).
		cnp := &types.SlimCNP{
			CiliumNetworkPolicy: &cilium_v2.CiliumNetworkPolicy{
				TypeMeta:   concreteObj.TypeMeta,
				ObjectMeta: concreteObj.ObjectMeta,
				Spec:       concreteObj.Spec,
				Specs:      concreteObj.Specs,
			},
		}
		*concreteObj = cilium_v2.CiliumNetworkPolicy{}
		return cnp
	case cache.DeletedFinalStateUnknown:
		cnp, ok := concreteObj.Obj.(*cilium_v2.CiliumNetworkPolicy)
		if !ok {
			return obj
		}
		dfsu := cache.DeletedFinalStateUnknown{
			Key: concreteObj.Key,
			Obj: &types.SlimCNP{
				CiliumNetworkPolicy: &cilium_v2.CiliumNetworkPolicy{
					TypeMeta:   cnp.TypeMeta,
					ObjectMeta: cnp.ObjectMeta,
					Spec:       cnp.Spec,
					Specs:      cnp.Specs,
				},
			},
		}
		*cnp = cilium_v2.CiliumNetworkPolicy{}
		return dfsu
	default:
		return obj
	}
}

// convertToAddress maps v1 node addresses onto their slim equivalents,
// preserving nil-ness of the input slice.
func convertToAddress(v1Addrs []v1.NodeAddress) []slim_corev1.NodeAddress {
	if v1Addrs == nil {
		return nil
	}
	addrs := make([]slim_corev1.NodeAddress, 0, len(v1Addrs))
	for _, addr := range v1Addrs {
		addrs = append(
			addrs,
			slim_corev1.NodeAddress{
				Type:    slim_corev1.NodeAddressType(addr.Type),
				Address: addr.Address,
			},
		)
	}
	return addrs
}

// convertToTaints maps v1 node taints onto their slim equivalents,
// preserving nil-ness of the input slice.
func convertToTaints(v1Taints []v1.Taint) []slim_corev1.Taint {
	if v1Taints == nil {
		return nil
	}
	taints := make([]slim_corev1.Taint, 0, len(v1Taints))
	for _, taint := range v1Taints {
		// TimeAdded is optional; copy it into a slim time only when set.
		var ta *slim_metav1.Time
		if taint.TimeAdded != nil {
			t := slim_metav1.NewTime(taint.TimeAdded.Time)
			ta = &t
		}
		taints = append(
			taints,
			slim_corev1.Taint{
				Key:       taint.Key,
				Value:     taint.Value,
				Effect:    slim_corev1.TaintEffect(taint.Effect),
				TimeAdded: ta,
			},
		)
	}
	return taints
}

//
ConvertToNode converts a *v1.Node into a
// *types.Node or a cache.DeletedFinalStateUnknown into
// a cache.DeletedFinalStateUnknown with a *types.Node in its Obj.
// If the given obj can't be cast into either *v1.Node
// nor cache.DeletedFinalStateUnknown, the original obj is returned.
// WARNING calling this function will set *all* fields of the given Node as
// empty.
func ConvertToNode(obj interface{}) interface{} {
	switch concreteObj := obj.(type) {
	case *v1.Node:
		p := &slim_corev1.Node{
			TypeMeta: slim_metav1.TypeMeta{
				Kind:       concreteObj.TypeMeta.Kind,
				APIVersion: concreteObj.TypeMeta.APIVersion,
			},
			ObjectMeta: slim_metav1.ObjectMeta{
				Name:            concreteObj.ObjectMeta.Name,
				Namespace:       concreteObj.ObjectMeta.Namespace,
				UID:             concreteObj.ObjectMeta.UID,
				ResourceVersion: concreteObj.ObjectMeta.ResourceVersion,
				Labels:          concreteObj.ObjectMeta.Labels,
				Annotations:     concreteObj.ObjectMeta.Annotations,
			},
			Spec: slim_corev1.NodeSpec{
				PodCIDR:  concreteObj.Spec.PodCIDR,
				PodCIDRs: concreteObj.Spec.PodCIDRs,
				Taints:   convertToTaints(concreteObj.Spec.Taints),
			},
			Status: slim_corev1.NodeStatus{
				Addresses: convertToAddress(concreteObj.Status.Addresses),
			},
		}
		// Zero the original object to release its memory (see WARNING above).
		*concreteObj = v1.Node{}
		return p
	case cache.DeletedFinalStateUnknown:
		node, ok := concreteObj.Obj.(*v1.Node)
		if !ok {
			return obj
		}
		dfsu := cache.DeletedFinalStateUnknown{
			Key: concreteObj.Key,
			Obj: &slim_corev1.Node{
				TypeMeta: slim_metav1.TypeMeta{
					Kind:       node.TypeMeta.Kind,
					APIVersion: node.TypeMeta.APIVersion,
				},
				ObjectMeta: slim_metav1.ObjectMeta{
					Name:            node.ObjectMeta.Name,
					Namespace:       node.ObjectMeta.Namespace,
					UID:             node.ObjectMeta.UID,
					ResourceVersion: node.ObjectMeta.ResourceVersion,
					Labels:          node.ObjectMeta.Labels,
					Annotations:     node.ObjectMeta.Annotations,
				},
				Spec: slim_corev1.NodeSpec{
					PodCIDR:  node.Spec.PodCIDR,
					PodCIDRs: node.Spec.PodCIDRs,
					Taints:   convertToTaints(node.Spec.Taints),
				},
				Status: slim_corev1.NodeStatus{
					Addresses: convertToAddress(node.Status.Addresses),
				},
			},
		}
		*node = v1.Node{}
		return dfsu
	default:
		return obj
	}
}

// ConvertToCiliumNode converts a *cilium_v2.CiliumNode into a
// *cilium_v2.CiliumNode or a cache.DeletedFinalStateUnknown into
// a cache.DeletedFinalStateUnknown with a *cilium_v2.CiliumNode in its Obj.
// If the given obj can't be cast into either *cilium_v2.CiliumNode
// nor cache.DeletedFinalStateUnknown, the original obj is returned.
func ConvertToCiliumNode(obj interface{}) interface{} {
	// TODO create a slim type of the CiliumNode
	switch concreteObj := obj.(type) {
	case *cilium_v2.CiliumNode:
		// No slimming yet: the object is passed through unchanged.
		return concreteObj
	case cache.DeletedFinalStateUnknown:
		ciliumNode, ok := concreteObj.Obj.(*cilium_v2.CiliumNode)
		if !ok {
			return obj
		}
		return cache.DeletedFinalStateUnknown{
			Key: concreteObj.Key,
			Obj: ciliumNode,
		}
	default:
		return obj
	}
}

// ObjToCiliumNode attempts to cast object to a CiliumNode object and
// returns it if the cast succeeds. Otherwise, nil is returned.
// (Note: the object itself is returned, not a deep copy.)
func ObjToCiliumNode(obj interface{}) *cilium_v2.CiliumNode {
	cn, ok := obj.(*cilium_v2.CiliumNode)
	if ok {
		return cn
	}
	deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
	if ok {
		// Delete was not observed by the watcher but is
		// removed from kube-apiserver. This is the last
		// known state and the object no longer exists.
		cn, ok := deletedObj.Obj.(*cilium_v2.CiliumNode)
		if ok {
			return cn
		}
	}
	log.WithField(logfields.Object, logfields.Repr(obj)).
		Warn("Ignoring invalid v2 CiliumNode")
	return nil
}

// ConvertToCiliumEndpoint converts a *cilium_v2.CiliumEndpoint into a
// *types.CiliumEndpoint or a cache.DeletedFinalStateUnknown into a
// cache.DeletedFinalStateUnknown with a *types.CiliumEndpoint in its Obj.
// If the given obj can't be cast into either *cilium_v2.CiliumEndpoint nor
// cache.DeletedFinalStateUnknown, the original obj is returned.
func ConvertToCiliumEndpoint(obj interface{}) interface{} { switch concreteObj := obj.(type) { case *cilium_v2.CiliumEndpoint: p := &types.CiliumEndpoint{ TypeMeta: slim_metav1.TypeMeta{ Kind: concreteObj.TypeMeta.Kind, APIVersion: concreteObj.TypeMeta.APIVersion, }, ObjectMeta: slim_metav1.ObjectMeta{ Name: concreteObj.ObjectMeta.Name, Namespace: concreteObj.ObjectMeta.Namespace, UID: concreteObj.ObjectMeta.UID, ResourceVersion: concreteObj.ObjectMeta.ResourceVersion, // We don't need to store labels nor annotations because // they are not used by the CEP handlers. Labels: nil, Annotations: nil, }, Encryption: &concreteObj.Status.Encryption, Identity: concreteObj.Status.Identity, Networking: concreteObj.Status.Networking, NamedPorts: concreteObj.Status.NamedPorts, } *concreteObj = cilium_v2.CiliumEndpoint{} return p case cache.DeletedFinalStateUnknown: ciliumEndpoint, ok := concreteObj.Obj.(*cilium_v2.CiliumEndpoint) if !ok { return obj } dfsu := cache.DeletedFinalStateUnknown{ Key: concreteObj.Key, Obj: &types.CiliumEndpoint{ TypeMeta: slim_metav1.TypeMeta{ Kind: ciliumEndpoint.TypeMeta.Kind, APIVersion: ciliumEndpoint.TypeMeta.APIVersion, }, ObjectMeta: slim_metav1.ObjectMeta{ Name: ciliumEndpoint.ObjectMeta.Name, Namespace: ciliumEndpoint.ObjectMeta.Namespace, UID: ciliumEndpoint.ObjectMeta.UID, ResourceVersion: ciliumEndpoint.ObjectMeta.ResourceVersion, // We don't need to store labels nor annotations because // they are not used by the CEP handlers. Labels: nil, Annotations: nil, }, Encryption: &ciliumEndpoint.Status.Encryption, Identity: ciliumEndpoint.Status.Identity, Networking: ciliumEndpoint.Status.Networking, NamedPorts: ciliumEndpoint.Status.NamedPorts, }, } *ciliumEndpoint = cilium_v2.CiliumEndpoint{} return dfsu default: return obj } } // ObjToCiliumEndpoint attempts to cast object to a CiliumEndpoint object // and returns a deep copy if the castin succeeds. Otherwise, nil is returned. 
func ObjToCiliumEndpoint(obj interface{}) *types.CiliumEndpoint { ce, ok := obj.(*types.CiliumEndpoint) if ok { return ce } deletedObj, ok := obj.(cache.DeletedFinalStateUnknown) if ok { // Delete was not observed by the watcher but is // removed from kube-apiserver. This is the last // known state and the object no longer exists. ce, ok := deletedObj.Obj.(*types.CiliumEndpoint) if ok { return ce } } log.WithField(logfields.Object, logfields.Repr(obj)). Warn("Ignoring invalid v2 CiliumEndpoint") return nil }
// Copyright 2018-2020 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package groups

import (
	"context"
	"fmt"
	"time"

	"github.com/cilium/cilium/pkg/controller"
	"github.com/cilium/cilium/pkg/k8s"
	cilium_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
	"github.com/cilium/cilium/pkg/logging/logfields"
	"github.com/cilium/cilium/pkg/metrics"
	"github.com/sirupsen/logrus"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
	// maxNumberOfAttempts is the number of times to try to retrieve
	// information from a cloud provider before giving up.
	maxNumberOfAttempts = 5

	// sleepDuration is the time slept between attempts when information
	// cannot be retrieved from a cloud provider.
	sleepDuration = 5 * time.Second
)

var (
	// controllerManager owns the controllers that create, update and delete
	// derivative policies in the background.
	controllerManager = controller.NewManager()
)

// AddDerivativeCNPIfNeeded will create a new CNP if the given CNP has any rules
// that need to create a new derivative policy.
// It returns a boolean, true in case that all actions are correct, false if
// something fails.
func AddDerivativeCNPIfNeeded(cnp *cilium_v2.CiliumNetworkPolicy) bool { if !cnp.RequiresDerivative() { log.WithFields(logrus.Fields{ logfields.CiliumNetworkPolicyName: cnp.ObjectMeta.Name, logfields.K8sNamespace: cnp.ObjectMeta.Namespace, }).Debug("CNP does not have derivative policies, skipped") return true } controllerManager.UpdateController(fmt.Sprintf("add-derivative-cnp-%s", cnp.ObjectMeta.Name), controller.ControllerParams{ DoFunc: func(ctx context.Context) error { return addDerivativePolicy(ctx, cnp, false) }, }) return true } // AddDerivativeCCNPIfNeeded will create a new CCNP if the given NetworkPolicy has any rules // that need to create a new derivative policy. // It returns a boolean, true in case that all actions are correct, false if // something fails. func AddDerivativeCCNPIfNeeded(cnp *cilium_v2.CiliumNetworkPolicy) bool { if !cnp.RequiresDerivative() { log.WithFields(logrus.Fields{ logfields.CiliumClusterwideNetworkPolicyName: cnp.ObjectMeta.Name, }).Debug("CCNP does not have derivative policies, skipped") return true } controllerManager.UpdateController(fmt.Sprintf("add-derivative-ccnp-%s", cnp.ObjectMeta.Name), controller.ControllerParams{ DoFunc: func(ctx context.Context) error { return addDerivativePolicy(ctx, cnp, true) }, }) return true } // UpdateDerivativeCNPIfNeeded updates or creates a CNP if the given CNP has // any rule that needs to create a new derivative policy(eg: ToGroups). In case // that the new CNP does not have any derivative policy and the old one had // one, it will delete the old policy. // The function returns true if an update is required for the derivative policy // and false otherwise. 
func UpdateDerivativeCNPIfNeeded(newCNP *cilium_v2.CiliumNetworkPolicy, oldCNP *cilium_v2.CiliumNetworkPolicy) bool {
	// The old policy required a derivative but the new one no longer does:
	// schedule deletion of the stale derivative policies.
	if !newCNP.RequiresDerivative() && oldCNP.RequiresDerivative() {
		log.WithFields(logrus.Fields{
			logfields.CiliumNetworkPolicyName: newCNP.ObjectMeta.Name,
			logfields.K8sNamespace:            newCNP.ObjectMeta.Namespace,
		}).Info("New CNP does not have derivative policy, but old had. Deleting old policies")

		controllerManager.UpdateController(fmt.Sprintf("delete-derivative-cnp-%s", oldCNP.ObjectMeta.Name),
			controller.ControllerParams{
				DoFunc: func(ctx context.Context) error {
					return DeleteDerivativeCNP(ctx, oldCNP)
				},
			})
		return false
	}

	if !newCNP.RequiresDerivative() {
		return false
	}

	controllerManager.UpdateController(fmt.Sprintf("update-derivative-cnp-%s", newCNP.ObjectMeta.Name),
		controller.ControllerParams{
			DoFunc: func(ctx context.Context) error {
				return addDerivativePolicy(ctx, newCNP, false)
			},
		})
	return true
}

// UpdateDerivativeCCNPIfNeeded updates or creates a CCNP if the given CCNP has
// any rule that needs to create a new derivative policy(eg: ToGroups). In case
// that the new CCNP does not have any derivative policy and the old one had
// one, it will delete the old policy.
// The function returns true if an update is required for the derivative policy
// and false otherwise.
func UpdateDerivativeCCNPIfNeeded(newCCNP *cilium_v2.CiliumNetworkPolicy, oldCCNP *cilium_v2.CiliumNetworkPolicy) bool {
	// Same structure as the namespaced variant above, for cluster scope.
	if !newCCNP.RequiresDerivative() && oldCCNP.RequiresDerivative() {
		log.WithFields(logrus.Fields{
			logfields.CiliumClusterwideNetworkPolicyName: newCCNP.ObjectMeta.Name,
		}).Info("New CCNP does not have derivative policy, but old had. Deleting old policies")

		controllerManager.UpdateController(fmt.Sprintf("delete-derivative-ccnp-%s", oldCCNP.ObjectMeta.Name),
			controller.ControllerParams{
				DoFunc: func(ctx context.Context) error {
					return DeleteDerivativeCCNP(ctx, oldCCNP)
				},
			})
		return false
	}

	if !newCCNP.RequiresDerivative() {
		return false
	}

	controllerManager.UpdateController(fmt.Sprintf("update-derivative-ccnp-%s", newCCNP.ObjectMeta.Name),
		controller.ControllerParams{
			DoFunc: func(ctx context.Context) error {
				return addDerivativePolicy(ctx, newCCNP, true)
			},
		})
	return true
}

// DeleteDerivativeFromCache deletes the given CNP from the groupsCNPCache so
// that it does not continue polling new data.
func DeleteDerivativeFromCache(cnp *cilium_v2.CiliumNetworkPolicy) {
	groupsCNPCache.DeleteCNP(cnp)
}

// DeleteDerivativeCNP if the given policy has a derivative constraint, the
// given CNP will be deleted from store and the cache.
func DeleteDerivativeCNP(ctx context.Context, cnp *cilium_v2.CiliumNetworkPolicy) error {
	scopedLog := log.WithFields(logrus.Fields{
		logfields.CiliumNetworkPolicyName: cnp.ObjectMeta.Name,
		logfields.K8sNamespace:            cnp.ObjectMeta.Namespace,
	})

	if !cnp.RequiresDerivative() {
		scopedLog.Debug("CNP does not have derivative policies, skipped")
		return nil
	}

	// Derivative policies are labeled with their parent's UID, so a
	// label-selected DeleteCollection removes all of them at once.
	err := k8s.CiliumClient().CiliumV2().CiliumNetworkPolicies(cnp.ObjectMeta.Namespace).DeleteCollection(
		ctx,
		v1.DeleteOptions{},
		v1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", parentCNP, cnp.ObjectMeta.UID)})

	if err != nil {
		return err
	}

	DeleteDerivativeFromCache(cnp)
	return nil
}

// DeleteDerivativeCCNP if the given policy has a derivative constraint, the
// given CCNP will be deleted from store and the cache.
func DeleteDerivativeCCNP(ctx context.Context, ccnp *cilium_v2.CiliumNetworkPolicy) error {
	scopedLog := log.WithFields(logrus.Fields{
		logfields.CiliumClusterwideNetworkPolicyName: ccnp.ObjectMeta.Name,
	})

	if !ccnp.RequiresDerivative() {
		scopedLog.Debug("CCNP does not have derivative policies, skipped")
		return nil
	}

	// Derivative CCNPs are labeled with the parent's UID; delete them all in
	// one collection call.
	err := k8s.CiliumClient().CiliumV2().CiliumClusterwideNetworkPolicies().DeleteCollection(
		ctx,
		v1.DeleteOptions{},
		v1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", parentCNP, ccnp.ObjectMeta.UID)})
	if err != nil {
		return err
	}

	DeleteDerivativeFromCache(ccnp)
	return nil
}

// addDerivativePolicy creates the derivative policy for the given CNP/CCNP
// (clusterScoped selects which), retrying up to maxNumberOfAttempts times,
// stores it in the groupsCNPCache and updates the derivative status on the
// parent policy.
func addDerivativePolicy(ctx context.Context, cnp *cilium_v2.CiliumNetworkPolicy, clusterScoped bool) error {
	var (
		scopedLog        *logrus.Entry
		derivativePolicy v1.Object
		derivativeCNP    *cilium_v2.CiliumNetworkPolicy
		derivativeCCNP   *cilium_v2.CiliumClusterwideNetworkPolicy

		derivativeErr, err error
	)
	if clusterScoped {
		scopedLog = log.WithFields(logrus.Fields{
			logfields.CiliumClusterwideNetworkPolicyName: cnp.ObjectMeta.Name,
		})
	} else {
		scopedLog = log.WithFields(logrus.Fields{
			logfields.CiliumNetworkPolicyName: cnp.ObjectMeta.Name,
			logfields.K8sNamespace:            cnp.ObjectMeta.Namespace,
		})
	}

	// The maxNumberOfAttempts is to not hit the limits of cloud providers API.
	// Also, the derivativeErr is never returned, if not the controller will
	// hit this function and the cloud providers limit will be raised. This
	// will cause a disaster, due all other policies will hit the limit as
	// well.
	// If the createDerivativeCNP() fails, a new all block rule will be inserted and
	// the derivative status in the parent policy will be updated with the
	// error.
	for numAttempts := 0; numAttempts <= maxNumberOfAttempts; numAttempts++ {
		if clusterScoped {
			derivativeCCNP, derivativeErr = createDerivativeCCNP(ctx, cnp)
			derivativePolicy = derivativeCCNP
		} else {
			derivativeCNP, derivativeErr = createDerivativeCNP(ctx, cnp)
			derivativePolicy = derivativeCNP
		}
		if derivativeErr == nil {
			break
		}
		metrics.PolicyImportErrorsTotal.Inc()
		scopedLog.WithError(derivativeErr).Error("Cannot create derivative rule. Installing deny-all rule.")
		statusErr := updateDerivativeStatus(cnp, derivativePolicy.GetName(), derivativeErr, clusterScoped)
		if statusErr != nil {
			scopedLog.WithError(statusErr).Error("Cannot update status for derivative policy")
		}
		// Back off between attempts to stay under provider API rate limits.
		time.Sleep(sleepDuration)
	}
	groupsCNPCache.UpdateCNP(cnp)
	if clusterScoped {
		_, err = updateOrCreateCCNP(derivativeCCNP)
	} else {
		_, err = updateOrCreateCNP(derivativeCNP)
	}
	if err != nil {
		statusErr := updateDerivativeStatus(cnp, derivativePolicy.GetName(), err, clusterScoped)
		if statusErr != nil {
			metrics.PolicyImportErrorsTotal.Inc()
			scopedLog.WithError(err).Error("Cannot update status for derivative policy")
		}
		return statusErr
	}

	err = updateDerivativeStatus(cnp, derivativePolicy.GetName(), nil, clusterScoped)
	if err != nil {
		scopedLog.WithError(err).Error("Cannot update status for derivative policy")
	}
	return err
}

policy/groups: replace retry from addDerivativePolicy with controller retry Fixes: task-2 of #13083 Signed-off-by: Pratyush Singhal <a9c8b93e7a34b1575d30754466420227262f1b06@gmail.com>

// Copyright 2018-2020 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package groups

import (
	"context"
	"fmt"

	"github.com/cilium/cilium/pkg/controller"
	"github.com/cilium/cilium/pkg/k8s"
	cilium_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
	"github.com/cilium/cilium/pkg/logging/logfields"
	"github.com/cilium/cilium/pkg/metrics"

	"github.com/sirupsen/logrus"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var (
	// controllerManager runs the derivative-policy add/update/delete
	// operations asynchronously with the controller framework's built-in
	// retry on failure.
	controllerManager = controller.NewManager()
)

// AddDerivativeCNPIfNeeded will create a new CNP if the given CNP has any rules
// that need to create a new derivative policy.
// It returns a boolean, true in case that all actions are correct, false if
// something fails.
func AddDerivativeCNPIfNeeded(cnp *cilium_v2.CiliumNetworkPolicy) bool {
	if !cnp.RequiresDerivative() {
		log.WithFields(logrus.Fields{
			logfields.CiliumNetworkPolicyName: cnp.ObjectMeta.Name,
			logfields.K8sNamespace:            cnp.ObjectMeta.Namespace,
		}).Debug("CNP does not have derivative policies, skipped")
		return true
	}
	controllerManager.UpdateController(fmt.Sprintf("add-derivative-cnp-%s", cnp.ObjectMeta.Name),
		controller.ControllerParams{
			DoFunc: func(ctx context.Context) error {
				return addDerivativePolicy(ctx, cnp, false)
			},
		})
	return true
}

// AddDerivativeCCNPIfNeeded will create a new CCNP if the given NetworkPolicy has any rules
// that need to create a new derivative policy.
// It returns a boolean, true in case that all actions are correct, false if
// something fails.
func AddDerivativeCCNPIfNeeded(cnp *cilium_v2.CiliumNetworkPolicy) bool {
	if !cnp.RequiresDerivative() {
		log.WithFields(logrus.Fields{
			logfields.CiliumClusterwideNetworkPolicyName: cnp.ObjectMeta.Name,
		}).Debug("CCNP does not have derivative policies, skipped")
		return true
	}
	controllerManager.UpdateController(fmt.Sprintf("add-derivative-ccnp-%s", cnp.ObjectMeta.Name),
		controller.ControllerParams{
			DoFunc: func(ctx context.Context) error {
				return addDerivativePolicy(ctx, cnp, true)
			},
		})
	return true
}

// UpdateDerivativeCNPIfNeeded updates or creates a CNP if the given CNP has
// any rule that needs to create a new derivative policy (eg: ToGroups). In case
// that the new CNP does not have any derivative policy and the old one had
// one, it will delete the old policy.
// The function returns true if an update is required for the derivative policy
// and false otherwise.
func UpdateDerivativeCNPIfNeeded(newCNP *cilium_v2.CiliumNetworkPolicy, oldCNP *cilium_v2.CiliumNetworkPolicy) bool {
	if !newCNP.RequiresDerivative() && oldCNP.RequiresDerivative() {
		log.WithFields(logrus.Fields{
			logfields.CiliumNetworkPolicyName: newCNP.ObjectMeta.Name,
			logfields.K8sNamespace:            newCNP.ObjectMeta.Namespace,
		}).Info("New CNP does not have derivative policy, but old had. Deleting old policies")

		controllerManager.UpdateController(fmt.Sprintf("delete-derivative-cnp-%s", oldCNP.ObjectMeta.Name),
			controller.ControllerParams{
				DoFunc: func(ctx context.Context) error {
					return DeleteDerivativeCNP(ctx, oldCNP)
				},
			})
		return false
	}

	if !newCNP.RequiresDerivative() {
		return false
	}

	controllerManager.UpdateController(fmt.Sprintf("update-derivative-cnp-%s", newCNP.ObjectMeta.Name),
		controller.ControllerParams{
			DoFunc: func(ctx context.Context) error {
				return addDerivativePolicy(ctx, newCNP, false)
			},
		})
	return true
}

// UpdateDerivativeCCNPIfNeeded updates or creates a CCNP if the given CCNP has
// any rule that needs to create a new derivative policy (eg: ToGroups). In case
// that the new CCNP does not have any derivative policy and the old one had
// one, it will delete the old policy.
// The function returns true if an update is required for the derivative policy
// and false otherwise.
func UpdateDerivativeCCNPIfNeeded(newCCNP *cilium_v2.CiliumNetworkPolicy, oldCCNP *cilium_v2.CiliumNetworkPolicy) bool {
	if !newCCNP.RequiresDerivative() && oldCCNP.RequiresDerivative() {
		log.WithFields(logrus.Fields{
			logfields.CiliumClusterwideNetworkPolicyName: newCCNP.ObjectMeta.Name,
		}).Info("New CCNP does not have derivative policy, but old had. Deleting old policies")

		controllerManager.UpdateController(fmt.Sprintf("delete-derivative-ccnp-%s", oldCCNP.ObjectMeta.Name),
			controller.ControllerParams{
				DoFunc: func(ctx context.Context) error {
					return DeleteDerivativeCCNP(ctx, oldCCNP)
				},
			})
		return false
	}

	if !newCCNP.RequiresDerivative() {
		return false
	}

	controllerManager.UpdateController(fmt.Sprintf("update-derivative-ccnp-%s", newCCNP.ObjectMeta.Name),
		controller.ControllerParams{
			DoFunc: func(ctx context.Context) error {
				return addDerivativePolicy(ctx, newCCNP, true)
			},
		})
	return true
}

// DeleteDerivativeFromCache deletes the given CNP from the groupsCNPCache to
// not continue polling new data.
func DeleteDerivativeFromCache(cnp *cilium_v2.CiliumNetworkPolicy) {
	groupsCNPCache.DeleteCNP(cnp)
}

// DeleteDerivativeCNP if the given policy has a derivative constraint, the
// given CNP will be deleted from store and the cache.
func DeleteDerivativeCNP(ctx context.Context, cnp *cilium_v2.CiliumNetworkPolicy) error {
	scopedLog := log.WithFields(logrus.Fields{
		logfields.CiliumNetworkPolicyName: cnp.ObjectMeta.Name,
		logfields.K8sNamespace:            cnp.ObjectMeta.Namespace,
	})

	if !cnp.RequiresDerivative() {
		scopedLog.Debug("CNP does not have derivative policies, skipped")
		return nil
	}

	// Derivative CNPs are labeled with the parent's UID; delete them all in
	// one collection call.
	err := k8s.CiliumClient().CiliumV2().CiliumNetworkPolicies(cnp.ObjectMeta.Namespace).DeleteCollection(
		ctx,
		v1.DeleteOptions{},
		v1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", parentCNP, cnp.ObjectMeta.UID)})
	if err != nil {
		return err
	}

	DeleteDerivativeFromCache(cnp)
	return nil
}

// DeleteDerivativeCCNP if the given policy has a derivative constraint, the
// given CCNP will be deleted from store and the cache.
func DeleteDerivativeCCNP(ctx context.Context, ccnp *cilium_v2.CiliumNetworkPolicy) error {
	scopedLog := log.WithFields(logrus.Fields{
		logfields.CiliumClusterwideNetworkPolicyName: ccnp.ObjectMeta.Name,
	})

	if !ccnp.RequiresDerivative() {
		scopedLog.Debug("CCNP does not have derivative policies, skipped")
		return nil
	}

	err := k8s.CiliumClient().CiliumV2().CiliumClusterwideNetworkPolicies().DeleteCollection(
		ctx,
		v1.DeleteOptions{},
		v1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", parentCNP, ccnp.ObjectMeta.UID)})
	if err != nil {
		return err
	}

	DeleteDerivativeFromCache(ccnp)
	return nil
}

// addDerivativePolicy creates the derivative policy for the given CNP/CCNP
// (clusterScoped selects which) in a single attempt — retries are handled by
// the calling controller — caches the parent in groupsCNPCache, pushes the
// derivative to the apiserver, and records the outcome in the parent's
// derivative status.
func addDerivativePolicy(ctx context.Context, cnp *cilium_v2.CiliumNetworkPolicy, clusterScoped bool) error {
	var (
		scopedLog        *logrus.Entry
		derivativePolicy v1.Object
		derivativeCNP    *cilium_v2.CiliumNetworkPolicy
		derivativeCCNP   *cilium_v2.CiliumClusterwideNetworkPolicy

		derivativeErr, err error
	)
	if clusterScoped {
		scopedLog = log.WithFields(logrus.Fields{
			logfields.CiliumClusterwideNetworkPolicyName: cnp.ObjectMeta.Name,
		})
	} else {
		scopedLog = log.WithFields(logrus.Fields{
			logfields.CiliumNetworkPolicyName: cnp.ObjectMeta.Name,
			logfields.K8sNamespace:            cnp.ObjectMeta.Namespace,
		})
	}

	// If the createDerivativeCNP() fails, a new all block rule will be inserted and
	// the derivative status in the parent policy will be updated with the
	// error.
	if clusterScoped {
		derivativeCCNP, derivativeErr = createDerivativeCCNP(ctx, cnp)
		derivativePolicy = derivativeCCNP
	} else {
		derivativeCNP, derivativeErr = createDerivativeCNP(ctx, cnp)
		derivativePolicy = derivativeCNP
	}

	if derivativeErr != nil {
		metrics.PolicyImportErrorsTotal.Inc()
		scopedLog.WithError(derivativeErr).Error("Cannot create derivative rule. Installing deny-all rule.")
		statusErr := updateDerivativeStatus(cnp, derivativePolicy.GetName(), derivativeErr, clusterScoped)
		if statusErr != nil {
			scopedLog.WithError(statusErr).Error("Cannot update status for derivative policy")
		}
		// Returning the error lets the controller retry this operation.
		return derivativeErr
	}

	groupsCNPCache.UpdateCNP(cnp)
	if clusterScoped {
		_, err = updateOrCreateCCNP(derivativeCCNP)
	} else {
		_, err = updateOrCreateCNP(derivativeCNP)
	}
	if err != nil {
		statusErr := updateDerivativeStatus(cnp, derivativePolicy.GetName(), err, clusterScoped)
		if statusErr != nil {
			metrics.PolicyImportErrorsTotal.Inc()
			scopedLog.WithError(err).Error("Cannot update status for derivative policy")
		}
		return statusErr
	}

	err = updateDerivativeStatus(cnp, derivativePolicy.GetName(), nil, clusterScoped)
	if err != nil {
		scopedLog.WithError(err).Error("Cannot update status for derivative policy")
	}
	return err
}
package ginpprof

import (
	"net/http/pprof"
	"strings"

	"github.com/gin-gonic/gin"
)

// NOTE: This source code is from https://github.com/DeanThompson/ginpprof/blob/master/pprof.go
// This source is not used anymore.

// Wrap adds several routes from package `net/http/pprof` to *gin.Engine object
func Wrap(router *gin.Engine) {
	WrapGroup(&router.RouterGroup)
}

// Wrapper make sure we are backward compatible
var Wrapper = Wrap

// WrapGroup adds several routes from package `net/http/pprof` to *gin.RouterGroup object
func WrapGroup(router *gin.RouterGroup) {
	routers := []struct {
		Method  string
		Path    string
		Handler gin.HandlerFunc
	}{
		{"GET", "/debug/pprof/", IndexHandler()},
		{"GET", "/debug/pprof/heap", HeapHandler()},
		{"GET", "/debug/pprof/goroutine", GoroutineHandler()},
		{"GET", "/debug/pprof/block", BlockHandler()},
		{"GET", "/debug/pprof/threadcreate", ThreadCreateHandler()},
		{"GET", "/debug/pprof/cmdline", CmdlineHandler()},
		{"GET", "/debug/pprof/profile", ProfileHandler()},
		{"GET", "/debug/pprof/symbol", SymbolHandler()},
		{"POST", "/debug/pprof/symbol", SymbolHandler()},
		{"GET", "/debug/pprof/trace", TraceHandler()},
		{"GET", "/debug/pprof/mutex", MutexHandler()},
	}

	basePath := strings.TrimSuffix(router.BasePath(), "/")
	var prefix string

	// If the group is already mounted under /debug or /debug/pprof, strip the
	// duplicated prefix from the route path so the final URL stays
	// /.../debug/pprof/<name>.
	switch {
	case basePath == "":
		prefix = ""
	case strings.HasSuffix(basePath, "/debug"):
		prefix = "/debug"
	case strings.HasSuffix(basePath, "/debug/pprof"):
		prefix = "/debug/pprof"
	}

	for _, r := range routers {
		router.Handle(r.Method, strings.TrimPrefix(r.Path, prefix), r.Handler)
	}
}

// IndexHandler will pass the call from /debug/pprof to pprof
func IndexHandler() gin.HandlerFunc {
	return func(ctx *gin.Context) {
		pprof.Index(ctx.Writer, ctx.Request)
	}
}

// HeapHandler will pass the call from /debug/pprof/heap to pprof
func HeapHandler() gin.HandlerFunc {
	return func(ctx *gin.Context) {
		pprof.Handler("heap").ServeHTTP(ctx.Writer, ctx.Request)
	}
}

// GoroutineHandler will pass the call from /debug/pprof/goroutine to pprof
func GoroutineHandler() gin.HandlerFunc {
	return func(ctx *gin.Context) {
		pprof.Handler("goroutine").ServeHTTP(ctx.Writer, ctx.Request)
	}
}

// BlockHandler will pass the call from /debug/pprof/block to pprof
func BlockHandler() gin.HandlerFunc {
	return func(ctx *gin.Context) {
		pprof.Handler("block").ServeHTTP(ctx.Writer, ctx.Request)
	}
}

// ThreadCreateHandler will pass the call from /debug/pprof/threadcreate to pprof
func ThreadCreateHandler() gin.HandlerFunc {
	return func(ctx *gin.Context) {
		pprof.Handler("threadcreate").ServeHTTP(ctx.Writer, ctx.Request)
	}
}

// CmdlineHandler will pass the call from /debug/pprof/cmdline to pprof
func CmdlineHandler() gin.HandlerFunc {
	return func(ctx *gin.Context) {
		pprof.Cmdline(ctx.Writer, ctx.Request)
	}
}

// ProfileHandler will pass the call from /debug/pprof/profile to pprof
func ProfileHandler() gin.HandlerFunc {
	return func(ctx *gin.Context) {
		pprof.Profile(ctx.Writer, ctx.Request)
	}
}

// SymbolHandler will pass the call from /debug/pprof/symbol to pprof
func SymbolHandler() gin.HandlerFunc {
	return func(ctx *gin.Context) {
		pprof.Symbol(ctx.Writer, ctx.Request)
	}
}

// TraceHandler will pass the call from /debug/pprof/trace to pprof
func TraceHandler() gin.HandlerFunc {
	return func(ctx *gin.Context) {
		pprof.Trace(ctx.Writer, ctx.Request)
	}
}

// MutexHandler will pass the call from /debug/pprof/mutex to pprof
func MutexHandler() gin.HandlerFunc {
	return func(ctx *gin.Context) {
		pprof.Handler("mutex").ServeHTTP(ctx.Writer, ctx.Request)
	}
}

delete unused file
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package tiller

import (
	"bytes"
	"errors"
	"fmt"
	"log"
	"path"
	"regexp"
	"strings"

	"github.com/technosophos/moniker"
	ctx "golang.org/x/net/context"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/typed/discovery"

	"k8s.io/helm/pkg/chartutil"
	"k8s.io/helm/pkg/kube"
	"k8s.io/helm/pkg/proto/hapi/chart"
	"k8s.io/helm/pkg/proto/hapi/release"
	"k8s.io/helm/pkg/proto/hapi/services"
	relutil "k8s.io/helm/pkg/releaseutil"
	"k8s.io/helm/pkg/storage/driver"
	"k8s.io/helm/pkg/tiller/environment"
	"k8s.io/helm/pkg/timeconv"
	"k8s.io/helm/pkg/version"
)

// releaseNameMaxLen is the maximum length of a release name.
//
// As of Kubernetes 1.4, the max limit on a name is 63 chars. We reserve 10 for
// charts to add data. Effectively, that gives us 53 chars.
// See https://github.com/kubernetes/helm/issues/1528
const releaseNameMaxLen = 53

// NOTESFILE_SUFFIX that we want to treat special. It goes through the templating engine
// but it's not a yaml file (resource) hence can't have hooks, etc. And the user actually
// wants to see this file after rendering in the status command. However, it must be a suffix
// since there can be filepath in front of it.
const notesFileSuffix = "NOTES.txt"

var (
	// errMissingChart indicates that a chart was not provided.
	errMissingChart = errors.New("no chart provided")
	// errMissingRelease indicates that a release (name) was not provided.
	errMissingRelease = errors.New("no release provided")
	// errInvalidRevision indicates that an invalid release revision number was provided.
	errInvalidRevision = errors.New("invalid release revision")
)

// ListDefaultLimit is the default limit for number of items returned in a list.
var ListDefaultLimit int64 = 512

// ValidName is a regular expression for names.
//
// According to the Kubernetes help text, the regular expression it uses is:
//
//	(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?
//
// We modified that. First, we added start and end delimiters. Second, we changed
// the final ? to + to require that the pattern match at least once. This modification
// prevents an empty string from matching.
var ValidName = regexp.MustCompile("^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])+$")

// ReleaseServer implements the server-side gRPC endpoint for the HAPI services.
type ReleaseServer struct {
	// env holds the storage backend, kube client and template engines.
	env *environment.Environment
	// clientset is used for Kubernetes API discovery (capabilities).
	clientset internalclientset.Interface
}

// NewReleaseServer creates a new release server.
func NewReleaseServer(env *environment.Environment, clientset internalclientset.Interface) *ReleaseServer {
	return &ReleaseServer{
		env:       env,
		clientset: clientset,
	}
}

// ListReleases lists the releases found by the server.
func (s *ReleaseServer) ListReleases(req *services.ListReleasesRequest, stream services.ReleaseService_ListReleasesServer) error {
	// Default to listing only DEPLOYED releases when no status filter is given.
	if len(req.StatusCodes) == 0 {
		req.StatusCodes = []release.Status_Code{release.Status_DEPLOYED}
	}
	//rels, err := s.env.Releases.ListDeployed()
	rels, err := s.env.Releases.ListFilterAll(func(r *release.Release) bool {
		for _, sc := range req.StatusCodes {
			if sc == r.Info.Status.Code {
				return true
			}
		}
		return false
	})
	if err != nil {
		return err
	}

	// Optional regex filter on release names.
	if len(req.Filter) != 0 {
		rels, err = filterReleases(req.Filter, rels)
		if err != nil {
			return err
		}
	}

	total := int64(len(rels))

	switch req.SortBy {
	case services.ListSort_NAME:
		relutil.SortByName(rels)
	case services.ListSort_LAST_RELEASED:
		relutil.SortByDate(rels)
	}

	if req.SortOrder == services.ListSort_DESC {
		ll := len(rels)
		rr := make([]*release.Release, ll)
		for i, item := range rels {
			rr[ll-i-1] = item
		}
		rels = rr
	}

	l := int64(len(rels))
	if req.Offset != "" {
		// Offset is a release name; resume the listing from that item.
		i := -1
		for ii, cur := range rels {
			if cur.Name == req.Offset {
				i = ii
			}
		}
		if i == -1 {
			return fmt.Errorf("offset %q not found", req.Offset)
		}
		if len(rels) < i {
			return fmt.Errorf("no items after %q", req.Offset)
		}

		rels = rels[i:]
		l = int64(len(rels))
	}

	if req.Limit == 0 {
		req.Limit = ListDefaultLimit
	}

	// next carries the name of the first item beyond this page so the client
	// can pass it back as Offset.
	next := ""
	if l > req.Limit {
		next = rels[req.Limit].Name
		rels = rels[0:req.Limit]
		l = int64(len(rels))
	}

	res := &services.ListReleasesResponse{
		Next:     next,
		Count:    l,
		Total:    total,
		Releases: rels,
	}
	return stream.Send(res)
}

// filterReleases keeps only the releases whose name matches the given regular
// expression. On a bad pattern the unfiltered list is returned with the error.
func filterReleases(filter string, rels []*release.Release) ([]*release.Release, error) {
	preg, err := regexp.Compile(filter)
	if err != nil {
		return rels, err
	}
	matches := []*release.Release{}
	for _, r := range rels {
		if preg.MatchString(r.Name) {
			matches = append(matches, r)
		}
	}
	return matches, nil
}

// GetVersion sends the server version.
func (s *ReleaseServer) GetVersion(c ctx.Context, req *services.GetVersionRequest) (*services.GetVersionResponse, error) {
	v := version.GetVersionProto()
	return &services.GetVersionResponse{Version: v}, nil
}

// GetReleaseStatus gets the status information for a named release.
func (s *ReleaseServer) GetReleaseStatus(c ctx.Context, req *services.GetReleaseStatusRequest) (*services.GetReleaseStatusResponse, error) {
	if !ValidName.MatchString(req.Name) {
		return nil, errMissingRelease
	}

	var rel *release.Release

	// Version <= 0 means "latest revision"; otherwise fetch the exact revision.
	if req.Version <= 0 {
		var err error
		rel, err = s.env.Releases.Last(req.Name)
		if err != nil {
			return nil, fmt.Errorf("getting deployed release %q: %s", req.Name, err)
		}
	} else {
		var err error
		if rel, err = s.env.Releases.Get(req.Name, req.Version); err != nil {
			return nil, fmt.Errorf("getting release '%s' (v%d): %s", req.Name, req.Version, err)
		}
	}

	if rel.Info == nil {
		return nil, errors.New("release info is missing")
	}
	if rel.Chart == nil {
		return nil, errors.New("release chart is missing")
	}

	sc := rel.Info.Status.Code
	statusResp := &services.GetReleaseStatusResponse{
		Name:      rel.Name,
		Namespace: rel.Namespace,
		Info:      rel.Info,
	}

	// Ok, we got the status of the release as we had jotted down, now we need to match the
	// manifest we stashed away with reality from the cluster.
	kubeCli := s.env.KubeClient
	resp, err := kubeCli.Get(rel.Namespace, bytes.NewBufferString(rel.Manifest))
	if sc == release.Status_DELETED || sc == release.Status_FAILED {
		// Skip errors if this is already deleted or failed.
		return statusResp, nil
	} else if err != nil {
		log.Printf("warning: Get for %s failed: %v", rel.Name, err)
		return nil, err
	}
	rel.Info.Status.Resources = resp
	return statusResp, nil
}

// GetReleaseContent gets all of the stored information for the given release.
func (s *ReleaseServer) GetReleaseContent(c ctx.Context, req *services.GetReleaseContentRequest) (*services.GetReleaseContentResponse, error) {
	if !ValidName.MatchString(req.Name) {
		return nil, errMissingRelease
	}
	// Version <= 0 means "currently deployed"; otherwise the exact revision.
	if req.Version <= 0 {
		rel, err := s.env.Releases.Deployed(req.Name)
		return &services.GetReleaseContentResponse{Release: rel}, err
	}
	rel, err := s.env.Releases.Get(req.Name, req.Version)
	return &services.GetReleaseContentResponse{Release: rel}, err
}

// UpdateRelease takes an existing release and new information, and upgrades the release.
func (s *ReleaseServer) UpdateRelease(c ctx.Context, req *services.UpdateReleaseRequest) (*services.UpdateReleaseResponse, error) {
	currentRelease, updatedRelease, err := s.prepareUpdate(req)
	if err != nil {
		return nil, err
	}

	res, err := s.performUpdate(currentRelease, updatedRelease, req)
	if err != nil {
		return res, err
	}

	// Persist the new revision only for real runs.
	if !req.DryRun {
		if err := s.env.Releases.Create(updatedRelease); err != nil {
			return res, err
		}
	}

	return res, nil
}

// performUpdate runs the upgrade hooks and applies the updated manifest to the
// cluster, recording SUPERSEDED/FAILED/DEPLOYED status transitions.
func (s *ReleaseServer) performUpdate(originalRelease, updatedRelease *release.Release, req *services.UpdateReleaseRequest) (*services.UpdateReleaseResponse, error) {
	res := &services.UpdateReleaseResponse{Release: updatedRelease}

	if req.DryRun {
		log.Printf("Dry run for %s", updatedRelease.Name)
		return res, nil
	}

	// pre-upgrade hooks
	if !req.DisableHooks {
		if err := s.execHook(updatedRelease.Hooks, updatedRelease.Name, updatedRelease.Namespace, preUpgrade, req.Timeout); err != nil {
			return res, err
		}
	}

	if err := s.performKubeUpdate(originalRelease, updatedRelease, req.Recreate); err != nil {
		log.Printf("warning: Release Upgrade %q failed: %s", updatedRelease.Name, err)
		originalRelease.Info.Status.Code = release.Status_SUPERSEDED
		updatedRelease.Info.Status.Code = release.Status_FAILED
		s.recordRelease(originalRelease, true)
		s.recordRelease(updatedRelease, false)
		return res, err
	}

	// post-upgrade hooks
	if !req.DisableHooks {
		if err := s.execHook(updatedRelease.Hooks, updatedRelease.Name, updatedRelease.Namespace, postUpgrade, req.Timeout); err != nil {
			return res, err
		}
	}

	originalRelease.Info.Status.Code = release.Status_SUPERSEDED
	s.recordRelease(originalRelease, true)

	updatedRelease.Info.Status.Code = release.Status_DEPLOYED

	return res, nil
}

// reuseValues copies values from the current release to a new release if the new release does not have any values.
//
// If the request already has values, or if there are no values in the current release, this does nothing.
func (s *ReleaseServer) reuseValues(req *services.UpdateReleaseRequest, current *release.Release) {
	// "{}\n" is what an empty YAML values document serializes to, so it is
	// treated the same as no values at all.
	if (req.Values == nil || req.Values.Raw == "" || req.Values.Raw == "{}\n") && current.Config != nil && current.Config.Raw != "" && current.Config.Raw != "{}\n" {
		log.Printf("Copying values from %s (v%d) to new release.", current.Name, current.Version)
		req.Values = current.Config
	}
}

// prepareUpdate builds an updated release for an update operation.
func (s *ReleaseServer) prepareUpdate(req *services.UpdateReleaseRequest) (*release.Release, *release.Release, error) {
	if !ValidName.MatchString(req.Name) {
		return nil, nil, errMissingRelease
	}

	if req.Chart == nil {
		return nil, nil, errMissingChart
	}

	// finds the non-deleted release with the given name
	currentRelease, err := s.env.Releases.Last(req.Name)
	if err != nil {
		return nil, nil, err
	}

	// If new values were not supplied in the upgrade, re-use the existing values.
	s.reuseValues(req, currentRelease)

	// Increment revision count. This is passed to templates, and also stored on
	// the release object.
	revision := currentRelease.Version + 1

	ts := timeconv.Now()
	options := chartutil.ReleaseOptions{
		Name:      req.Name,
		Time:      ts,
		Namespace: currentRelease.Namespace,
		IsUpgrade: true,
		Revision:  int(revision),
	}

	caps, err := capabilities(s.clientset.Discovery())
	if err != nil {
		return nil, nil, err
	}
	valuesToRender, err := chartutil.ToRenderValuesCaps(req.Chart, req.Values, options, caps)
	if err != nil {
		return nil, nil, err
	}

	hooks, manifestDoc, notesTxt, err := s.renderResources(req.Chart, valuesToRender, caps.APIVersions)
	if err != nil {
		return nil, nil, err
	}

	// Store an updated release.
	updatedRelease := &release.Release{
		Name:      req.Name,
		Namespace: currentRelease.Namespace,
		Chart:     req.Chart,
		Config:    req.Values,
		Info: &release.Info{
			FirstDeployed: currentRelease.Info.FirstDeployed,
			LastDeployed:  ts,
			Status:        &release.Status{Code: release.Status_UNKNOWN},
		},
		Version:  revision,
		Manifest: manifestDoc.String(),
		Hooks:    hooks,
	}

	if len(notesTxt) > 0 {
		updatedRelease.Info.Status.Notes = notesTxt
	}

	err = validateManifest(s.env.KubeClient, currentRelease.Namespace, manifestDoc.Bytes())
	return currentRelease, updatedRelease, err
}

// RollbackRelease rolls back to a previous version of the given release.
func (s *ReleaseServer) RollbackRelease(c ctx.Context, req *services.RollbackReleaseRequest) (*services.RollbackReleaseResponse, error) {
	currentRelease, targetRelease, err := s.prepareRollback(req)
	if err != nil {
		return nil, err
	}

	res, err := s.performRollback(currentRelease, targetRelease, req)
	if err != nil {
		return res, err
	}

	// Persist the new revision only for real runs.
	if !req.DryRun {
		if err := s.env.Releases.Create(targetRelease); err != nil {
			return res, err
		}
	}

	return res, nil
}

// performRollback runs the rollback hooks and applies the target manifest,
// recording SUPERSEDED/FAILED/DEPLOYED status transitions.
func (s *ReleaseServer) performRollback(currentRelease, targetRelease *release.Release, req *services.RollbackReleaseRequest) (*services.RollbackReleaseResponse, error) {
	res := &services.RollbackReleaseResponse{Release: targetRelease}

	if req.DryRun {
		log.Printf("Dry run for %s", targetRelease.Name)
		return res, nil
	}

	// pre-rollback hooks
	if !req.DisableHooks {
		if err := s.execHook(targetRelease.Hooks, targetRelease.Name, targetRelease.Namespace, preRollback, req.Timeout); err != nil {
			return res, err
		}
	}

	if err := s.performKubeUpdate(currentRelease, targetRelease, req.Recreate); err != nil {
		log.Printf("warning: Release Rollback %q failed: %s", targetRelease.Name, err)
		currentRelease.Info.Status.Code = release.Status_SUPERSEDED
		targetRelease.Info.Status.Code = release.Status_FAILED
		s.recordRelease(currentRelease, true)
		s.recordRelease(targetRelease, false)
		return res, err
	}

	// post-rollback hooks
	if !req.DisableHooks {
		if err := s.execHook(targetRelease.Hooks, targetRelease.Name, targetRelease.Namespace, postRollback, req.Timeout); err != nil {
			return res, err
		}
	}

	currentRelease.Info.Status.Code = release.Status_SUPERSEDED
	s.recordRelease(currentRelease, true)

	targetRelease.Info.Status.Code = release.Status_DEPLOYED

	return res, nil
}

// performKubeUpdate diffs the current manifest against the target manifest and
// applies the changes through the Kubernetes client.
func (s *ReleaseServer) performKubeUpdate(currentRelease, targetRelease *release.Release, recreate bool) error {
	kubeCli := s.env.KubeClient
	current := bytes.NewBufferString(currentRelease.Manifest)
	target := bytes.NewBufferString(targetRelease.Manifest)
	return kubeCli.Update(targetRelease.Namespace, current, target, recreate)
}

// prepareRollback finds the previous release and prepares a new release object with
// the previous release's configuration
func (s *ReleaseServer) prepareRollback(req *services.RollbackReleaseRequest) (*release.Release, *release.Release, error) {
	switch {
	case !ValidName.MatchString(req.Name):
		return nil, nil, errMissingRelease
	case req.Version < 0:
		return nil, nil, errInvalidRevision
	}

	crls, err := s.env.Releases.Last(req.Name)
	if err != nil {
		return nil, nil, err
	}

	// Version 0 means "the revision just before the current one".
	rbv := req.Version
	if req.Version == 0 {
		rbv = crls.Version - 1
	}

	log.Printf("rolling back %s (current: v%d, target: v%d)", req.Name, crls.Version, rbv)

	prls, err := s.env.Releases.Get(req.Name, rbv)
	if err != nil {
		return nil, nil, err
	}

	// Store a new release object with previous release's configuration
	target := &release.Release{
		Name:      req.Name,
		Namespace: crls.Namespace,
		Chart:     prls.Chart,
		Config:    prls.Config,
		Info: &release.Info{
			FirstDeployed: crls.Info.FirstDeployed,
			LastDeployed:  timeconv.Now(),
			Status: &release.Status{
				Code:  release.Status_UNKNOWN,
				Notes: prls.Info.Status.Notes,
			},
		},
		Version:  crls.Version + 1,
		Manifest: prls.Manifest,
		Hooks:    prls.Hooks,
	}

	return crls, target, nil
}

// uniqName validates or generates a release name. A supplied name is granted
// when unused (or reusable per the rules below); otherwise up to maxTries
// random names are generated.
func (s *ReleaseServer) uniqName(start string, reuse bool) (string, error) {
	// If a name is supplied, we check to see if that name is taken. If not, it
	// is granted. If reuse is true and a deleted release with that name exists,
	// we re-grant it. Otherwise, an error is returned.
	if start != "" {
		if len(start) > releaseNameMaxLen {
			return "", fmt.Errorf("release name %q exceeds max length of %d", start, releaseNameMaxLen)
		}

		h, err := s.env.Releases.History(start)
		if err != nil || len(h) < 1 {
			return start, nil
		}
		relutil.Reverse(h, relutil.SortByRevision)
		rel := h[0]

		if st := rel.Info.Status.Code; reuse && (st == release.Status_DELETED || st == release.Status_FAILED) {
			// Allow re-use of names if the previous release is marked deleted.
			log.Printf("reusing name %q", start)
			return start, nil
		} else if reuse {
			return "", errors.New("cannot re-use a name that is still in use")
		}

		return "", fmt.Errorf("a release named %q already exists", start)
	}

	maxTries := 5
	for i := 0; i < maxTries; i++ {
		namer := moniker.New()
		name := namer.NameSep("-")
		if len(name) > releaseNameMaxLen {
			name = name[:releaseNameMaxLen]
		}
		if _, err := s.env.Releases.Get(name, 1); err == driver.ErrReleaseNotFound {
			return name, nil
		}
		log.Printf("info: Name %q is taken. Searching again.", name)
	}
	log.Printf("warning: No available release names found after %d tries", maxTries)
	return "ERROR", errors.New("no available release name found")
}

// engine picks the template engine requested by the chart metadata, falling
// back to the default engine when the requested one does not exist.
func (s *ReleaseServer) engine(ch *chart.Chart) environment.Engine {
	renderer := s.env.EngineYard.Default()
	if ch.Metadata.Engine != "" {
		if r, ok := s.env.EngineYard.Get(ch.Metadata.Engine); ok {
			renderer = r
		} else {
			log.Printf("warning: %s requested non-existent template engine %s", ch.Metadata.Name, ch.Metadata.Engine)
		}
	}
	return renderer
}

// InstallRelease installs a release and stores the release record.
func (s *ReleaseServer) InstallRelease(c ctx.Context, req *services.InstallReleaseRequest) (*services.InstallReleaseResponse, error) {
	rel, err := s.prepareRelease(req)
	if err != nil {
		log.Printf("Failed install prepare step: %s", err)
		res := &services.InstallReleaseResponse{Release: rel}

		// On dry run, append the manifest contents to a failed release. This is
		// a stop-gap until we can revisit an error backchannel post-2.0.
		if req.DryRun && strings.HasPrefix(err.Error(), "YAML parse error") {
			err = fmt.Errorf("%s\n%s", err, rel.Manifest)
		}
		return res, err
	}

	res, err := s.performRelease(rel, req)
	if err != nil {
		log.Printf("Failed install perform step: %s", err)
	}
	return res, err
}

// capabilities builds a Capabilities from discovery information.
func capabilities(disc discovery.DiscoveryInterface) (*chartutil.Capabilities, error) {
	// Both the server version and the supported API groups come from the
	// discovery client; failure of either aborts capability detection.
	sv, err := disc.ServerVersion()
	if err != nil {
		return nil, err
	}
	vs, err := getVersionSet(disc)
	if err != nil {
		return nil, fmt.Errorf("Could not get apiVersions from Kubernetes: %s", err)
	}
	return &chartutil.Capabilities{
		APIVersions:   vs,
		KubeVersion:   sv,
		TillerVersion: version.GetVersionProto(),
	}, nil
}

// prepareRelease builds a release for an install operation.
func (s *ReleaseServer) prepareRelease(req *services.InstallReleaseRequest) (*release.Release, error) {
	if req.Chart == nil {
		return nil, errMissingChart
	}

	// Pick (or validate) the release name; reuse of a deleted/failed name is
	// allowed when req.ReuseName is set.
	name, err := s.uniqName(req.Name, req.ReuseName)
	if err != nil {
		return nil, err
	}

	caps, err := capabilities(s.clientset.Discovery())
	if err != nil {
		return nil, err
	}
	// A fresh install always starts at revision 1.
	revision := 1
	ts := timeconv.Now()
	options := chartutil.ReleaseOptions{
		Name:      name,
		Time:      ts,
		Namespace: req.Namespace,
		Revision:  revision,
		IsInstall: true,
	}
	valuesToRender, err := chartutil.ToRenderValuesCaps(req.Chart, req.Values, options, caps)
	if err != nil {
		return nil, err
	}

	hooks, manifestDoc, notesTxt, err := s.renderResources(req.Chart, valuesToRender, caps.APIVersions)
	if err != nil {
		// Return a release with partial data so that client can show debugging
		// information.
		rel := &release.Release{
			Name:      name,
			Namespace: req.Namespace,
			Chart:     req.Chart,
			Config:    req.Values,
			Info: &release.Info{
				FirstDeployed: ts,
				LastDeployed:  ts,
				Status:        &release.Status{Code: release.Status_UNKNOWN},
			},
			Version: 0,
		}
		if manifestDoc != nil {
			rel.Manifest = manifestDoc.String()
		}
		return rel, err
	}

	// Store a release.
rel := &release.Release{ Name: name, Namespace: req.Namespace, Chart: req.Chart, Config: req.Values, Info: &release.Info{ FirstDeployed: ts, LastDeployed: ts, Status: &release.Status{Code: release.Status_UNKNOWN}, }, Manifest: manifestDoc.String(), Hooks: hooks, Version: int32(revision), } if len(notesTxt) > 0 { rel.Info.Status.Notes = notesTxt } err = validateManifest(s.env.KubeClient, req.Namespace, manifestDoc.Bytes()) return rel, err } func getVersionSet(client discovery.ServerGroupsInterface) (chartutil.VersionSet, error) { groups, err := client.ServerGroups() if err != nil { return chartutil.DefaultVersionSet, err } // FIXME: The Kubernetes test fixture for cli appears to always return nil // for calls to Discovery().ServerGroups(). So in this case, we return // the default API list. This is also a safe value to return in any other // odd-ball case. if groups == nil { return chartutil.DefaultVersionSet, nil } versions := unversioned.ExtractGroupVersions(groups) return chartutil.NewVersionSet(versions...), nil } func (s *ReleaseServer) renderResources(ch *chart.Chart, values chartutil.Values, vs chartutil.VersionSet) ([]*release.Hook, *bytes.Buffer, string, error) { renderer := s.engine(ch) files, err := renderer.Render(ch, values) if err != nil { return nil, nil, "", err } // NOTES.txt gets rendered like all the other files, but because it's not a hook nor a resource, // pull it out of here into a separate file so that we can actually use the output of the rendered // text file. We have to spin through this map because the file contains path information, so we // look for terminating NOTES.txt. We also remove it from the files so that we don't have to skip // it in the sortHooks. 
notes := "" for k, v := range files { if strings.HasSuffix(k, notesFileSuffix) { // Only apply the notes if it belongs to the parent chart // Note: Do not use filePath.Join since it creates a path with \ which is not expected if k == path.Join(ch.Metadata.Name, "templates", notesFileSuffix) { notes = v } delete(files, k) } } // Sort hooks, manifests, and partials. Only hooks and manifests are returned, // as partials are not used after renderer.Render. Empty manifests are also // removed here. hooks, manifests, err := sortManifests(files, vs, InstallOrder) if err != nil { // By catching parse errors here, we can prevent bogus releases from going // to Kubernetes. // // We return the files as a big blob of data to help the user debug parser // errors. b := bytes.NewBuffer(nil) for name, content := range files { if len(strings.TrimSpace(content)) == 0 { continue } b.WriteString("\n---\n# Source: " + name + "\n") b.WriteString(content) } return nil, b, "", err } // Aggregate all valid manifests into one big doc. b := bytes.NewBuffer(nil) for _, m := range manifests { b.WriteString("\n---\n# Source: " + m.name + "\n") b.WriteString(m.content) } return hooks, b, notes, nil } func (s *ReleaseServer) recordRelease(r *release.Release, reuse bool) { if reuse { if err := s.env.Releases.Update(r); err != nil { log.Printf("warning: Failed to update release %q: %s", r.Name, err) } } else if err := s.env.Releases.Create(r); err != nil { log.Printf("warning: Failed to record release %q: %s", r.Name, err) } } // performRelease runs a release. 
func (s *ReleaseServer) performRelease(r *release.Release, req *services.InstallReleaseRequest) (*services.InstallReleaseResponse, error) { res := &services.InstallReleaseResponse{Release: r} if req.DryRun { log.Printf("Dry run for %s", r.Name) return res, nil } // pre-install hooks if !req.DisableHooks { if err := s.execHook(r.Hooks, r.Name, r.Namespace, preInstall, req.Timeout); err != nil { return res, err } } switch h, err := s.env.Releases.History(req.Name); { // if this is a replace operation, append to the release history case req.ReuseName && err == nil && len(h) >= 1: // get latest release revision relutil.Reverse(h, relutil.SortByRevision) // old release old := h[0] // update old release status old.Info.Status.Code = release.Status_SUPERSEDED s.recordRelease(old, true) // update new release with next revision number // so as to append to the old release's history r.Version = old.Version + 1 if err := s.performKubeUpdate(old, r, false); err != nil { log.Printf("warning: Release replace %q failed: %s", r.Name, err) old.Info.Status.Code = release.Status_SUPERSEDED r.Info.Status.Code = release.Status_FAILED s.recordRelease(old, true) s.recordRelease(r, false) return res, err } default: // nothing to replace, create as normal // regular manifests b := bytes.NewBufferString(r.Manifest) if err := s.env.KubeClient.Create(r.Namespace, b); err != nil { log.Printf("warning: Release %q failed: %s", r.Name, err) r.Info.Status.Code = release.Status_FAILED s.recordRelease(r, false) return res, fmt.Errorf("release %s failed: %s", r.Name, err) } } // post-install hooks if !req.DisableHooks { if err := s.execHook(r.Hooks, r.Name, r.Namespace, postInstall, req.Timeout); err != nil { log.Printf("warning: Release %q failed post-install: %s", r.Name, err) r.Info.Status.Code = release.Status_FAILED s.recordRelease(r, false) return res, err } } // This is a tricky case. The release has been created, but the result // cannot be recorded. 
The truest thing to tell the user is that the
	// release was created. However, the user will not be able to do anything
	// further with this release.
	//
	// One possible strategy would be to do a timed retry to see if we can get
	// this stored in the future.
	r.Info.Status.Code = release.Status_DEPLOYED
	s.recordRelease(r, false)

	return res, nil
}

// execHook runs every hook in hs that is registered for the given hook event
// (e.g. pre-install, post-upgrade): each matching hook manifest is created in
// the cluster and then watched until ready. It stops and returns the error of
// the first hook that fails to create or complete.
func (s *ReleaseServer) execHook(hs []*release.Hook, name, namespace, hook string, timeout int64) error {
	kubeCli := s.env.KubeClient
	code, ok := events[hook]
	if !ok {
		return fmt.Errorf("unknown hook %q", hook)
	}

	log.Printf("Executing %s hooks for %s", hook, name)
	for _, h := range hs {
		found := false
		for _, e := range h.Events {
			if e == code {
				found = true
			}
		}
		// If this doesn't implement the hook, skip it.
		if !found {
			continue
		}

		b := bytes.NewBufferString(h.Manifest)
		if err := kubeCli.Create(namespace, b); err != nil {
			// Log the real hook event rather than a hard-coded "pre-install":
			// this function is shared by every hook type.
			log.Printf("warning: Release %q %s %s failed: %s", name, hook, h.Path, err)
			return err
		}
		// No way to rewind a bytes.Buffer()?
		b.Reset()
		b.WriteString(h.Manifest)
		if err := kubeCli.WatchUntilReady(namespace, b, timeout); err != nil {
			log.Printf("warning: Release %q %s %s could not complete: %s", name, hook, h.Path, err)
			return err
		}
		h.LastRun = timeconv.Now()
	}

	log.Printf("Hooks complete for %s %s", hook, name)
	return nil
}

// purgeReleases deletes the stored release records (all revisions passed in)
// from the release store.
func (s *ReleaseServer) purgeReleases(rels ...*release.Release) error {
	for _, rel := range rels {
		if _, err := s.env.Releases.Delete(rel.Name, rel.Version); err != nil {
			return err
		}
	}
	return nil
}

// UninstallRelease deletes all of the resources associated with this release, and marks the release DELETED.
func (s *ReleaseServer) UninstallRelease(c ctx.Context, req *services.UninstallReleaseRequest) (*services.UninstallReleaseResponse, error) { if !ValidName.MatchString(req.Name) { log.Printf("uninstall: Release not found: %s", req.Name) return nil, errMissingRelease } rels, err := s.env.Releases.History(req.Name) if err != nil { log.Printf("uninstall: Release not loaded: %s", req.Name) return nil, err } if len(rels) < 1 { return nil, errMissingRelease } relutil.SortByRevision(rels) rel := rels[len(rels)-1] // TODO: Are there any cases where we want to force a delete even if it's // already marked deleted? if rel.Info.Status.Code == release.Status_DELETED { if req.Purge { if err := s.purgeReleases(rels...); err != nil { log.Printf("uninstall: Failed to purge the release: %s", err) return nil, err } return &services.UninstallReleaseResponse{Release: rel}, nil } return nil, fmt.Errorf("the release named %q is already deleted", req.Name) } log.Printf("uninstall: Deleting %s", req.Name) rel.Info.Status.Code = release.Status_DELETING rel.Info.Deleted = timeconv.Now() res := &services.UninstallReleaseResponse{Release: rel} if !req.DisableHooks { if err := s.execHook(rel.Hooks, rel.Name, rel.Namespace, preDelete, req.Timeout); err != nil { return res, err } } vs, err := getVersionSet(s.clientset.Discovery()) if err != nil { return nil, fmt.Errorf("Could not get apiVersions from Kubernetes: %s", err) } // From here on out, the release is currently considered to be in Status_DELETING // state. if err := s.env.Releases.Update(rel); err != nil { log.Printf("uninstall: Failed to store updated release: %s", err) } manifests := splitManifests(rel.Manifest) _, files, err := sortManifests(manifests, vs, UninstallOrder) if err != nil { // We could instead just delete everything in no particular order. // FIXME: One way to delete at this point would be to try a label-based // deletion. 
The problem with this is that we could get a false positive // and delete something that was not legitimately part of this release. return nil, fmt.Errorf("corrupted release record. You must manually delete the resources: %s", err) } filesToKeep, filesToDelete := filterManifestsToKeep(files) if len(filesToKeep) > 0 { res.Info = summarizeKeptManifests(filesToKeep) } // Collect the errors, and return them later. es := []string{} for _, file := range filesToDelete { b := bytes.NewBufferString(file.content) if err := s.env.KubeClient.Delete(rel.Namespace, b); err != nil { log.Printf("uninstall: Failed deletion of %q: %s", req.Name, err) if err == kube.ErrNoObjectsVisited { // Rewrite the message from "no objects visited" err = errors.New("object not found, skipping delete") } es = append(es, err.Error()) } } if !req.DisableHooks { if err := s.execHook(rel.Hooks, rel.Name, rel.Namespace, postDelete, req.Timeout); err != nil { es = append(es, err.Error()) } } rel.Info.Status.Code = release.Status_DELETED if req.Purge { err := s.purgeReleases(rels...) if err != nil { log.Printf("uninstall: Failed to purge the release: %s", err) } return res, err } if err := s.env.Releases.Update(rel); err != nil { log.Printf("uninstall: Failed to store updated release: %s", err) } var errs error if len(es) > 0 { errs = fmt.Errorf("deletion completed with %d error(s): %s", len(es), strings.Join(es, "; ")) } return res, errs } func splitManifests(bigfile string) map[string]string { // This is not the best way of doing things, but it's how k8s itself does it. // Basically, we're quickly splitting a stream of YAML documents into an // array of YAML docs. In the current implementation, the file name is just // a place holder, and doesn't have any further meaning. 
sep := "\n---\n" tpl := "manifest-%d" res := map[string]string{} tmp := strings.Split(bigfile, sep) for i, d := range tmp { res[fmt.Sprintf(tpl, i)] = d } return res } func validateManifest(c environment.KubeClient, ns string, manifest []byte) error { r := bytes.NewReader(manifest) _, err := c.Build(ns, r) return err } Logging real name of the failing hook /* Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package tiller import ( "bytes" "errors" "fmt" "log" "path" "regexp" "strings" "github.com/technosophos/moniker" ctx "golang.org/x/net/context" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/typed/discovery" "k8s.io/helm/pkg/chartutil" "k8s.io/helm/pkg/kube" "k8s.io/helm/pkg/proto/hapi/chart" "k8s.io/helm/pkg/proto/hapi/release" "k8s.io/helm/pkg/proto/hapi/services" relutil "k8s.io/helm/pkg/releaseutil" "k8s.io/helm/pkg/storage/driver" "k8s.io/helm/pkg/tiller/environment" "k8s.io/helm/pkg/timeconv" "k8s.io/helm/pkg/version" ) // releaseNameMaxLen is the maximum length of a release name. // // As of Kubernetes 1.4, the max limit on a name is 63 chars. We reserve 10 for // charts to add data. Effectively, that gives us 53 chars. // See https://github.com/kubernetes/helm/issues/1528 const releaseNameMaxLen = 53 // NOTESFILE_SUFFIX that we want to treat special. 
It goes through the templating engine // but it's not a yaml file (resource) hence can't have hooks, etc. And the user actually // wants to see this file after rendering in the status command. However, it must be a suffix // since there can be filepath in front of it. const notesFileSuffix = "NOTES.txt" var ( // errMissingChart indicates that a chart was not provided. errMissingChart = errors.New("no chart provided") // errMissingRelease indicates that a release (name) was not provided. errMissingRelease = errors.New("no release provided") // errInvalidRevision indicates that an invalid release revision number was provided. errInvalidRevision = errors.New("invalid release revision") ) // ListDefaultLimit is the default limit for number of items returned in a list. var ListDefaultLimit int64 = 512 // ValidName is a regular expression for names. // // According to the Kubernetes help text, the regular expression it uses is: // // (([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])? // // We modified that. First, we added start and end delimiters. Second, we changed // the final ? to + to require that the pattern match at least once. This modification // prevents an empty string from matching. var ValidName = regexp.MustCompile("^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])+$") // ReleaseServer implements the server-side gRPC endpoint for the HAPI services. type ReleaseServer struct { env *environment.Environment clientset internalclientset.Interface } // NewReleaseServer creates a new release server. func NewReleaseServer(env *environment.Environment, clientset internalclientset.Interface) *ReleaseServer { return &ReleaseServer{ env: env, clientset: clientset, } } // ListReleases lists the releases found by the server. 
func (s *ReleaseServer) ListReleases(req *services.ListReleasesRequest, stream services.ReleaseService_ListReleasesServer) error { if len(req.StatusCodes) == 0 { req.StatusCodes = []release.Status_Code{release.Status_DEPLOYED} } //rels, err := s.env.Releases.ListDeployed() rels, err := s.env.Releases.ListFilterAll(func(r *release.Release) bool { for _, sc := range req.StatusCodes { if sc == r.Info.Status.Code { return true } } return false }) if err != nil { return err } if len(req.Filter) != 0 { rels, err = filterReleases(req.Filter, rels) if err != nil { return err } } total := int64(len(rels)) switch req.SortBy { case services.ListSort_NAME: relutil.SortByName(rels) case services.ListSort_LAST_RELEASED: relutil.SortByDate(rels) } if req.SortOrder == services.ListSort_DESC { ll := len(rels) rr := make([]*release.Release, ll) for i, item := range rels { rr[ll-i-1] = item } rels = rr } l := int64(len(rels)) if req.Offset != "" { i := -1 for ii, cur := range rels { if cur.Name == req.Offset { i = ii } } if i == -1 { return fmt.Errorf("offset %q not found", req.Offset) } if len(rels) < i { return fmt.Errorf("no items after %q", req.Offset) } rels = rels[i:] l = int64(len(rels)) } if req.Limit == 0 { req.Limit = ListDefaultLimit } next := "" if l > req.Limit { next = rels[req.Limit].Name rels = rels[0:req.Limit] l = int64(len(rels)) } res := &services.ListReleasesResponse{ Next: next, Count: l, Total: total, Releases: rels, } return stream.Send(res) } func filterReleases(filter string, rels []*release.Release) ([]*release.Release, error) { preg, err := regexp.Compile(filter) if err != nil { return rels, err } matches := []*release.Release{} for _, r := range rels { if preg.MatchString(r.Name) { matches = append(matches, r) } } return matches, nil } // GetVersion sends the server version. 
func (s *ReleaseServer) GetVersion(c ctx.Context, req *services.GetVersionRequest) (*services.GetVersionResponse, error) { v := version.GetVersionProto() return &services.GetVersionResponse{Version: v}, nil } // GetReleaseStatus gets the status information for a named release. func (s *ReleaseServer) GetReleaseStatus(c ctx.Context, req *services.GetReleaseStatusRequest) (*services.GetReleaseStatusResponse, error) { if !ValidName.MatchString(req.Name) { return nil, errMissingRelease } var rel *release.Release if req.Version <= 0 { var err error rel, err = s.env.Releases.Last(req.Name) if err != nil { return nil, fmt.Errorf("getting deployed release %q: %s", req.Name, err) } } else { var err error if rel, err = s.env.Releases.Get(req.Name, req.Version); err != nil { return nil, fmt.Errorf("getting release '%s' (v%d): %s", req.Name, req.Version, err) } } if rel.Info == nil { return nil, errors.New("release info is missing") } if rel.Chart == nil { return nil, errors.New("release chart is missing") } sc := rel.Info.Status.Code statusResp := &services.GetReleaseStatusResponse{ Name: rel.Name, Namespace: rel.Namespace, Info: rel.Info, } // Ok, we got the status of the release as we had jotted down, now we need to match the // manifest we stashed away with reality from the cluster. kubeCli := s.env.KubeClient resp, err := kubeCli.Get(rel.Namespace, bytes.NewBufferString(rel.Manifest)) if sc == release.Status_DELETED || sc == release.Status_FAILED { // Skip errors if this is already deleted or failed. return statusResp, nil } else if err != nil { log.Printf("warning: Get for %s failed: %v", rel.Name, err) return nil, err } rel.Info.Status.Resources = resp return statusResp, nil } // GetReleaseContent gets all of the stored information for the given release. 
func (s *ReleaseServer) GetReleaseContent(c ctx.Context, req *services.GetReleaseContentRequest) (*services.GetReleaseContentResponse, error) { if !ValidName.MatchString(req.Name) { return nil, errMissingRelease } if req.Version <= 0 { rel, err := s.env.Releases.Deployed(req.Name) return &services.GetReleaseContentResponse{Release: rel}, err } rel, err := s.env.Releases.Get(req.Name, req.Version) return &services.GetReleaseContentResponse{Release: rel}, err } // UpdateRelease takes an existing release and new information, and upgrades the release. func (s *ReleaseServer) UpdateRelease(c ctx.Context, req *services.UpdateReleaseRequest) (*services.UpdateReleaseResponse, error) { currentRelease, updatedRelease, err := s.prepareUpdate(req) if err != nil { return nil, err } res, err := s.performUpdate(currentRelease, updatedRelease, req) if err != nil { return res, err } if !req.DryRun { if err := s.env.Releases.Create(updatedRelease); err != nil { return res, err } } return res, nil } func (s *ReleaseServer) performUpdate(originalRelease, updatedRelease *release.Release, req *services.UpdateReleaseRequest) (*services.UpdateReleaseResponse, error) { res := &services.UpdateReleaseResponse{Release: updatedRelease} if req.DryRun { log.Printf("Dry run for %s", updatedRelease.Name) return res, nil } // pre-upgrade hooks if !req.DisableHooks { if err := s.execHook(updatedRelease.Hooks, updatedRelease.Name, updatedRelease.Namespace, preUpgrade, req.Timeout); err != nil { return res, err } } if err := s.performKubeUpdate(originalRelease, updatedRelease, req.Recreate); err != nil { log.Printf("warning: Release Upgrade %q failed: %s", updatedRelease.Name, err) originalRelease.Info.Status.Code = release.Status_SUPERSEDED updatedRelease.Info.Status.Code = release.Status_FAILED s.recordRelease(originalRelease, true) s.recordRelease(updatedRelease, false) return res, err } // post-upgrade hooks if !req.DisableHooks { if err := s.execHook(updatedRelease.Hooks, updatedRelease.Name, 
updatedRelease.Namespace, postUpgrade, req.Timeout); err != nil { return res, err } } originalRelease.Info.Status.Code = release.Status_SUPERSEDED s.recordRelease(originalRelease, true) updatedRelease.Info.Status.Code = release.Status_DEPLOYED return res, nil } // reuseValues copies values from the current release to a new release if the new release does not have any values. // // If the request already has values, or if there are no values in the current release, this does nothing. func (s *ReleaseServer) reuseValues(req *services.UpdateReleaseRequest, current *release.Release) { if (req.Values == nil || req.Values.Raw == "" || req.Values.Raw == "{}\n") && current.Config != nil && current.Config.Raw != "" && current.Config.Raw != "{}\n" { log.Printf("Copying values from %s (v%d) to new release.", current.Name, current.Version) req.Values = current.Config } } // prepareUpdate builds an updated release for an update operation. func (s *ReleaseServer) prepareUpdate(req *services.UpdateReleaseRequest) (*release.Release, *release.Release, error) { if !ValidName.MatchString(req.Name) { return nil, nil, errMissingRelease } if req.Chart == nil { return nil, nil, errMissingChart } // finds the non-deleted release with the given name currentRelease, err := s.env.Releases.Last(req.Name) if err != nil { return nil, nil, err } // If new values were not supplied in the upgrade, re-use the existing values. s.reuseValues(req, currentRelease) // Increment revision count. This is passed to templates, and also stored on // the release object. 
revision := currentRelease.Version + 1 ts := timeconv.Now() options := chartutil.ReleaseOptions{ Name: req.Name, Time: ts, Namespace: currentRelease.Namespace, IsUpgrade: true, Revision: int(revision), } caps, err := capabilities(s.clientset.Discovery()) if err != nil { return nil, nil, err } valuesToRender, err := chartutil.ToRenderValuesCaps(req.Chart, req.Values, options, caps) if err != nil { return nil, nil, err } hooks, manifestDoc, notesTxt, err := s.renderResources(req.Chart, valuesToRender, caps.APIVersions) if err != nil { return nil, nil, err } // Store an updated release. updatedRelease := &release.Release{ Name: req.Name, Namespace: currentRelease.Namespace, Chart: req.Chart, Config: req.Values, Info: &release.Info{ FirstDeployed: currentRelease.Info.FirstDeployed, LastDeployed: ts, Status: &release.Status{Code: release.Status_UNKNOWN}, }, Version: revision, Manifest: manifestDoc.String(), Hooks: hooks, } if len(notesTxt) > 0 { updatedRelease.Info.Status.Notes = notesTxt } err = validateManifest(s.env.KubeClient, currentRelease.Namespace, manifestDoc.Bytes()) return currentRelease, updatedRelease, err } // RollbackRelease rolls back to a previous version of the given release. 
func (s *ReleaseServer) RollbackRelease(c ctx.Context, req *services.RollbackReleaseRequest) (*services.RollbackReleaseResponse, error) { currentRelease, targetRelease, err := s.prepareRollback(req) if err != nil { return nil, err } res, err := s.performRollback(currentRelease, targetRelease, req) if err != nil { return res, err } if !req.DryRun { if err := s.env.Releases.Create(targetRelease); err != nil { return res, err } } return res, nil } func (s *ReleaseServer) performRollback(currentRelease, targetRelease *release.Release, req *services.RollbackReleaseRequest) (*services.RollbackReleaseResponse, error) { res := &services.RollbackReleaseResponse{Release: targetRelease} if req.DryRun { log.Printf("Dry run for %s", targetRelease.Name) return res, nil } // pre-rollback hooks if !req.DisableHooks { if err := s.execHook(targetRelease.Hooks, targetRelease.Name, targetRelease.Namespace, preRollback, req.Timeout); err != nil { return res, err } } if err := s.performKubeUpdate(currentRelease, targetRelease, req.Recreate); err != nil { log.Printf("warning: Release Rollback %q failed: %s", targetRelease.Name, err) currentRelease.Info.Status.Code = release.Status_SUPERSEDED targetRelease.Info.Status.Code = release.Status_FAILED s.recordRelease(currentRelease, true) s.recordRelease(targetRelease, false) return res, err } // post-rollback hooks if !req.DisableHooks { if err := s.execHook(targetRelease.Hooks, targetRelease.Name, targetRelease.Namespace, postRollback, req.Timeout); err != nil { return res, err } } currentRelease.Info.Status.Code = release.Status_SUPERSEDED s.recordRelease(currentRelease, true) targetRelease.Info.Status.Code = release.Status_DEPLOYED return res, nil } func (s *ReleaseServer) performKubeUpdate(currentRelease, targetRelease *release.Release, recreate bool) error { kubeCli := s.env.KubeClient current := bytes.NewBufferString(currentRelease.Manifest) target := bytes.NewBufferString(targetRelease.Manifest) return 
kubeCli.Update(targetRelease.Namespace, current, target, recreate) } // prepareRollback finds the previous release and prepares a new release object with // the previous release's configuration func (s *ReleaseServer) prepareRollback(req *services.RollbackReleaseRequest) (*release.Release, *release.Release, error) { switch { case !ValidName.MatchString(req.Name): return nil, nil, errMissingRelease case req.Version < 0: return nil, nil, errInvalidRevision } crls, err := s.env.Releases.Last(req.Name) if err != nil { return nil, nil, err } rbv := req.Version if req.Version == 0 { rbv = crls.Version - 1 } log.Printf("rolling back %s (current: v%d, target: v%d)", req.Name, crls.Version, rbv) prls, err := s.env.Releases.Get(req.Name, rbv) if err != nil { return nil, nil, err } // Store a new release object with previous release's configuration target := &release.Release{ Name: req.Name, Namespace: crls.Namespace, Chart: prls.Chart, Config: prls.Config, Info: &release.Info{ FirstDeployed: crls.Info.FirstDeployed, LastDeployed: timeconv.Now(), Status: &release.Status{ Code: release.Status_UNKNOWN, Notes: prls.Info.Status.Notes, }, }, Version: crls.Version + 1, Manifest: prls.Manifest, Hooks: prls.Hooks, } return crls, target, nil } func (s *ReleaseServer) uniqName(start string, reuse bool) (string, error) { // If a name is supplied, we check to see if that name is taken. If not, it // is granted. If reuse is true and a deleted release with that name exists, // we re-grant it. Otherwise, an error is returned. if start != "" { if len(start) > releaseNameMaxLen { return "", fmt.Errorf("release name %q exceeds max length of %d", start, releaseNameMaxLen) } h, err := s.env.Releases.History(start) if err != nil || len(h) < 1 { return start, nil } relutil.Reverse(h, relutil.SortByRevision) rel := h[0] if st := rel.Info.Status.Code; reuse && (st == release.Status_DELETED || st == release.Status_FAILED) { // Allowe re-use of names if the previous release is marked deleted. 
log.Printf("reusing name %q", start) return start, nil } else if reuse { return "", errors.New("cannot re-use a name that is still in use") } return "", fmt.Errorf("a release named %q already exists", start) } maxTries := 5 for i := 0; i < maxTries; i++ { namer := moniker.New() name := namer.NameSep("-") if len(name) > releaseNameMaxLen { name = name[:releaseNameMaxLen] } if _, err := s.env.Releases.Get(name, 1); err == driver.ErrReleaseNotFound { return name, nil } log.Printf("info: Name %q is taken. Searching again.", name) } log.Printf("warning: No available release names found after %d tries", maxTries) return "ERROR", errors.New("no available release name found") } func (s *ReleaseServer) engine(ch *chart.Chart) environment.Engine { renderer := s.env.EngineYard.Default() if ch.Metadata.Engine != "" { if r, ok := s.env.EngineYard.Get(ch.Metadata.Engine); ok { renderer = r } else { log.Printf("warning: %s requested non-existent template engine %s", ch.Metadata.Name, ch.Metadata.Engine) } } return renderer } // InstallRelease installs a release and stores the release record. func (s *ReleaseServer) InstallRelease(c ctx.Context, req *services.InstallReleaseRequest) (*services.InstallReleaseResponse, error) { rel, err := s.prepareRelease(req) if err != nil { log.Printf("Failed install prepare step: %s", err) res := &services.InstallReleaseResponse{Release: rel} // On dry run, append the manifest contents to a failed release. This is // a stop-gap until we can revisit an error backchannel post-2.0. if req.DryRun && strings.HasPrefix(err.Error(), "YAML parse error") { err = fmt.Errorf("%s\n%s", err, rel.Manifest) } return res, err } res, err := s.performRelease(rel, req) if err != nil { log.Printf("Failed install perform step: %s", err) } return res, err } // capabilities builds a Capabilities from discovery information. 
func capabilities(disc discovery.DiscoveryInterface) (*chartutil.Capabilities, error) { sv, err := disc.ServerVersion() if err != nil { return nil, err } vs, err := getVersionSet(disc) if err != nil { return nil, fmt.Errorf("Could not get apiVersions from Kubernetes: %s", err) } return &chartutil.Capabilities{ APIVersions: vs, KubeVersion: sv, TillerVersion: version.GetVersionProto(), }, nil } // prepareRelease builds a release for an install operation. func (s *ReleaseServer) prepareRelease(req *services.InstallReleaseRequest) (*release.Release, error) { if req.Chart == nil { return nil, errMissingChart } name, err := s.uniqName(req.Name, req.ReuseName) if err != nil { return nil, err } caps, err := capabilities(s.clientset.Discovery()) if err != nil { return nil, err } revision := 1 ts := timeconv.Now() options := chartutil.ReleaseOptions{ Name: name, Time: ts, Namespace: req.Namespace, Revision: revision, IsInstall: true, } valuesToRender, err := chartutil.ToRenderValuesCaps(req.Chart, req.Values, options, caps) if err != nil { return nil, err } hooks, manifestDoc, notesTxt, err := s.renderResources(req.Chart, valuesToRender, caps.APIVersions) if err != nil { // Return a release with partial data so that client can show debugging // information. rel := &release.Release{ Name: name, Namespace: req.Namespace, Chart: req.Chart, Config: req.Values, Info: &release.Info{ FirstDeployed: ts, LastDeployed: ts, Status: &release.Status{Code: release.Status_UNKNOWN}, }, Version: 0, } if manifestDoc != nil { rel.Manifest = manifestDoc.String() } return rel, err } // Store a release. 
rel := &release.Release{ Name: name, Namespace: req.Namespace, Chart: req.Chart, Config: req.Values, Info: &release.Info{ FirstDeployed: ts, LastDeployed: ts, Status: &release.Status{Code: release.Status_UNKNOWN}, }, Manifest: manifestDoc.String(), Hooks: hooks, Version: int32(revision), } if len(notesTxt) > 0 { rel.Info.Status.Notes = notesTxt } err = validateManifest(s.env.KubeClient, req.Namespace, manifestDoc.Bytes()) return rel, err } func getVersionSet(client discovery.ServerGroupsInterface) (chartutil.VersionSet, error) { groups, err := client.ServerGroups() if err != nil { return chartutil.DefaultVersionSet, err } // FIXME: The Kubernetes test fixture for cli appears to always return nil // for calls to Discovery().ServerGroups(). So in this case, we return // the default API list. This is also a safe value to return in any other // odd-ball case. if groups == nil { return chartutil.DefaultVersionSet, nil } versions := unversioned.ExtractGroupVersions(groups) return chartutil.NewVersionSet(versions...), nil } func (s *ReleaseServer) renderResources(ch *chart.Chart, values chartutil.Values, vs chartutil.VersionSet) ([]*release.Hook, *bytes.Buffer, string, error) { renderer := s.engine(ch) files, err := renderer.Render(ch, values) if err != nil { return nil, nil, "", err } // NOTES.txt gets rendered like all the other files, but because it's not a hook nor a resource, // pull it out of here into a separate file so that we can actually use the output of the rendered // text file. We have to spin through this map because the file contains path information, so we // look for terminating NOTES.txt. We also remove it from the files so that we don't have to skip // it in the sortHooks. 
notes := "" for k, v := range files { if strings.HasSuffix(k, notesFileSuffix) { // Only apply the notes if it belongs to the parent chart // Note: Do not use filePath.Join since it creates a path with \ which is not expected if k == path.Join(ch.Metadata.Name, "templates", notesFileSuffix) { notes = v } delete(files, k) } } // Sort hooks, manifests, and partials. Only hooks and manifests are returned, // as partials are not used after renderer.Render. Empty manifests are also // removed here. hooks, manifests, err := sortManifests(files, vs, InstallOrder) if err != nil { // By catching parse errors here, we can prevent bogus releases from going // to Kubernetes. // // We return the files as a big blob of data to help the user debug parser // errors. b := bytes.NewBuffer(nil) for name, content := range files { if len(strings.TrimSpace(content)) == 0 { continue } b.WriteString("\n---\n# Source: " + name + "\n") b.WriteString(content) } return nil, b, "", err } // Aggregate all valid manifests into one big doc. b := bytes.NewBuffer(nil) for _, m := range manifests { b.WriteString("\n---\n# Source: " + m.name + "\n") b.WriteString(m.content) } return hooks, b, notes, nil } func (s *ReleaseServer) recordRelease(r *release.Release, reuse bool) { if reuse { if err := s.env.Releases.Update(r); err != nil { log.Printf("warning: Failed to update release %q: %s", r.Name, err) } } else if err := s.env.Releases.Create(r); err != nil { log.Printf("warning: Failed to record release %q: %s", r.Name, err) } } // performRelease runs a release. 
func (s *ReleaseServer) performRelease(r *release.Release, req *services.InstallReleaseRequest) (*services.InstallReleaseResponse, error) {
    res := &services.InstallReleaseResponse{Release: r}

    // Dry run: report the would-be release without touching the cluster.
    if req.DryRun {
        log.Printf("Dry run for %s", r.Name)
        return res, nil
    }

    // pre-install hooks
    if !req.DisableHooks {
        if err := s.execHook(r.Hooks, r.Name, r.Namespace, preInstall, req.Timeout); err != nil {
            return res, err
        }
    }

    switch h, err := s.env.Releases.History(req.Name); {
    // if this is a replace operation, append to the release history
    case req.ReuseName && err == nil && len(h) >= 1:
        // get latest release revision
        relutil.Reverse(h, relutil.SortByRevision)

        // old release
        old := h[0]

        // update old release status
        old.Info.Status.Code = release.Status_SUPERSEDED
        s.recordRelease(old, true)

        // update new release with next revision number
        // so as to append to the old release's history
        r.Version = old.Version + 1

        if err := s.performKubeUpdate(old, r, false); err != nil {
            // Record both sides of the failed replace: the old release stays
            // SUPERSEDED, the new one is marked FAILED.
            log.Printf("warning: Release replace %q failed: %s", r.Name, err)
            old.Info.Status.Code = release.Status_SUPERSEDED
            r.Info.Status.Code = release.Status_FAILED
            s.recordRelease(old, true)
            s.recordRelease(r, false)
            return res, err
        }

    default:
        // nothing to replace, create as normal
        // regular manifests
        b := bytes.NewBufferString(r.Manifest)
        if err := s.env.KubeClient.Create(r.Namespace, b); err != nil {
            log.Printf("warning: Release %q failed: %s", r.Name, err)
            r.Info.Status.Code = release.Status_FAILED
            s.recordRelease(r, false)
            return res, fmt.Errorf("release %s failed: %s", r.Name, err)
        }
    }

    // post-install hooks
    if !req.DisableHooks {
        if err := s.execHook(r.Hooks, r.Name, r.Namespace, postInstall, req.Timeout); err != nil {
            log.Printf("warning: Release %q failed post-install: %s", r.Name, err)
            r.Info.Status.Code = release.Status_FAILED
            s.recordRelease(r, false)
            return res, err
        }
    }

    // This is a tricky case. The release has been created, but the result
    // cannot be recorded. The truest thing to tell the user is that the
    // release was created. However, the user will not be able to do anything
    // further with this release.
    //
    // One possible strategy would be to do a timed retry to see if we can get
    // this stored in the future.
    r.Info.Status.Code = release.Status_DEPLOYED
    s.recordRelease(r, false)

    return res, nil
}

// execHook runs the hooks of the named type (preInstall, postInstall, …) for a
// release: each hook manifest that declares the matching event is created in
// the cluster and then watched until ready (bounded by timeout). Returns the
// first error encountered; hooks after a failure do not run.
func (s *ReleaseServer) execHook(hs []*release.Hook, name, namespace, hook string, timeout int64) error {
    kubeCli := s.env.KubeClient
    code, ok := events[hook]
    if !ok {
        return fmt.Errorf("unknown hook %q", hook)
    }

    log.Printf("Executing %s hooks for %s", hook, name)
    for _, h := range hs {
        found := false
        for _, e := range h.Events {
            if e == code {
                found = true
            }
        }
        // If this doesn't implement the hook, skip it.
        if !found {
            continue
        }

        b := bytes.NewBufferString(h.Manifest)
        if err := kubeCli.Create(namespace, b); err != nil {
            log.Printf("warning: Release %q %s %s failed: %s", name, hook, h.Path, err)
            return err
        }
        // No way to rewind a bytes.Buffer()?
        // NOTE(review): Create drained the buffer above, so it is rebuilt
        // from h.Manifest before the watch call.
        b.Reset()
        b.WriteString(h.Manifest)
        if err := kubeCli.WatchUntilReady(namespace, b, timeout); err != nil {
            log.Printf("warning: Release %q %s %s could not complete: %s", name, hook, h.Path, err)
            return err
        }
        h.LastRun = timeconv.Now()
    }
    log.Printf("Hooks complete for %s %s", hook, name)
    return nil
}

// purgeReleases removes every given release revision from release storage,
// stopping at the first storage error.
func (s *ReleaseServer) purgeReleases(rels ...*release.Release) error {
    for _, rel := range rels {
        if _, err := s.env.Releases.Delete(rel.Name, rel.Version); err != nil {
            return err
        }
    }
    return nil
}

// UninstallRelease deletes all of the resources associated with this release, and marks the release DELETED.
func (s *ReleaseServer) UninstallRelease(c ctx.Context, req *services.UninstallReleaseRequest) (*services.UninstallReleaseResponse, error) {
    if !ValidName.MatchString(req.Name) {
        log.Printf("uninstall: Release not found: %s", req.Name)
        return nil, errMissingRelease
    }

    rels, err := s.env.Releases.History(req.Name)
    if err != nil {
        log.Printf("uninstall: Release not loaded: %s", req.Name)
        return nil, err
    }
    if len(rels) < 1 {
        return nil, errMissingRelease
    }

    // Operate on the latest revision.
    relutil.SortByRevision(rels)
    rel := rels[len(rels)-1]

    // TODO: Are there any cases where we want to force a delete even if it's
    // already marked deleted?
    if rel.Info.Status.Code == release.Status_DELETED {
        if req.Purge {
            if err := s.purgeReleases(rels...); err != nil {
                log.Printf("uninstall: Failed to purge the release: %s", err)
                return nil, err
            }
            return &services.UninstallReleaseResponse{Release: rel}, nil
        }
        return nil, fmt.Errorf("the release named %q is already deleted", req.Name)
    }

    log.Printf("uninstall: Deleting %s", req.Name)
    rel.Info.Status.Code = release.Status_DELETING
    rel.Info.Deleted = timeconv.Now()
    res := &services.UninstallReleaseResponse{Release: rel}

    if !req.DisableHooks {
        if err := s.execHook(rel.Hooks, rel.Name, rel.Namespace, preDelete, req.Timeout); err != nil {
            return res, err
        }
    }

    vs, err := getVersionSet(s.clientset.Discovery())
    if err != nil {
        return nil, fmt.Errorf("Could not get apiVersions from Kubernetes: %s", err)
    }

    // From here on out, the release is currently considered to be in Status_DELETING
    // state.
    if err := s.env.Releases.Update(rel); err != nil {
        log.Printf("uninstall: Failed to store updated release: %s", err)
    }

    manifests := splitManifests(rel.Manifest)
    _, files, err := sortManifests(manifests, vs, UninstallOrder)
    if err != nil {
        // We could instead just delete everything in no particular order.
        // FIXME: One way to delete at this point would be to try a label-based
        // deletion. The problem with this is that we could get a false positive
        // and delete something that was not legitimately part of this release.
        return nil, fmt.Errorf("corrupted release record. You must manually delete the resources: %s", err)
    }

    filesToKeep, filesToDelete := filterManifestsToKeep(files)
    if len(filesToKeep) > 0 {
        res.Info = summarizeKeptManifests(filesToKeep)
    }

    // Collect the errors, and return them later.
    es := []string{}
    for _, file := range filesToDelete {
        b := bytes.NewBufferString(file.content)
        if err := s.env.KubeClient.Delete(rel.Namespace, b); err != nil {
            log.Printf("uninstall: Failed deletion of %q: %s", req.Name, err)
            if err == kube.ErrNoObjectsVisited {
                // Rewrite the message from "no objects visited"
                err = errors.New("object not found, skipping delete")
            }
            es = append(es, err.Error())
        }
    }

    if !req.DisableHooks {
        if err := s.execHook(rel.Hooks, rel.Name, rel.Namespace, postDelete, req.Timeout); err != nil {
            es = append(es, err.Error())
        }
    }

    rel.Info.Status.Code = release.Status_DELETED
    if req.Purge {
        err := s.purgeReleases(rels...)
        if err != nil {
            log.Printf("uninstall: Failed to purge the release: %s", err)
        }
        return res, err
    }

    if err := s.env.Releases.Update(rel); err != nil {
        log.Printf("uninstall: Failed to store updated release: %s", err)
    }

    var errs error
    if len(es) > 0 {
        errs = fmt.Errorf("deletion completed with %d error(s): %s", len(es), strings.Join(es, "; "))
    }
    return res, errs
}

// splitManifests splits a multi-document YAML stream on "\n---\n" into a map
// of synthetic-name -> document.
func splitManifests(bigfile string) map[string]string {
    // This is not the best way of doing things, but it's how k8s itself does it.
    // Basically, we're quickly splitting a stream of YAML documents into an
    // array of YAML docs. In the current implementation, the file name is just
    // a place holder, and doesn't have any further meaning.
    sep := "\n---\n"
    tpl := "manifest-%d"
    res := map[string]string{}

    tmp := strings.Split(bigfile, sep)
    for i, d := range tmp {
        res[fmt.Sprintf(tpl, i)] = d
    }
    return res
}

// validateManifest checks that the manifest can be built into Kubernetes
// resource objects by the client; the built result is discarded.
func validateManifest(c environment.KubeClient, ns string, manifest []byte) error {
    r := bytes.NewReader(manifest)
    _, err := c.Build(ns, r)
    return err
}
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package flate import ( "io" "math" ) const ( // The largest offset code. offsetCodeCount = 30 // The special code used to mark the end of a block. endBlockMarker = 256 // The first length code. lengthCodesStart = 257 // The number of codegen codes. codegenCodeCount = 19 badCode = 255 // Output byte buffer size // Must be multiple of 6 (48 bits) + 8 bufferSize = 240 + 8 ) // The number of extra bits needed by length code X - LENGTH_CODES_START. var lengthExtraBits = []int8{ /* 257 */ 0, 0, 0, /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, /* 280 */ 4, 5, 5, 5, 5, 0, } // The length indicated by length code X - LENGTH_CODES_START. var lengthBase = []uint32{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 255, } // offset code word extra bits. var offsetExtraBits = []int8{ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, /* extended window */ 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, } var offsetBase = []uint32{ /* normal deflate */ 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, /* extended window */ 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000, 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000, 0x100000, 0x180000, 0x200000, 0x300000, } // The odd order in which the codegen code sizes are written. 
var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}

// huffmanBitWriter accumulates bits into a 64-bit register and flushes them
// through a fixed-size byte buffer to the underlying writer. The first write
// error is latched in err; subsequent operations become no-ops.
type huffmanBitWriter struct {
    w io.Writer
    // Data waiting to be written is bytes[0:nbytes]
    // and then the low nbits of bits.
    bits            uint64
    nbits           uint32
    bytes           [bufferSize]byte
    nbytes          int
    literalFreq     []int32
    offsetFreq      []int32
    codegen         []uint8
    codegenFreq     []int32
    literalEncoding *huffmanEncoder
    offsetEncoding  *huffmanEncoder
    codegenEncoding *huffmanEncoder
    err             error
}

// newHuffmanBitWriter returns a writer with all frequency tables and encoders
// pre-allocated for the deflate alphabet sizes.
func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
    return &huffmanBitWriter{
        w:               w,
        literalFreq:     make([]int32, maxNumLit),
        offsetFreq:      make([]int32, offsetCodeCount),
        codegen:         make([]uint8, maxNumLit+offsetCodeCount+1),
        codegenFreq:     make([]int32, codegenCodeCount),
        literalEncoding: newHuffmanEncoder(maxNumLit),
        offsetEncoding:  newHuffmanEncoder(offsetCodeCount),
        codegenEncoding: newHuffmanEncoder(codegenCodeCount),
    }
}

// reset re-targets the writer at a new io.Writer and zeroes all accumulated
// state so allocations are reused between compression runs.
func (w *huffmanBitWriter) reset(writer io.Writer) {
    w.w = writer
    w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
    w.bytes = [bufferSize]byte{}
    for i := range w.codegen {
        w.codegen[i] = 0
    }
    for _, s := range [...][]int32{w.literalFreq, w.offsetFreq, w.codegenFreq} {
        for i := range s {
            s[i] = 0
        }
    }
    for _, enc := range [...]*huffmanEncoder{
        w.literalEncoding,
        w.offsetEncoding,
        w.codegenEncoding} {
        for i := range enc.codes {
            enc.codes[i] = 0
        }
    }
}

/* Inlined in writeBits
func (w *huffmanBitWriter) flushBits() {
    if w.err != nil {
        w.nbits = 0
        return
    }
    bits := w.bits
    w.bits >>= 16
    w.nbits -= 16
    n := w.nbytes
    w.bytes[n] = byte(bits)
    w.bytes[n+1] = byte(bits >> 8)
    if n += 2; n >= len(w.bytes) {
        _, w.err = w.w.Write(w.bytes[0:])
        n = 0
    }
    w.nbytes = n
}
*/

// flush drains the bit register and the byte buffer to the underlying writer.
// Any partial final byte is zero-padded.
func (w *huffmanBitWriter) flush() {
    if w.err != nil {
        w.nbits = 0
        return
    }
    n := w.nbytes
    for w.nbits != 0 {
        w.bytes[n] = byte(w.bits)
        w.bits >>= 8
        if w.nbits > 8 { // Avoid underflow
            w.nbits -= 8
        } else {
            w.nbits = 0
        }
        n++
    }
    w.bits = 0
    _, w.err = w.w.Write(w.bytes[0:n])
    w.nbytes = 0
}

// writeBits appends the low nb bits of b to the bit register, spilling six
// bytes (48 bits) at a time into the byte buffer, which is written out when
// it approaches bufferSize. NOTE(review): unlike writeCode, there is no
// w.err guard here — errors are still latched via the Write below, but bits
// continue to be packed after a failure; confirm intentional.
func (w *huffmanBitWriter) writeBits(b, nb int32) {
    w.bits |= uint64(b) << w.nbits
    w.nbits += uint32(nb)
    if w.nbits >= 48 {
        bits := w.bits
        w.bits >>= 48
        w.nbits -= 48
        n := w.nbytes
        w.bytes[n] = byte(bits)
        w.bytes[n+1] = byte(bits >> 8)
        w.bytes[n+2] = byte(bits >> 16)
        w.bytes[n+3] = byte(bits >> 24)
        w.bytes[n+4] = byte(bits >> 32)
        w.bytes[n+5] = byte(bits >> 40)
        n += 6
        if n >= bufferSize-8 {
            _, w.err = w.w.Write(w.bytes[:bufferSize-8])
            n = 0
        }
        w.nbytes = n
    }
}

// writeBytes flushes any pending whole bytes from the bit register and then
// writes the given bytes directly (used for stored blocks).
func (w *huffmanBitWriter) writeBytes(bytes []byte) {
    if w.err != nil {
        return
    }
    n := w.nbytes
    for w.nbits != 0 {
        w.bytes[n] = byte(w.bits)
        w.bits >>= 8
        w.nbits -= 8
        n++
    }
    // NOTE(review): this check is unreachable as written — after the loop
    // above w.nbits is always 0 (or the loop mis-drained on a non-byte-aligned
    // register, since nbits is unsigned). Upstream Go checks w.nbits&7 != 0
    // BEFORE draining; confirm and reorder.
    if w.nbits != 0 {
        w.err = InternalError("writeBytes with unfinished bits")
        return
    }
    if n != 0 {
        _, w.err = w.w.Write(w.bytes[0:n])
        if w.err != nil {
            return
        }
    }
    w.nbytes = 0
    _, w.err = w.w.Write(bytes)
}

// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
// the literal and offset lengths arrays (which are concatenated into a single
// array).  This method generates that run-length encoding.
//
// The result is written into the codegen array, and the frequencies
// of each code is written into the codegenFreq array.
// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
// information. Code badCode is an end marker
//
//  numLiterals      The number of literals in literalEncoding
//  numOffsets       The number of offsets in offsetEncoding
func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int) {
    for i := range w.codegenFreq {
        w.codegenFreq[i] = 0
    }
    // Note that we are using codegen both as a temporary variable for holding
    // a copy of the frequencies, and as the place where we put the result.
    // This is fine because the output is always shorter than the input used
    // so far.
    codegen := w.codegen // cache
    // Copy the concatenated code sizes to codegen.  Put a marker at the end.
    //copy(codegen[0:numLiterals], w.literalEncoding.codeBits)
    cgnl := codegen[0:numLiterals]
    for i := range cgnl {
        cgnl[i] = w.literalEncoding.codes[i].bits()
    }

    //copy(codegen[numLiterals:numLiterals+numOffsets], w.offsetEncoding.codeBits)
    cgnl = codegen[numLiterals : numLiterals+numOffsets]
    for i := range cgnl {
        cgnl[i] = w.offsetEncoding.codes[i].bits()
    }
    codegen[numLiterals+numOffsets] = badCode

    size := codegen[0]
    count := 1
    outIndex := 0
    for inIndex := 1; size != badCode; inIndex++ {
        // INVARIANT: We have seen "count" copies of size that have not yet
        // had output generated for them.
        nextSize := codegen[inIndex]
        if nextSize == size {
            count++
            continue
        }
        // We need to generate codegen indicating "count" of size.
        if size != 0 {
            codegen[outIndex] = size
            outIndex++
            w.codegenFreq[size]++
            count--
            // Code 16: repeat previous length 3-6 times.
            for count >= 3 {
                n := 6
                if n > count {
                    n = count
                }
                codegen[outIndex] = 16
                outIndex++
                codegen[outIndex] = uint8(n - 3)
                outIndex++
                w.codegenFreq[16]++
                count -= n
            }
        } else {
            // Code 18: repeat zero 11-138 times.
            for count >= 11 {
                n := 138
                if n > count {
                    n = count
                }
                codegen[outIndex] = 18
                outIndex++
                codegen[outIndex] = uint8(n - 11)
                outIndex++
                w.codegenFreq[18]++
                count -= n
            }
            if count >= 3 {
                // count >= 3 && count <= 10
                // Code 17: repeat zero 3-10 times.
                codegen[outIndex] = 17
                outIndex++
                codegen[outIndex] = uint8(count - 3)
                outIndex++
                w.codegenFreq[17]++
                count = 0
            }
        }
        count--
        // Emit any residue (count 1 or 2) as literal size codes.
        for ; count >= 0; count-- {
            codegen[outIndex] = size
            outIndex++
            w.codegenFreq[size]++
        }
        // Set up invariant for next time through the loop.
        size = nextSize
        count = 1
    }
    // Marker indicating the end of the codegen.
    codegen[outIndex] = badCode
}

/* non-inlined:
func (w *huffmanBitWriter) writeCode(code *huffmanEncoder, literal uint32) {
    if w.err != nil {
        return
    }
    c := code.codes[literal]
    w.writeBits(int32(c.code()), int32(c.bits()))
}
*/

// writeCode emits the Huffman code for literal — a manually inlined copy of
// writeBits applied to the code's bit pattern and length.
func (w *huffmanBitWriter) writeCode(code *huffmanEncoder, literal uint32) {
    if w.err != nil {
        return
    }
    c := code.codes[literal]
    w.bits |= uint64(c.code()) << w.nbits
    w.nbits += uint32(c.bits())
    if w.nbits >= 48 {
        bits := w.bits
        w.bits >>= 48
        w.nbits -= 48
        n := w.nbytes
        w.bytes[n] = byte(bits)
        w.bytes[n+1] = byte(bits >> 8)
        w.bytes[n+2] = byte(bits >> 16)
        w.bytes[n+3] = byte(bits >> 24)
        w.bytes[n+4] = byte(bits >> 32)
        w.bytes[n+5] = byte(bits >> 40)
        n += 6
        if n >= bufferSize-8 {
            _, w.err = w.w.Write(w.bytes[:bufferSize-8])
            n = 0
        }
        w.nbytes = n
    }
}

// Write the header of a dynamic Huffman block to the output stream.
//
//  numLiterals  The number of literals specified in codegen
//  numOffsets   The number of offsets specified in codegen
//  numCodegens  The number of codegens used in codegen
func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
    if w.err != nil {
        return
    }
    // Block header: 1 final-block bit + 2 bits block type (10 = dynamic).
    var firstBits int32 = 4
    if isEof {
        firstBits = 5
    }
    w.writeBits(firstBits, 3)
    w.writeBits(int32(numLiterals-257), 5)
    w.writeBits(int32(numOffsets-1), 5)
    w.writeBits(int32(numCodegens-4), 4)

    // Codegen code lengths, in the RFC's shuffled order.
    for i := 0; i < numCodegens; i++ {
        //value := w.codegenEncoding.codeBits[codegenOrder[i]]
        value := w.codegenEncoding.codes[codegenOrder[i]].bits()
        w.writeBits(int32(value), 3)
    }

    // The run-length-encoded code lengths themselves.
    i := 0
    for {
        var codeWord int = int(w.codegen[i])
        i++
        if codeWord == badCode {
            break
        }
        // The low byte contains the actual code to generate.
        w.writeCode(w.codegenEncoding, uint32(codeWord))

        // Codes 16-18 carry an extra repeat count.
        switch codeWord {
        case 16:
            w.writeBits(int32(w.codegen[i]), 2)
            i++
            break
        case 17:
            w.writeBits(int32(w.codegen[i]), 3)
            i++
            break
        case 18:
            w.writeBits(int32(w.codegen[i]), 7)
            i++
            break
        }
    }
}

// writeStoredHeader emits a stored (uncompressed) block header: the block
// type bits, a flush to byte alignment, then LEN and its one's complement.
func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
    if w.err != nil {
        return
    }
    var flag int32
    if isEof {
        flag = 1
    }
    w.writeBits(flag, 3)
    w.flush()
    w.writeBits(int32(length), 16)
    w.writeBits(int32(^uint16(length)), 16)
}

// writeFixedHeader emits the 3-bit header of a fixed-Huffman block.
func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
    if w.err != nil {
        return
    }
    // Indicate that we are a fixed Huffman block
    var value int32 = 2
    if isEof {
        value = 3
    }
    w.writeBits(value, 3)
}

// writeBlock encodes one deflate block from tokens, choosing the cheapest of
// stored / fixed-Huffman / dynamic-Huffman representation by estimated size.
// input, when non-nil, is the raw data the tokens were derived from (needed
// for the stored option).
// NOTE(review): tokens = tokens[0 : n+1] relies on the caller providing spare
// capacity for the end-of-block marker — confirm at call sites.
func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
    if w.err != nil {
        return
    }

    copy(w.literalFreq, zeroLits[:])
    for i := range w.offsetFreq {
        w.offsetFreq[i] = 0
    }

    n := len(tokens)
    tokens = tokens[0 : n+1]
    tokens[n] = endBlockMarker

    // Tally literal/length and offset code frequencies.
    for _, t := range tokens {
        switch t.typ() {
        case literalType:
            w.literalFreq[t.literal()]++
        case matchType:
            length := t.length()
            offset := t.offset()
            w.literalFreq[lengthCodesStart+lengthCode(length)]++
            w.offsetFreq[offsetCode(offset)]++
        }
    }

    // get the number of literals
    numLiterals := len(w.literalFreq)
    for w.literalFreq[numLiterals-1] == 0 {
        numLiterals--
    }
    // get the number of offsets
    numOffsets := len(w.offsetFreq)
    for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
        numOffsets--
    }
    if numOffsets == 0 {
        // We haven't found a single match. If we want to go with the dynamic encoding,
        // we should count at least one offset to be sure that the offset huffman tree could be encoded.
        w.offsetFreq[0] = 1
        numOffsets = 1
    }

    w.literalEncoding.generate(w.literalFreq, 15)
    w.offsetEncoding.generate(w.offsetFreq, 15)

    storedBytes := 0
    if input != nil {
        storedBytes = len(input)
    }
    var extraBits int64
    var storedSize int64 = math.MaxInt64
    if storedBytes <= maxStoreBlockSize && input != nil {
        storedSize = int64((storedBytes + 5) * 8)
        // We only bother calculating the costs of the extra bits required by
        // the length of offset fields (which will be the same for both fixed
        // and dynamic encoding), if we need to compare those two encodings
        // against stored encoding.
        for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ {
            // First eight length codes have extra size = 0.
            extraBits += int64(w.literalFreq[lengthCode]) * int64(lengthExtraBits[lengthCode-lengthCodesStart])
        }
        for offsetCode := 4; offsetCode < numOffsets; offsetCode++ {
            // First four offset codes have extra size = 0.
            extraBits += int64(w.offsetFreq[offsetCode]) * int64(offsetExtraBits[offsetCode])
        }
    }

    // Figure out smallest code.
    // Fixed Huffman baseline.
    var size = int64(3) +
        fixedLiteralEncoding.bitLength(w.literalFreq) +
        fixedOffsetEncoding.bitLength(w.offsetFreq) +
        extraBits
    var literalEncoding = fixedLiteralEncoding
    var offsetEncoding = fixedOffsetEncoding

    // Dynamic Huffman?
    var numCodegens int

    // Generate codegen and codegenFrequencies, which indicates how to encode
    // the literalEncoding and the offsetEncoding.
    w.generateCodegen(numLiterals, numOffsets)
    w.codegenEncoding.generate(w.codegenFreq, 7)
    numCodegens = len(w.codegenFreq)
    for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
        numCodegens--
    }
    dynamicHeader := int64(3+5+5+4+(3*numCodegens)) +
        w.codegenEncoding.bitLength(w.codegenFreq) +
        int64(extraBits) +
        int64(w.codegenFreq[16]*2) +
        int64(w.codegenFreq[17]*3) +
        int64(w.codegenFreq[18]*7)
    dynamicSize := dynamicHeader +
        w.literalEncoding.bitLength(w.literalFreq) +
        w.offsetEncoding.bitLength(w.offsetFreq)

    if dynamicSize < size {
        size = dynamicSize
        literalEncoding = w.literalEncoding
        offsetEncoding = w.offsetEncoding
    }

    // Stored bytes?
    if storedSize < size {
        w.writeStoredHeader(storedBytes, eof)
        w.writeBytes(input[0:storedBytes])
        return
    }

    // Huffman.
    if literalEncoding == fixedLiteralEncoding {
        w.writeFixedHeader(eof)
    } else {
        w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
    }
    for _, t := range tokens {
        switch t.typ() {
        case literalType:
            w.writeCode(literalEncoding, t.literal())
            break
        case matchType:
            // Write the length
            length := t.length()
            lengthCode := lengthCode(length)
            w.writeCode(literalEncoding, lengthCode+lengthCodesStart)
            extraLengthBits := int32(lengthExtraBits[lengthCode])
            if extraLengthBits > 0 {
                extraLength := int32(length - lengthBase[lengthCode])
                w.writeBits(extraLength, extraLengthBits)
            }
            // Write the offset
            offset := t.offset()
            offsetCode := offsetCode(offset)
            w.writeCode(offsetEncoding, offsetCode)
            extraOffsetBits := int32(offsetExtraBits[offsetCode])
            if extraOffsetBits > 0 {
                extraOffset := int32(offset - offsetBase[offsetCode])
                w.writeBits(extraOffset, extraOffsetBits)
            }
            break
        default:
            // NOTE(review): string(t) on a numeric token is a rune
            // conversion, not a decimal rendering — go vet flags this;
            // fmt.Sprint(t) is probably intended.
            panic("unknown token type: " + string(t))
        }
    }
}

// huffOffset is a shared offset encoder for blocks with no matches
// (built once in init below); zeroLits is a zero table used to clear
// literalFreq with a single copy.
var huffOffset *huffmanEncoder
var zeroLits [maxNumLit]int32

func init() {
    var w = newHuffmanBitWriter(nil)
    w.offsetFreq[0] = 1
    w.offsetEncoding = newHuffmanEncoder(offsetCodeCount)
    w.offsetEncoding.generate(w.offsetFreq, 15)
    huffOffset = w.offsetEncoding
}

// writeBlockHuff encodes input as a Huffman-only block (no LZ77 matches),
// choosing between dynamic Huffman and stored encoding by estimated size.
func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) {
    if w.err != nil {
        return
    }

    copy(w.literalFreq, zeroLits[:])

    // Add everything as literals
    for _, t := range input {
        w.literalFreq[t]++
    }
    w.literalFreq[endBlockMarker]++

    // get the number of literals
    numLiterals := len(w.literalFreq)
    for w.literalFreq[numLiterals-1] == 0 {
        numLiterals--
    }

    // We haven't found a single match. If we want to go with the dynamic encoding,
    // we should count at least one offset to be sure that the offset huffman tree could be encoded.
    numOffsets := 1

    w.literalEncoding.generate(w.literalFreq, 15)
    w.offsetEncoding = huffOffset

    storedBytes := len(input)
    var extraBits int64
    var storedSize int64 = math.MaxInt64
    if storedBytes <= maxStoreBlockSize {
        storedSize = int64((storedBytes + 5) * 8)
        // We only bother calculating the costs of the extra bits required by
        // the length of offset fields (which will be the same for both fixed
        // and dynamic encoding), if we need to compare those two encodings
        // against stored encoding.
        for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ {
            // First eight length codes have extra size = 0.
            extraBits += int64(w.literalFreq[lengthCode]) * int64(lengthExtraBits[lengthCode-lengthCodesStart])
        }
        for offsetCode := 4; offsetCode < numOffsets; offsetCode++ {
            // First four offset codes have extra size = 0.
            extraBits += int64(w.offsetFreq[offsetCode]) * int64(offsetExtraBits[offsetCode])
        }
    }

    // Figure out smallest code.
    // Always use dynamic Huffman or Store
    var numCodegens int

    // Generate codegen and codegenFrequencies, which indicates how to encode
    // the literalEncoding and the offsetEncoding.
    w.generateCodegen(numLiterals, numOffsets)
    w.codegenEncoding.generate(w.codegenFreq, 7)
    numCodegens = len(w.codegenFreq)
    for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
        numCodegens--
    }
    dynamicHeader := int64(3+5+5+4+(3*numCodegens)) +
        w.codegenEncoding.bitLength(w.codegenFreq) +
        int64(extraBits) +
        int64(w.codegenFreq[16]*2) +
        int64(w.codegenFreq[17]*3) +
        int64(w.codegenFreq[18]*7)
    size := dynamicHeader +
        w.literalEncoding.bitLength(w.literalFreq) +
        1 /*w.offsetEncoding.bitLength(w.offsetFreq)*/

    // Stored bytes?
    if storedSize < size {
        w.writeStoredHeader(storedBytes, eof)
        w.writeBytes(input[0:storedBytes])
        return
    }

    // Huffman.
    w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
    for _, t := range input {
        w.writeCode(w.literalEncoding, uint32(t))
    }
    // Write EOB
    w.writeCode(w.literalEncoding, endBlockMarker)
}

// NOTE(review): the following text is not valid Go — it looks like a commit
// message / benchmark table that was concatenated into the source, followed
// by a second copy of this same file. Preserved verbatim; confirm and remove.

Inline bitwriter.

benchmark                            old ns/op     new ns/op     delta
BenchmarkEncodeDigitsConstant1e4     95105         66603         -29.97%
BenchmarkEncodeDigitsConstant1e5     896551        610034        -31.96%
BenchmarkEncodeDigitsConstant1e6     8920510       6125350       -31.33%
BenchmarkEncodeTwainConstant1e4      117706        89555         -23.92%
BenchmarkEncodeTwainConstant1e5      978555        696039        -28.87%
BenchmarkEncodeTwainConstant1e6      9685554       6970398       -28.03%

benchmark                            old MB/s     new MB/s     speedup
BenchmarkEncodeDigitsConstant1e4     105.15       150.14       1.43x
BenchmarkEncodeDigitsConstant1e5     111.54       163.93       1.47x
BenchmarkEncodeDigitsConstant1e6     112.10       163.26       1.46x
BenchmarkEncodeTwainConstant1e4      84.96        111.66       1.31x
BenchmarkEncodeTwainConstant1e5      102.19       143.67       1.41x
BenchmarkEncodeTwainConstant1e6      103.25       143.46       1.39x

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

import (
    "io"
    "math"
)

const (
    // The largest offset code.
    offsetCodeCount = 30

    // The special code used to mark the end of a block.
    endBlockMarker = 256

    // The first length code.
    lengthCodesStart = 257

    // The number of codegen codes.
codegenCodeCount = 19 badCode = 255 // Output byte buffer size // Must be multiple of 6 (48 bits) + 8 bufferSize = 240 + 8 ) // The number of extra bits needed by length code X - LENGTH_CODES_START. var lengthExtraBits = []int8{ /* 257 */ 0, 0, 0, /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, /* 280 */ 4, 5, 5, 5, 5, 0, } // The length indicated by length code X - LENGTH_CODES_START. var lengthBase = []uint32{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 255, } // offset code word extra bits. var offsetExtraBits = []int8{ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, /* extended window */ 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, } var offsetBase = []uint32{ /* normal deflate */ 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, /* extended window */ 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000, 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000, 0x100000, 0x180000, 0x200000, 0x300000, } // The odd order in which the codegen code sizes are written. var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} type huffmanBitWriter struct { w io.Writer // Data waiting to be written is bytes[0:nbytes] // and then the low nbits of bits. 
bits uint64 nbits uint32 bytes [bufferSize]byte nbytes int literalFreq []int32 offsetFreq []int32 codegen []uint8 codegenFreq []int32 literalEncoding *huffmanEncoder offsetEncoding *huffmanEncoder codegenEncoding *huffmanEncoder err error } func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { return &huffmanBitWriter{ w: w, literalFreq: make([]int32, maxNumLit), offsetFreq: make([]int32, offsetCodeCount), codegen: make([]uint8, maxNumLit+offsetCodeCount+1), codegenFreq: make([]int32, codegenCodeCount), literalEncoding: newHuffmanEncoder(maxNumLit), offsetEncoding: newHuffmanEncoder(offsetCodeCount), codegenEncoding: newHuffmanEncoder(codegenCodeCount), } } func (w *huffmanBitWriter) reset(writer io.Writer) { w.w = writer w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil w.bytes = [bufferSize]byte{} for i := range w.codegen { w.codegen[i] = 0 } for _, s := range [...][]int32{w.literalFreq, w.offsetFreq, w.codegenFreq} { for i := range s { s[i] = 0 } } for _, enc := range [...]*huffmanEncoder{ w.literalEncoding, w.offsetEncoding, w.codegenEncoding} { for i := range enc.codes { enc.codes[i] = 0 } } } /* Inlined in writeBits func (w *huffmanBitWriter) flushBits() { if w.err != nil { w.nbits = 0 return } bits := w.bits w.bits >>= 16 w.nbits -= 16 n := w.nbytes w.bytes[n] = byte(bits) w.bytes[n+1] = byte(bits >> 8) if n += 2; n >= len(w.bytes) { _, w.err = w.w.Write(w.bytes[0:]) n = 0 } w.nbytes = n } */ func (w *huffmanBitWriter) flush() { if w.err != nil { w.nbits = 0 return } n := w.nbytes for w.nbits != 0 { w.bytes[n] = byte(w.bits) w.bits >>= 8 if w.nbits > 8 { // Avoid underflow w.nbits -= 8 } else { w.nbits = 0 } n++ } w.bits = 0 _, w.err = w.w.Write(w.bytes[0:n]) w.nbytes = 0 } func (w *huffmanBitWriter) writeBits(b, nb int32) { w.bits |= uint64(b) << w.nbits w.nbits += uint32(nb) if w.nbits >= 48 { bits := w.bits w.bits >>= 48 w.nbits -= 48 n := w.nbytes w.bytes[n] = byte(bits) w.bytes[n+1] = byte(bits >> 8) w.bytes[n+2] = byte(bits >> 16) w.bytes[n+3] = 
byte(bits >> 24) w.bytes[n+4] = byte(bits >> 32) w.bytes[n+5] = byte(bits >> 40) n += 6 if n >= bufferSize-8 { _, w.err = w.w.Write(w.bytes[:bufferSize-8]) n = 0 } w.nbytes = n } } func (w *huffmanBitWriter) writeBytes(bytes []byte) { if w.err != nil { return } n := w.nbytes for w.nbits != 0 { w.bytes[n] = byte(w.bits) w.bits >>= 8 w.nbits -= 8 n++ } if w.nbits != 0 { w.err = InternalError("writeBytes with unfinished bits") return } if n != 0 { _, w.err = w.w.Write(w.bytes[0:n]) if w.err != nil { return } } w.nbytes = 0 _, w.err = w.w.Write(bytes) } // RFC 1951 3.2.7 specifies a special run-length encoding for specifying // the literal and offset lengths arrays (which are concatenated into a single // array). This method generates that run-length encoding. // // The result is written into the codegen array, and the frequencies // of each code is written into the codegenFreq array. // Codes 0-15 are single byte codes. Codes 16-18 are followed by additional // information. Code badCode is an end marker // // numLiterals The number of literals in literalEncoding // numOffsets The number of offsets in offsetEncoding func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int) { for i := range w.codegenFreq { w.codegenFreq[i] = 0 } // Note that we are using codegen both as a temporary variable for holding // a copy of the frequencies, and as the place where we put the result. // This is fine because the output is always shorter than the input used // so far. codegen := w.codegen // cache // Copy the concatenated code sizes to codegen. Put a marker at the end. 
// generateCodegen continues: run-length encode the concatenated code
// lengths per RFC 1951 3.2.7 (codes 16/17/18 compress repeats).
	//copy(codegen[0:numLiterals], w.literalEncoding.codeBits)
	cgnl := codegen[0:numLiterals]
	for i := range cgnl {
		cgnl[i] = w.literalEncoding.codes[i].bits()
	}
	//copy(codegen[numLiterals:numLiterals+numOffsets], w.offsetEncoding.codeBits)
	cgnl = codegen[numLiterals : numLiterals+numOffsets]
	for i := range cgnl {
		cgnl[i] = w.offsetEncoding.codes[i].bits()
	}
	// Sentinel terminating the scan loop below.
	codegen[numLiterals+numOffsets] = badCode

	size := codegen[0]
	count := 1
	outIndex := 0
	for inIndex := 1; size != badCode; inIndex++ {
		// INVARIANT: We have seen "count" copies of size that have not yet
		// had output generated for them.
		nextSize := codegen[inIndex]
		if nextSize == size {
			count++
			continue
		}
		// We need to generate codegen indicating "count" of size.
		if size != 0 {
			codegen[outIndex] = size
			outIndex++
			w.codegenFreq[size]++
			count--
			// Code 16: repeat the previous length 3-6 times (2 extra bits).
			for count >= 3 {
				n := 6
				if n > count {
					n = count
				}
				codegen[outIndex] = 16
				outIndex++
				codegen[outIndex] = uint8(n - 3)
				outIndex++
				w.codegenFreq[16]++
				count -= n
			}
		} else {
			// Code 18: repeat zero length 11-138 times (7 extra bits).
			for count >= 11 {
				n := 138
				if n > count {
					n = count
				}
				codegen[outIndex] = 18
				outIndex++
				codegen[outIndex] = uint8(n - 11)
				outIndex++
				w.codegenFreq[18]++
				count -= n
			}
			// Code 17: repeat zero length 3-10 times (3 extra bits).
			if count >= 3 {
				// count >= 3 && count <= 10
				codegen[outIndex] = 17
				outIndex++
				codegen[outIndex] = uint8(count - 3)
				outIndex++
				w.codegenFreq[17]++
				count = 0
			}
		}
		count--
		// Emit any leftover copies too short for a repeat code, one by one.
		for ; count >= 0; count-- {
			codegen[outIndex] = size
			outIndex++
			w.codegenFreq[size]++
		}
		// Set up invariant for next time through the loop.
		size = nextSize
		count = 1
	}
	// Marker indicating the end of the codegen.
	codegen[outIndex] = badCode
}

/* non-inlined:
func (w *huffmanBitWriter) writeCode(code *huffmanEncoder, literal uint32) {
	if w.err != nil {
		return
	}
	c := code.codes[literal]
	w.writeBits(int32(c.code()), int32(c.bits()))
}
*/

// writeCode emits the Huffman code for literal. The body is a manually
// inlined copy of writeBits (see the non-inlined reference version above).
func (w *huffmanBitWriter) writeCode(code *huffmanEncoder, literal uint32) {
	if w.err != nil {
		return
	}
	c := code.codes[literal]
	w.bits |= uint64(c.code()) << w.nbits
	w.nbits += uint32(c.bits())
	if w.nbits >= 48 {
		bits := w.bits
		w.bits >>= 48
		w.nbits -= 48
		n := w.nbytes
		w.bytes[n] = byte(bits)
		w.bytes[n+1] = byte(bits >> 8)
		w.bytes[n+2] = byte(bits >> 16)
		w.bytes[n+3] = byte(bits >> 24)
		w.bytes[n+4] = byte(bits >> 32)
		w.bytes[n+5] = byte(bits >> 40)
		n += 6
		if n >= bufferSize-8 {
			_, w.err = w.w.Write(w.bytes[:bufferSize-8])
			n = 0
		}
		w.nbytes = n
	}
}

// Write the header of a dynamic Huffman block to the output stream.
//
//	numLiterals  The number of literals specified in codegen
//	numOffsets   The number of offsets specified in codegen
//	numCodegens  The number of codegens used in codegen
func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
	if w.err != nil {
		return
	}
	// Block type: 0b10x = dynamic Huffman, low bit set marks the final block.
	var firstBits int32 = 4
	if isEof {
		firstBits = 5
	}
	w.writeBits(firstBits, 3)
	w.writeBits(int32(numLiterals-257), 5)
	w.writeBits(int32(numOffsets-1), 5)
	w.writeBits(int32(numCodegens-4), 4)

	// Codegen code lengths, transmitted in the fixed codegenOrder.
	for i := 0; i < numCodegens; i++ {
		//value := w.codegenEncoding.codeBits[codegenOrder[i]]
		value := w.codegenEncoding.codes[codegenOrder[i]].bits()
		w.writeBits(int32(value), 3)
	}

	i := 0
	for {
		var codeWord int = int(w.codegen[i])
		i++
		if codeWord == badCode {
			break
		}
		// The low byte contains the actual code to generate.
w.writeCode(w.codegenEncoding, uint32(codeWord)) switch codeWord { case 16: w.writeBits(int32(w.codegen[i]), 2) i++ break case 17: w.writeBits(int32(w.codegen[i]), 3) i++ break case 18: w.writeBits(int32(w.codegen[i]), 7) i++ break } } } func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { if w.err != nil { return } var flag int32 if isEof { flag = 1 } w.writeBits(flag, 3) w.flush() w.writeBits(int32(length), 16) w.writeBits(int32(^uint16(length)), 16) } func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { if w.err != nil { return } // Indicate that we are a fixed Huffman block var value int32 = 2 if isEof { value = 3 } w.writeBits(value, 3) } func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) { if w.err != nil { return } copy(w.literalFreq, zeroLits[:]) for i := range w.offsetFreq { w.offsetFreq[i] = 0 } n := len(tokens) tokens = tokens[0 : n+1] tokens[n] = endBlockMarker for _, t := range tokens { switch t.typ() { case literalType: w.literalFreq[t.literal()]++ case matchType: length := t.length() offset := t.offset() w.literalFreq[lengthCodesStart+lengthCode(length)]++ w.offsetFreq[offsetCode(offset)]++ } } // get the number of literals numLiterals := len(w.literalFreq) for w.literalFreq[numLiterals-1] == 0 { numLiterals-- } // get the number of offsets numOffsets := len(w.offsetFreq) for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 { numOffsets-- } if numOffsets == 0 { // We haven't found a single match. If we want to go with the dynamic encoding, // we should count at least one offset to be sure that the offset huffman tree could be encoded. 
// writeBlock continues: build the trees and cost the three encodings.
		w.offsetFreq[0] = 1
		numOffsets = 1
	}

	w.literalEncoding.generate(w.literalFreq, 15)
	w.offsetEncoding.generate(w.offsetFreq, 15)

	storedBytes := 0
	if input != nil {
		storedBytes = len(input)
	}
	var extraBits int64
	// math.MaxInt64 acts as "stored encoding not viable" so the comparison
	// below never selects it.
	var storedSize int64 = math.MaxInt64
	if storedBytes <= maxStoreBlockSize && input != nil {
		// Stored cost: the data itself plus a 5-byte header, in bits.
		storedSize = int64((storedBytes + 5) * 8)
		// We only bother calculating the costs of the extra bits required by
		// the length of offset fields (which will be the same for both fixed
		// and dynamic encoding), if we need to compare those two encodings
		// against stored encoding.
		for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ {
			// First eight length codes have extra size = 0.
			extraBits += int64(w.literalFreq[lengthCode]) * int64(lengthExtraBits[lengthCode-lengthCodesStart])
		}
		for offsetCode := 4; offsetCode < numOffsets; offsetCode++ {
			// First four offset codes have extra size = 0.
			extraBits += int64(w.offsetFreq[offsetCode]) * int64(offsetExtraBits[offsetCode])
		}
	}

	// Figure out smallest code.
	// Fixed Huffman baseline.
	var size = int64(3) +
		fixedLiteralEncoding.bitLength(w.literalFreq) +
		fixedOffsetEncoding.bitLength(w.offsetFreq) +
		extraBits
	var literalEncoding = fixedLiteralEncoding
	var offsetEncoding = fixedOffsetEncoding

	// Dynamic Huffman?
	var numCodegens int

	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
w.generateCodegen(numLiterals, numOffsets) w.codegenEncoding.generate(w.codegenFreq, 7) numCodegens = len(w.codegenFreq) for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { numCodegens-- } dynamicHeader := int64(3+5+5+4+(3*numCodegens)) + w.codegenEncoding.bitLength(w.codegenFreq) + int64(extraBits) + int64(w.codegenFreq[16]*2) + int64(w.codegenFreq[17]*3) + int64(w.codegenFreq[18]*7) dynamicSize := dynamicHeader + w.literalEncoding.bitLength(w.literalFreq) + w.offsetEncoding.bitLength(w.offsetFreq) if dynamicSize < size { size = dynamicSize literalEncoding = w.literalEncoding offsetEncoding = w.offsetEncoding } // Stored bytes? if storedSize < size { w.writeStoredHeader(storedBytes, eof) w.writeBytes(input[0:storedBytes]) return } // Huffman. if literalEncoding == fixedLiteralEncoding { w.writeFixedHeader(eof) } else { w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) } for _, t := range tokens { switch t.typ() { case literalType: w.writeCode(literalEncoding, t.literal()) break case matchType: // Write the length length := t.length() lengthCode := lengthCode(length) w.writeCode(literalEncoding, lengthCode+lengthCodesStart) extraLengthBits := int32(lengthExtraBits[lengthCode]) if extraLengthBits > 0 { extraLength := int32(length - lengthBase[lengthCode]) w.writeBits(extraLength, extraLengthBits) } // Write the offset offset := t.offset() offsetCode := offsetCode(offset) w.writeCode(offsetEncoding, offsetCode) extraOffsetBits := int32(offsetExtraBits[offsetCode]) if extraOffsetBits > 0 { extraOffset := int32(offset - offsetBase[offsetCode]) w.writeBits(extraOffset, extraOffsetBits) } break default: panic("unknown token type: " + string(t)) } } } var huffOffset *huffmanEncoder var zeroLits [maxNumLit]int32 func init() { var w = newHuffmanBitWriter(nil) w.offsetFreq[0] = 1 w.offsetEncoding = newHuffmanEncoder(offsetCodeCount) w.offsetEncoding.generate(w.offsetFreq, 15) huffOffset = w.offsetEncoding } func (w *huffmanBitWriter) 
writeBlockHuff(eof bool, input []byte) {
	// writeBlockHuff encodes input as literals only (no matches), choosing
	// between dynamic Huffman and stored encoding. The receiver clause
	// `func (w *huffmanBitWriter)` sits on the previous chunk line.
	if w.err != nil {
		return
	}

	copy(w.literalFreq, zeroLits[:])
	for _, t := range input {
		w.literalFreq[t]++
	}
	w.literalFreq[endBlockMarker]++

	// get the number of literals
	numLiterals := len(w.literalFreq)
	for w.literalFreq[numLiterals-1] == 0 {
		numLiterals--
	}

	// We haven't found a single match. If we want to go with the dynamic encoding,
	// we should count at least one offset to be sure that the offset huffman tree could be encoded.
	numOffsets := 1

	w.literalEncoding.generate(w.literalFreq, 15)
	// Reuse the precomputed one-symbol offset encoding built in init().
	w.offsetEncoding = huffOffset

	storedBytes := len(input)
	var extraBits int64
	// math.MaxInt64 acts as "stored encoding not viable".
	var storedSize int64 = math.MaxInt64
	if storedBytes <= maxStoreBlockSize {
		storedSize = int64((storedBytes + 5) * 8)
		// We only bother calculating the costs of the extra bits required by
		// the length of offset fields (which will be the same for both fixed
		// and dynamic encoding), if we need to compare those two encodings
		// against stored encoding.
		for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ {
			// First eight length codes have extra size = 0.
			extraBits += int64(w.literalFreq[lengthCode]) * int64(lengthExtraBits[lengthCode-lengthCodesStart])
		}
		for offsetCode := 4; offsetCode < numOffsets; offsetCode++ {
			// First four offset codes have extra size = 0.
			extraBits += int64(w.offsetFreq[offsetCode]) * int64(offsetExtraBits[offsetCode])
		}
	}

	// Figure out smallest code.
	// Always use dynamic Huffman or Store
	var numCodegens int

	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
	w.generateCodegen(numLiterals, numOffsets)
	w.codegenEncoding.generate(w.codegenFreq, 7)
	numCodegens = len(w.codegenFreq)
	for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
		numCodegens--
	}
	dynamicHeader := int64(3+5+5+4+(3*numCodegens)) +
		w.codegenEncoding.bitLength(w.codegenFreq) +
		int64(extraBits) +
		int64(w.codegenFreq[16]*2) +
		int64(w.codegenFreq[17]*3) +
		int64(w.codegenFreq[18]*7)
	// The constant 1 stands in for the commented-out offset-tree cost below
	// — presumably the single-symbol offset tree costs one bit; confirm
	// against huffmanEncoder.bitLength before relying on it.
	size := dynamicHeader +
		w.literalEncoding.bitLength(w.literalFreq) +
		1 /*w.offsetEncoding.bitLength(w.offsetFreq)*/

	// Stored bytes?
	if storedSize < size {
		w.writeStoredHeader(storedBytes, eof)
		w.writeBytes(input[0:storedBytes])
		return
	}

	// Huffman.
	w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
	for _, t := range input {
		// Inlined copy of writeCode for the hot literal loop.
		c := w.literalEncoding.codes[t]
		w.bits |= uint64(c.code()) << w.nbits
		w.nbits += uint32(c.bits())
		if w.nbits >= 48 {
			bits := w.bits
			w.bits >>= 48
			w.nbits -= 48
			n := w.nbytes
			w.bytes[n] = byte(bits)
			w.bytes[n+1] = byte(bits >> 8)
			w.bytes[n+2] = byte(bits >> 16)
			w.bytes[n+3] = byte(bits >> 24)
			w.bytes[n+4] = byte(bits >> 32)
			w.bytes[n+5] = byte(bits >> 40)
			n += 6
			if n >= bufferSize-8 {
				_, w.err = w.w.Write(w.bytes[:bufferSize-8])
				n = 0
			}
			w.nbytes = n
		}
	}
	// Write EOB
	w.writeCode(w.literalEncoding, endBlockMarker)
}
package service

import (
	"fmt"
	"net"
	"os"

	"github.com/codegangsta/cli"
	"github.com/keybase/client/go/libcmdline"
	"github.com/keybase/client/go/libkb"
	keybase1 "github.com/keybase/client/protocol/go"
	"github.com/maxtaco/go-framed-msgpack-rpc/rpc2"
)

// Keep this around to simplify things
var G = libkb.G

// Service is the long-running keybase local service (daemon).
type Service struct {
	chdirTo string             // optional directory to chdir into at startup (--chdir)
	lockPid *libkb.LockPIDFile // pidfile lock guaranteeing a single instance
}

// NewService constructs a Service; the boolean parameter is currently unused.
func NewService(d bool) *Service {
	return &Service{}
}

// RegisterProtocols wires every keybase RPC protocol handler into srv.
func RegisterProtocols(srv *rpc2.Server, xp *rpc2.Transport) {
	srv.Register(keybase1.BTCProtocol(NewBTCHandler(xp)))
	srv.Register(keybase1.ConfigProtocol(ConfigHandler{xp}))
	srv.Register(keybase1.CtlProtocol(CtlHandler{}))
	srv.Register(keybase1.DeviceProtocol(NewDeviceHandler(xp)))
	srv.Register(keybase1.DoctorProtocol(NewDoctorHandler(xp)))
	srv.Register(keybase1.IdentifyProtocol(NewIdentifyHandler(xp)))
	srv.Register(keybase1.LoginProtocol(NewLoginHandler(xp)))
	srv.Register(keybase1.ProveProtocol(NewProveHandler(xp)))
	srv.Register(keybase1.SessionProtocol(NewSessionHandler(xp)))
	srv.Register(keybase1.SignupProtocol(NewSignupHandler(xp)))
	srv.Register(keybase1.SigsProtocol(NewSigsHandler(xp)))
	srv.Register(keybase1.PgpProtocol(NewPGPHandler(xp)))
	srv.Register(keybase1.RevokeProtocol(NewRevokeHandler(xp)))
	srv.Register(keybase1.TrackProtocol(NewTrackHandler(xp)))
	srv.Register(keybase1.UserProtocol(NewUserHandler(xp)))
}

// Handle serves RPCs on a single accepted connection until it closes.
func (d *Service) Handle(c net.Conn) {
	xp := rpc2.NewTransport(c, libkb.NewRpcLogFactory(), libkb.WrapError)
	server := rpc2.NewServer(xp, libkb.WrapError)
	RegisterProtocols(server, xp)
	server.Run(true)
}

// RunClient is required by the command interface but unsupported here.
func (d *Service) RunClient() (err error) {
	return fmt.Errorf("can't run service in client mode")
}

// Run starts the service: optional chdir, exclusive pidfile lock, socket
// cleanup, RPC configuration, then the accept loop (blocks until error).
func (d *Service) Run() (err error) {
	G.Service = true
	if len(d.chdirTo) != 0 {
		// NOTE(review): e_tmp violates Go naming conventions (no underscores).
		e_tmp := os.Chdir(d.chdirTo)
		if e_tmp != nil {
			// A chdir failure is deliberately non-fatal: warn and continue.
			G.Log.Warning("Could not change directory to %s: %s", d.chdirTo, e_tmp.Error())
		} else {
			G.Log.Info("Changing runtime dir to %s", d.chdirTo)
		}
	}
	if err = d.GetExclusiveLock(); err != nil {
		return
	}
	if err = d.OpenSocket(); err != nil {
		return
	}
	if err = d.ConfigRpcServer(); err != nil {
		return
	}
	if err = d.ListenLoop(); err != nil {
		return
	}
	return
}

// ReleaseLock drops the pidfile lock.
func (d *Service) ReleaseLock() error {
	return d.lockPid.Close()
}

// GetExclusiveLock ensures the runtime dir exists and takes the pidfile
// lock, failing if another instance already holds it.
func (d *Service) GetExclusiveLock() error {
	dir, err := G.Env.GetRuntimeDir()
	if err != nil {
		return err
	}
	if err = os.MkdirAll(dir, libkb.PERM_DIR); err != nil {
		return err
	}
	if err := d.lockPIDFile(); err != nil {
		return err
	}
	return nil
}

// OpenSocket removes any stale unix-socket file left by a previous run.
func (d *Service) OpenSocket() error {
	sf, err := G.Env.GetSocketFile()
	if err != nil {
		return err
	}
	if exists, err := libkb.FileExists(sf); err != nil {
		return err
	} else if exists {
		G.Log.Debug("removing stale socket file: %s", sf)
		if err = os.Remove(sf); err != nil {
			G.Log.Warning("error removing stale socket file: %s", err)
			return err
		}
	}
	return nil
}

// lockPIDFile takes the exclusive pidfile lock; failure means another
// server instance is already running.
func (d *Service) lockPIDFile() (err error) {
	var fn string
	if fn, err = G.Env.GetPidFile(); err != nil {
		return
	}
	d.lockPid = libkb.NewLockPIDFile(fn)
	if err = d.lockPid.Lock(); err != nil {
		return fmt.Errorf("error locking %s: server already running", fn)
	}
	G.Log.Debug("Locking pidfile %s\n", fn)
	return nil
}

// ConfigRpcServer is a placeholder; no extra RPC configuration is done yet.
func (d *Service) ConfigRpcServer() (err error) {
	return nil
}

// ListenLoop binds the service socket, registers a shutdown hook that
// releases the lock and closes the listener, then serves each accepted
// connection on its own goroutine until Accept fails.
func (d *Service) ListenLoop() (err error) {
	var l net.Listener
	if l, err = G.BindToSocket(); err != nil {
		return
	}
	G.PushShutdownHook(func() error {
		G.Log.Info("Closing socket")
		d.lockPid.Close()
		return l.Close()
	})
	for {
		var c net.Conn
		if c, err = l.Accept(); err != nil {
			return
		}
		go d.Handle(c)
	}
}

// ParseArgv captures the --chdir flag.
func (d *Service) ParseArgv(ctx *cli.Context) error {
	d.chdirTo = ctx.String("chdir")
	return nil
}

// NewCmdService defines the `keybase service` CLI command.
func NewCmdService(cl *libcmdline.CommandLine) cli.Command {
	return cli.Command{
		Name:        "service",
		Usage:       "keybase service [--chdir <dir>]",
		Description: "run the keybase local service",
		Flags: []cli.Flag{
			cli.StringFlag{
				Name:  "chdir",
				Usage: "specify where to run as a daemon (via chdir)",
			},
		},
		Action: func(c *cli.Context) {
			cl.ChooseCommand(&Service{}, "service", c)
			cl.SetService()
		},
	}
}

func (d
*Service) GetUsage() libkb.Usage {
	// GetUsage declares which global resources the service needs. The
	// receiver clause `func (d` sits on the previous chunk line.
	return libkb.Usage{
		Config:     true,
		KbKeyring:  true,
		GpgKeyring: true,
		API:        true,
		Socket:     true,
	}
}

// GetCommands exposes the service command to the CLI dispatcher.
func GetCommands(cl *libcmdline.CommandLine) []cli.Command {
	return []cli.Command{
		NewCmdService(cl),
	}
}

/*
NOTE(review): the following paragraph is an embedded commit message, not Go
source — it was flattened into this file by whatever produced it; kept
verbatim inside this comment:

write .cache/keybase/service.version on service startup This provides an easy way for updated clients to find out what version of the service is currently running, so that they can bounce it if need be. Closes https://github.com/keybase/client/issues/415.

Everything below is a second, post-commit copy of the same package (its
duplicate declarations cannot compile alongside the first copy above).
*/

package service

import (
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"path"

	"github.com/codegangsta/cli"
	"github.com/keybase/client/go/libcmdline"
	"github.com/keybase/client/go/libkb"
	keybase1 "github.com/keybase/client/protocol/go"
	"github.com/maxtaco/go-framed-msgpack-rpc/rpc2"
)

// Keep this around to simplify things
var G = libkb.G

// Service is the long-running keybase local service (daemon).
type Service struct {
	chdirTo string             // optional directory to chdir into at startup (--chdir)
	lockPid *libkb.LockPIDFile // pidfile lock guaranteeing a single instance
}

// NewService constructs a Service; the boolean parameter is currently unused.
func NewService(d bool) *Service {
	return &Service{}
}

// RegisterProtocols wires every keybase RPC protocol handler into srv.
func RegisterProtocols(srv *rpc2.Server, xp *rpc2.Transport) {
	srv.Register(keybase1.BTCProtocol(NewBTCHandler(xp)))
	srv.Register(keybase1.ConfigProtocol(ConfigHandler{xp}))
	srv.Register(keybase1.CtlProtocol(CtlHandler{}))
	srv.Register(keybase1.DeviceProtocol(NewDeviceHandler(xp)))
	srv.Register(keybase1.DoctorProtocol(NewDoctorHandler(xp)))
	srv.Register(keybase1.IdentifyProtocol(NewIdentifyHandler(xp)))
	srv.Register(keybase1.LoginProtocol(NewLoginHandler(xp)))
	srv.Register(keybase1.ProveProtocol(NewProveHandler(xp)))
	srv.Register(keybase1.SessionProtocol(NewSessionHandler(xp)))
	srv.Register(keybase1.SignupProtocol(NewSignupHandler(xp)))
	srv.Register(keybase1.SigsProtocol(NewSigsHandler(xp)))
	srv.Register(keybase1.PgpProtocol(NewPGPHandler(xp)))
	srv.Register(keybase1.RevokeProtocol(NewRevokeHandler(xp)))
	srv.Register(keybase1.TrackProtocol(NewTrackHandler(xp)))
	srv.Register(keybase1.UserProtocol(NewUserHandler(xp)))
}

// Handle serves RPCs on a single accepted connection until it closes.
func (d *Service) Handle(c net.Conn) {
	xp := rpc2.NewTransport(c, libkb.NewRpcLogFactory(), libkb.WrapError)
	server := rpc2.NewServer(xp, libkb.WrapError)
	RegisterProtocols(server, xp)
	server.Run(true)
}

// RunClient is required by the command interface but unsupported here.
func (d *Service) RunClient() (err error) {
	return fmt.Errorf("can't run service in client mode")
}

// Run starts the service. New in this version: the service version file is
// written first, so clients can detect a stale running daemon.
func (d *Service) Run() (err error) {
	G.Service = true
	err = d.writeVersionFile()
	if err != nil {
		return
	}
	if len(d.chdirTo) != 0 {
		// NOTE(review): e_tmp violates Go naming conventions (no underscores).
		e_tmp := os.Chdir(d.chdirTo)
		if e_tmp != nil {
			// A chdir failure is deliberately non-fatal: warn and continue.
			G.Log.Warning("Could not change directory to %s: %s", d.chdirTo, e_tmp.Error())
		} else {
			G.Log.Info("Changing runtime dir to %s", d.chdirTo)
		}
	}
	if err = d.GetExclusiveLock(); err != nil {
		return
	}
	if err = d.OpenSocket(); err != nil {
		return
	}
	if err = d.ConfigRpcServer(); err != nil {
		return
	}
	if err = d.ListenLoop(); err != nil {
		return
	}
	return
}

// If the daemon is already running, we need to be able to check what version
// it is, in case the client has been updated.
func (d *Service) writeVersionFile() error {
	// NOTE(review): path.Join is slash-only; filepath.Join would be the
	// OS-correct choice for a filesystem path — confirm whether Windows
	// support matters before changing.
	versionFilePath := path.Join(G.Env.GetCacheDir(), "service.version")
	return ioutil.WriteFile(versionFilePath, []byte(libkb.CLIENT_VERSION), 0644)
}

// ReleaseLock drops the pidfile lock.
func (d *Service) ReleaseLock() error {
	return d.lockPid.Close()
}

// GetExclusiveLock ensures the runtime dir exists and takes the pidfile
// lock, failing if another instance already holds it.
func (d *Service) GetExclusiveLock() error {
	dir, err := G.Env.GetRuntimeDir()
	if err != nil {
		return err
	}
	if err = os.MkdirAll(dir, libkb.PERM_DIR); err != nil {
		return err
	}
	if err := d.lockPIDFile(); err != nil {
		return err
	}
	return nil
}

// OpenSocket removes any stale unix-socket file left by a previous run.
func (d *Service) OpenSocket() error {
	sf, err := G.Env.GetSocketFile()
	if err != nil {
		return err
	}
	if exists, err := libkb.FileExists(sf); err != nil {
		return err
	} else if exists {
		G.Log.Debug("removing stale socket file: %s", sf)
		if err = os.Remove(sf); err != nil {
			G.Log.Warning("error removing stale socket file: %s", err)
			return err
		}
	}
	return nil
}

// lockPIDFile takes the exclusive pidfile lock; failure means another
// server instance is already running.
func (d *Service) lockPIDFile() (err error) {
	var fn string
	if fn, err = G.Env.GetPidFile(); err != nil {
		return
	}
	d.lockPid = libkb.NewLockPIDFile(fn)
	if err = d.lockPid.Lock(); err != nil {
		return fmt.Errorf("error locking %s: server already running", fn)
	}
	G.Log.Debug("Locking pidfile %s\n", fn)
	return nil
}

// ConfigRpcServer is a placeholder; no extra RPC configuration is done yet.
func (d *Service) ConfigRpcServer() (err error) {
	return nil
}

// ListenLoop binds the service socket, registers a shutdown hook that
// releases the lock and closes the listener, then serves each accepted
// connection on its own goroutine until Accept fails.
func (d *Service) ListenLoop() (err error) {
	var l net.Listener
	if l, err = G.BindToSocket(); err != nil {
		return
	}
	G.PushShutdownHook(func() error {
		G.Log.Info("Closing socket")
		d.lockPid.Close()
		return l.Close()
	})
	for {
		var c net.Conn
		if c, err = l.Accept(); err != nil {
			return
		}
		go d.Handle(c)
	}
}

// ParseArgv captures the --chdir flag.
func (d *Service) ParseArgv(ctx *cli.Context) error {
	d.chdirTo = ctx.String("chdir")
	return nil
}

// NewCmdService defines the `keybase service` CLI command.
func NewCmdService(cl *libcmdline.CommandLine) cli.Command {
	return cli.Command{
		Name:        "service",
		Usage:       "keybase service [--chdir <dir>]",
		Description: "run the keybase local service",
		Flags: []cli.Flag{
			cli.StringFlag{
				Name:  "chdir",
				Usage: "specify where to run as a daemon (via chdir)",
			},
		},
		Action: func(c *cli.Context) {
			cl.ChooseCommand(&Service{}, "service", c)
			cl.SetService()
		},
	}
}

// GetUsage declares which global resources the service needs.
func (d *Service) GetUsage() libkb.Usage {
	return libkb.Usage{
		Config:     true,
		KbKeyring:  true,
		GpgKeyring: true,
		API:        true,
		Socket:     true,
	}
}

// GetCommands exposes the service command to the CLI dispatcher.
func GetCommands(cl *libcmdline.CommandLine) []cli.Command {
	return []cli.Command{
		NewCmdService(cl),
	}
}
package goiostat_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"testing"
)

// TestGoiostat is the single go-test entry point that hands control to the
// Ginkgo spec runner.
func TestGoiostat(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Goiostat Suite")
}

// NOTE(review): stray commit-note text followed here in the original file
// (not Go source); kept verbatim as a comment:
// breaks build thinks it has 2 main... figure that out later
package drivers import ( "text/template" ) // Base config. This is common for all VMs and has no variables in it. var qemuBase = template.Must(template.New("qemuBase").Parse(` # Machine [machine] graphics = "off" {{if eq .architecture "x86_64" -}} type = "q35" {{end -}} {{if eq .architecture "aarch64" -}} type = "virt" gic-version = "max" {{end -}} {{if eq .architecture "ppc64le" -}} type = "pseries" cap-large-decr = "off" {{end -}} {{if eq .architecture "s390x" -}} type = "s390-ccw-virtio" {{end -}} accel = "kvm" usb = "off" {{if eq .architecture "x86_64" -}} [global] driver = "ICH9-LPC" property = "disable_s3" value = "1" [global] driver = "ICH9-LPC" property = "disable_s4" value = "1" {{end -}} [boot-opts] strict = "on" `)) var qemuMemory = template.Must(template.New("qemuMemory").Parse(` # Memory [memory] size = "{{.memSizeBytes}}M" `)) var qemuSerial = template.Must(template.New("qemuSerial").Parse(` # Virtual serial bus [device "dev-qemu_serial"] {{- if eq .bus "pci" "pcie"}} driver = "virtio-serial-pci" bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{if eq .bus "ccw" -}} driver = "virtio-serial-ccw" {{- end}} {{if .multifunction -}} multifunction = "on" {{- end }} # LXD serial identifier [chardev "{{.chardevName}}"] backend = "ringbuf" size = "{{.ringbufSizeBytes}}B" [device "qemu_serial"] driver = "virtserialport" name = "org.linuxcontainers.lxd" chardev = "{{.chardevName}}" bus = "dev-qemu_serial.0" # Spice agent [chardev "qemu_spice-chardev"] backend = "spicevmc" name = "vdagent" [device "qemu_spice"] driver = "virtserialport" name = "com.redhat.spice.0" chardev = "qemu_spice-chardev" bus = "dev-qemu_serial.0" # Spice folder [chardev "qemu_spicedir-chardev"] backend = "spiceport" name = "org.spice-space.webdav.0" [device "qemu_spicedir"] driver = "virtserialport" name = "org.spice-space.webdav.0" chardev = "qemu_spicedir-chardev" bus = "dev-qemu_serial.0" `)) var qemuPCIe = template.Must(template.New("qemuPCIe").Parse(` [device "{{.portName}}"] 
driver = "pcie-root-port" bus = "pcie.0" addr = "{{.addr}}" chassis = "{{.index}}" {{if .multifunction -}} multifunction = "on" {{- end }} `)) var qemuSCSI = template.Must(template.New("qemuSCSI").Parse(` # SCSI controller [device "qemu_scsi"] {{- if eq .bus "pci" "pcie"}} driver = "virtio-scsi-pci" bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{if eq .bus "ccw" -}} driver = "virtio-scsi-ccw" {{- end}} {{if .multifunction -}} multifunction = "on" {{- end }} `)) var qemuBalloon = template.Must(template.New("qemuBalloon").Parse(` # Balloon driver [device "qemu_balloon"] {{- if eq .bus "pci" "pcie"}} driver = "virtio-balloon-pci" bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{if eq .bus "ccw" -}} driver = "virtio-balloon-ccw" {{- end}} {{if .multifunction -}} multifunction = "on" {{- end }} `)) var qemuRNG = template.Must(template.New("qemuRNG").Parse(` # Random number generator [object "qemu_rng"] qom-type = "rng-random" filename = "/dev/urandom" [device "dev-qemu_rng"] {{- if eq .bus "pci" "pcie"}} driver = "virtio-rng-pci" bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{if eq .bus "ccw" -}} driver = "virtio-rng-ccw" {{- end}} rng = "qemu_rng" {{if .multifunction -}} multifunction = "on" {{- end }} `)) var qemuVsock = template.Must(template.New("qemuVsock").Parse(` # Vsock [device "qemu_vsock"] {{- if eq .bus "pci" "pcie"}} driver = "vhost-vsock-pci" bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{if eq .bus "ccw" -}} driver = "vhost-vsock-ccw" {{- end}} guest-cid = "{{.vsockID}}" {{if .multifunction -}} multifunction = "on" {{- end }} `)) var qemuGPU = template.Must(template.New("qemuGPU").Parse(` # GPU [device "qemu_gpu"] {{- if eq .bus "pci" "pcie"}} {{if eq .architecture "x86_64" -}} driver = "virtio-vga" {{- else}} driver = "virtio-gpu-pci" {{- end}} bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{if eq .bus "ccw" -}} driver = "virtio-gpu-ccw" {{- end}} {{if .multifunction -}} multifunction = "on" {{- end }} `)) var qemuKeyboard 
= template.Must(template.New("qemuKeyboard").Parse(` # Input [device "qemu_keyboard"] {{- if eq .bus "pci" "pcie"}} driver = "virtio-keyboard-pci" bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{if eq .bus "ccw" -}} driver = "virtio-keyboard-ccw" {{- end}} {{if .multifunction -}} multifunction = "on" {{- end }} `)) var qemuTablet = template.Must(template.New("qemuTablet").Parse(` # Input [device "qemu_tablet"] {{- if eq .bus "pci" "pcie"}} driver = "virtio-tablet-pci" bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{if eq .bus "ccw" -}} driver = "virtio-tablet-ccw" {{- end}} {{if .multifunction -}} multifunction = "on" {{- end }} `)) var qemuCPU = template.Must(template.New("qemuCPU").Parse(` # CPU [smp-opts] cpus = "{{.cpuCount}}" sockets = "{{.cpuSockets}}" cores = "{{.cpuCores}}" threads = "{{.cpuThreads}}" {{if eq .architecture "x86_64" -}} {{$memory := .memory -}} {{$hugepages := .hugepages -}} {{if .cpuNumaHostNodes -}} {{range $index, $element := .cpuNumaHostNodes}} [object "mem{{$index}}"] {{if ne $hugepages "" -}} qom-type = "memory-backend-file" mem-path = "{{$hugepages}}" prealloc = "on" discard-data = "on" share = "on" {{- else}} qom-type = "memory-backend-memfd" {{- end }} size = "{{$memory}}M" policy = "bind" {{- if eq $.qemuMemObjectFormat "indexed"}} host-nodes.0 = "{{$element}}" {{- else}} host-nodes = "{{$element}}" {{- end}} [numa] type = "node" nodeid = "{{$index}}" memdev = "mem{{$index}}" {{end}} {{else}} [object "mem0"] {{if ne $hugepages "" -}} qom-type = "memory-backend-file" mem-path = "{{$hugepages}}" prealloc = "on" discard-data = "on" {{- else}} qom-type = "memory-backend-memfd" {{- end }} size = "{{$memory}}M" share = "on" [numa] type = "node" nodeid = "0" memdev = "mem0" {{end}} {{range .cpuNumaMapping}} [numa] type = "cpu" node-id = "{{.node}}" socket-id = "{{.socket}}" core-id = "{{.core}}" thread-id = "{{.thread}}" {{end}} {{end}} `)) var qemuControlSocket = template.Must(template.New("qemuControlSocket").Parse(` # Qemu 
control [chardev "monitor"] backend = "socket" path = "{{.path}}" server = "on" wait = "off" [mon] chardev = "monitor" mode = "control" `)) var qemuConsole = template.Must(template.New("qemuConsole").Parse(` # Console [chardev "console"] backend = "socket" path = "{{.path}}" server = "on" wait = "off" `)) var qemuDriveFirmware = template.Must(template.New("qemuDriveFirmware").Parse(` {{if eq .architecture "x86_64" "aarch64" -}} # Firmware (read only) [drive] file = "{{.roPath}}" if = "pflash" format = "raw" unit = "0" readonly = "on" # Firmware settings (writable) [drive] file = "{{.nvramPath}}" if = "pflash" format = "raw" unit = "1" {{- end }} `)) // Devices use "qemu_" prefix indicating that this is a internally named device. var qemuDriveConfig = template.Must(template.New("qemuDriveConfig").Parse(` # Config drive ({{.protocol}}) {{- if eq .protocol "9p" }} [fsdev "qemu_config"] fsdriver = "local" security_model = "none" readonly = "on" path = "{{.path}}" {{- else if eq .protocol "virtio-fs" }} [chardev "qemu_config"] backend = "socket" path = "{{.path}}" {{- end }} [device "dev-qemu_config-drive-{{.protocol}}"] {{- if eq .bus "pci" "pcie"}} {{- if eq .protocol "9p" }} driver = "virtio-9p-pci" {{- else if eq .protocol "virtio-fs" }} driver = "vhost-user-fs-pci" {{- end }} bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{- if eq .bus "ccw" }} {{- if eq .protocol "9p" }} driver = "virtio-9p-ccw" {{- else if eq .protocol "virtio-fs" }} driver = "vhost-user-fs-ccw" {{- end }} {{- end}} {{- if eq .protocol "9p" }} mount_tag = "config" fsdev = "qemu_config" {{- else if eq .protocol "virtio-fs" }} chardev = "qemu_config" tag = "config" {{- end }} {{if .multifunction -}} multifunction = "on" {{- end }} `)) // Devices use "lxd_" prefix indicating that this is a user named device. 
var qemuDriveDir = template.Must(template.New("qemuDriveDir").Parse(` # {{.devName}} drive ({{.protocol}}) {{- if eq .protocol "9p" }} [fsdev "lxd_{{.devName}}"] fsdriver = "proxy" sock_fd = "{{.proxyFD}}" {{- if .readonly}} readonly = "on" {{- else}} readonly = "off" {{- end}} {{- else if eq .protocol "virtio-fs" }} [chardev "lxd_{{.devName}}"] backend = "socket" path = "{{.path}}" {{- end }} [device "dev-lxd_{{.devName}}-{{.protocol}}"] {{- if eq .bus "pci" "pcie"}} {{- if eq .protocol "9p" }} driver = "virtio-9p-pci" {{- else if eq .protocol "virtio-fs" }} driver = "vhost-user-fs-pci" {{- end }} bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end -}} {{if eq .bus "ccw" -}} {{- if eq .protocol "9p" }} driver = "virtio-9p-ccw" {{- else if eq .protocol "virtio-fs" }} driver = "vhost-user-fs-ccw" {{- end }} {{- end}} {{- if eq .protocol "9p" }} fsdev = "lxd_{{.devName}}" mount_tag = "{{.mountTag}}" {{- else if eq .protocol "virtio-fs" }} chardev = "lxd_{{.devName}}" tag = "{{.mountTag}}" {{- end }} {{if .multifunction -}} multifunction = "on" {{- end }} `)) // Devices use "lxd_" prefix indicating that this is a user named device. var qemuPCIPhysical = template.Must(template.New("qemuPCIPhysical").Parse(` # PCI card ("{{.devName}}" device) [device "dev-lxd_{{.devName}}"] {{- if eq .bus "pci" "pcie"}} driver = "vfio-pci" bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{if eq .bus "ccw" -}} driver = "vfio-ccw" {{- end}} host = "{{.pciSlotName}}" {{if .bootIndex -}} bootindex = "{{.bootIndex}}" {{- end }} {{if .multifunction -}} multifunction = "on" {{- end }} `)) // Devices use "lxd_" prefix indicating that this is a user named device. 
var qemuGPUDevPhysical = template.Must(template.New("qemuGPUDevPhysical").Parse(` # GPU card ("{{.devName}}" device) [device "dev-lxd_{{.devName}}"] {{- if eq .bus "pci" "pcie"}} driver = "vfio-pci" bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{if eq .bus "ccw" -}} driver = "vfio-ccw" {{- end}} {{- if ne .vgpu "" -}} sysfsdev = "/sys/bus/mdev/devices/{{.vgpu}}" {{- else}} host = "{{.pciSlotName}}" {{if .vga -}} x-vga = "on" {{- end }} {{- end }} {{if .multifunction -}} multifunction = "on" {{- end }} `)) var qemuUSB = template.Must(template.New("qemuUSB").Parse(` # USB controller [device "qemu_usb"] driver = "qemu-xhci" bus = "{{.devBus}}" addr = "{{.devAddr}}" p2 = "{{.ports}}" p3 = "{{.ports}}" {{if .multifunction -}} multifunction = "on" {{- end }} [chardev "qemu_spice-usb-chardev1"] backend = "spicevmc" name = "usbredir" [chardev "qemu_spice-usb-chardev2"] backend = "spicevmc" name = "usbredir" [chardev "qemu_spice-usb-chardev3"] backend = "spicevmc" name = "usbredir" [device "qemu_spice-usb1"] driver = "usb-redir" chardev = "qemu_spice-usb-chardev1" [device "qemu_spice-usb2"] driver = "usb-redir" chardev = "qemu_spice-usb-chardev2" [device "qemu_spice-usb3"] driver = "usb-redir" chardev = "qemu_spice-usb-chardev3" `)) var qemuTPM = template.Must(template.New("qemuTPM").Parse(` [chardev "qemu_tpm-chardev_{{.devName}}"] backend = "socket" path = "{{.path}}" [tpmdev "qemu_tpm-tpmdev_{{.devName}}"] type = "emulator" chardev = "qemu_tpm-chardev_{{.devName}}" [device "dev-lxd_{{.devName}}"] driver = "tpm-crb" tpmdev = "qemu_tpm-tpmdev_{{.devName}}" `)) lxd/instance/drivers/driver/qemu/templates: Removes duplicated arch check in qemuDriveFirmware This is done inside LXD now. Signed-off-by: Thomas Parrott <6b778ce645fb0e3dde76d79eccad490955b1ae74@canonical.com> package drivers import ( "text/template" ) // Base config. This is common for all VMs and has no variables in it. 
var qemuBase = template.Must(template.New("qemuBase").Parse(` # Machine [machine] graphics = "off" {{if eq .architecture "x86_64" -}} type = "q35" {{end -}} {{if eq .architecture "aarch64" -}} type = "virt" gic-version = "max" {{end -}} {{if eq .architecture "ppc64le" -}} type = "pseries" cap-large-decr = "off" {{end -}} {{if eq .architecture "s390x" -}} type = "s390-ccw-virtio" {{end -}} accel = "kvm" usb = "off" {{if eq .architecture "x86_64" -}} [global] driver = "ICH9-LPC" property = "disable_s3" value = "1" [global] driver = "ICH9-LPC" property = "disable_s4" value = "1" {{end -}} [boot-opts] strict = "on" `)) var qemuMemory = template.Must(template.New("qemuMemory").Parse(` # Memory [memory] size = "{{.memSizeBytes}}M" `)) var qemuSerial = template.Must(template.New("qemuSerial").Parse(` # Virtual serial bus [device "dev-qemu_serial"] {{- if eq .bus "pci" "pcie"}} driver = "virtio-serial-pci" bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{if eq .bus "ccw" -}} driver = "virtio-serial-ccw" {{- end}} {{if .multifunction -}} multifunction = "on" {{- end }} # LXD serial identifier [chardev "{{.chardevName}}"] backend = "ringbuf" size = "{{.ringbufSizeBytes}}B" [device "qemu_serial"] driver = "virtserialport" name = "org.linuxcontainers.lxd" chardev = "{{.chardevName}}" bus = "dev-qemu_serial.0" # Spice agent [chardev "qemu_spice-chardev"] backend = "spicevmc" name = "vdagent" [device "qemu_spice"] driver = "virtserialport" name = "com.redhat.spice.0" chardev = "qemu_spice-chardev" bus = "dev-qemu_serial.0" # Spice folder [chardev "qemu_spicedir-chardev"] backend = "spiceport" name = "org.spice-space.webdav.0" [device "qemu_spicedir"] driver = "virtserialport" name = "org.spice-space.webdav.0" chardev = "qemu_spicedir-chardev" bus = "dev-qemu_serial.0" `)) var qemuPCIe = template.Must(template.New("qemuPCIe").Parse(` [device "{{.portName}}"] driver = "pcie-root-port" bus = "pcie.0" addr = "{{.addr}}" chassis = "{{.index}}" {{if .multifunction -}} 
multifunction = "on" {{- end }} `)) var qemuSCSI = template.Must(template.New("qemuSCSI").Parse(` # SCSI controller [device "qemu_scsi"] {{- if eq .bus "pci" "pcie"}} driver = "virtio-scsi-pci" bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{if eq .bus "ccw" -}} driver = "virtio-scsi-ccw" {{- end}} {{if .multifunction -}} multifunction = "on" {{- end }} `)) var qemuBalloon = template.Must(template.New("qemuBalloon").Parse(` # Balloon driver [device "qemu_balloon"] {{- if eq .bus "pci" "pcie"}} driver = "virtio-balloon-pci" bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{if eq .bus "ccw" -}} driver = "virtio-balloon-ccw" {{- end}} {{if .multifunction -}} multifunction = "on" {{- end }} `)) var qemuRNG = template.Must(template.New("qemuRNG").Parse(` # Random number generator [object "qemu_rng"] qom-type = "rng-random" filename = "/dev/urandom" [device "dev-qemu_rng"] {{- if eq .bus "pci" "pcie"}} driver = "virtio-rng-pci" bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{if eq .bus "ccw" -}} driver = "virtio-rng-ccw" {{- end}} rng = "qemu_rng" {{if .multifunction -}} multifunction = "on" {{- end }} `)) var qemuVsock = template.Must(template.New("qemuVsock").Parse(` # Vsock [device "qemu_vsock"] {{- if eq .bus "pci" "pcie"}} driver = "vhost-vsock-pci" bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{if eq .bus "ccw" -}} driver = "vhost-vsock-ccw" {{- end}} guest-cid = "{{.vsockID}}" {{if .multifunction -}} multifunction = "on" {{- end }} `)) var qemuGPU = template.Must(template.New("qemuGPU").Parse(` # GPU [device "qemu_gpu"] {{- if eq .bus "pci" "pcie"}} {{if eq .architecture "x86_64" -}} driver = "virtio-vga" {{- else}} driver = "virtio-gpu-pci" {{- end}} bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{if eq .bus "ccw" -}} driver = "virtio-gpu-ccw" {{- end}} {{if .multifunction -}} multifunction = "on" {{- end }} `)) var qemuKeyboard = template.Must(template.New("qemuKeyboard").Parse(` # Input [device "qemu_keyboard"] {{- if eq .bus "pci" 
"pcie"}} driver = "virtio-keyboard-pci" bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{if eq .bus "ccw" -}} driver = "virtio-keyboard-ccw" {{- end}} {{if .multifunction -}} multifunction = "on" {{- end }} `)) var qemuTablet = template.Must(template.New("qemuTablet").Parse(` # Input [device "qemu_tablet"] {{- if eq .bus "pci" "pcie"}} driver = "virtio-tablet-pci" bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{if eq .bus "ccw" -}} driver = "virtio-tablet-ccw" {{- end}} {{if .multifunction -}} multifunction = "on" {{- end }} `)) var qemuCPU = template.Must(template.New("qemuCPU").Parse(` # CPU [smp-opts] cpus = "{{.cpuCount}}" sockets = "{{.cpuSockets}}" cores = "{{.cpuCores}}" threads = "{{.cpuThreads}}" {{if eq .architecture "x86_64" -}} {{$memory := .memory -}} {{$hugepages := .hugepages -}} {{if .cpuNumaHostNodes -}} {{range $index, $element := .cpuNumaHostNodes}} [object "mem{{$index}}"] {{if ne $hugepages "" -}} qom-type = "memory-backend-file" mem-path = "{{$hugepages}}" prealloc = "on" discard-data = "on" share = "on" {{- else}} qom-type = "memory-backend-memfd" {{- end }} size = "{{$memory}}M" policy = "bind" {{- if eq $.qemuMemObjectFormat "indexed"}} host-nodes.0 = "{{$element}}" {{- else}} host-nodes = "{{$element}}" {{- end}} [numa] type = "node" nodeid = "{{$index}}" memdev = "mem{{$index}}" {{end}} {{else}} [object "mem0"] {{if ne $hugepages "" -}} qom-type = "memory-backend-file" mem-path = "{{$hugepages}}" prealloc = "on" discard-data = "on" {{- else}} qom-type = "memory-backend-memfd" {{- end }} size = "{{$memory}}M" share = "on" [numa] type = "node" nodeid = "0" memdev = "mem0" {{end}} {{range .cpuNumaMapping}} [numa] type = "cpu" node-id = "{{.node}}" socket-id = "{{.socket}}" core-id = "{{.core}}" thread-id = "{{.thread}}" {{end}} {{end}} `)) var qemuControlSocket = template.Must(template.New("qemuControlSocket").Parse(` # Qemu control [chardev "monitor"] backend = "socket" path = "{{.path}}" server = "on" wait = "off" [mon] chardev 
= "monitor" mode = "control" `)) var qemuConsole = template.Must(template.New("qemuConsole").Parse(` # Console [chardev "console"] backend = "socket" path = "{{.path}}" server = "on" wait = "off" `)) var qemuDriveFirmware = template.Must(template.New("qemuDriveFirmware").Parse(` # Firmware (read only) [drive] file = "{{.roPath}}" if = "pflash" format = "raw" unit = "0" readonly = "on" # Firmware settings (writable) [drive] file = "{{.nvramPath}}" if = "pflash" format = "raw" unit = "1" `)) // Devices use "qemu_" prefix indicating that this is a internally named device. var qemuDriveConfig = template.Must(template.New("qemuDriveConfig").Parse(` # Config drive ({{.protocol}}) {{- if eq .protocol "9p" }} [fsdev "qemu_config"] fsdriver = "local" security_model = "none" readonly = "on" path = "{{.path}}" {{- else if eq .protocol "virtio-fs" }} [chardev "qemu_config"] backend = "socket" path = "{{.path}}" {{- end }} [device "dev-qemu_config-drive-{{.protocol}}"] {{- if eq .bus "pci" "pcie"}} {{- if eq .protocol "9p" }} driver = "virtio-9p-pci" {{- else if eq .protocol "virtio-fs" }} driver = "vhost-user-fs-pci" {{- end }} bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{- if eq .bus "ccw" }} {{- if eq .protocol "9p" }} driver = "virtio-9p-ccw" {{- else if eq .protocol "virtio-fs" }} driver = "vhost-user-fs-ccw" {{- end }} {{- end}} {{- if eq .protocol "9p" }} mount_tag = "config" fsdev = "qemu_config" {{- else if eq .protocol "virtio-fs" }} chardev = "qemu_config" tag = "config" {{- end }} {{if .multifunction -}} multifunction = "on" {{- end }} `)) // Devices use "lxd_" prefix indicating that this is a user named device. 
var qemuDriveDir = template.Must(template.New("qemuDriveDir").Parse(` # {{.devName}} drive ({{.protocol}}) {{- if eq .protocol "9p" }} [fsdev "lxd_{{.devName}}"] fsdriver = "proxy" sock_fd = "{{.proxyFD}}" {{- if .readonly}} readonly = "on" {{- else}} readonly = "off" {{- end}} {{- else if eq .protocol "virtio-fs" }} [chardev "lxd_{{.devName}}"] backend = "socket" path = "{{.path}}" {{- end }} [device "dev-lxd_{{.devName}}-{{.protocol}}"] {{- if eq .bus "pci" "pcie"}} {{- if eq .protocol "9p" }} driver = "virtio-9p-pci" {{- else if eq .protocol "virtio-fs" }} driver = "vhost-user-fs-pci" {{- end }} bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end -}} {{if eq .bus "ccw" -}} {{- if eq .protocol "9p" }} driver = "virtio-9p-ccw" {{- else if eq .protocol "virtio-fs" }} driver = "vhost-user-fs-ccw" {{- end }} {{- end}} {{- if eq .protocol "9p" }} fsdev = "lxd_{{.devName}}" mount_tag = "{{.mountTag}}" {{- else if eq .protocol "virtio-fs" }} chardev = "lxd_{{.devName}}" tag = "{{.mountTag}}" {{- end }} {{if .multifunction -}} multifunction = "on" {{- end }} `)) // Devices use "lxd_" prefix indicating that this is a user named device. var qemuPCIPhysical = template.Must(template.New("qemuPCIPhysical").Parse(` # PCI card ("{{.devName}}" device) [device "dev-lxd_{{.devName}}"] {{- if eq .bus "pci" "pcie"}} driver = "vfio-pci" bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{if eq .bus "ccw" -}} driver = "vfio-ccw" {{- end}} host = "{{.pciSlotName}}" {{if .bootIndex -}} bootindex = "{{.bootIndex}}" {{- end }} {{if .multifunction -}} multifunction = "on" {{- end }} `)) // Devices use "lxd_" prefix indicating that this is a user named device. 
var qemuGPUDevPhysical = template.Must(template.New("qemuGPUDevPhysical").Parse(` # GPU card ("{{.devName}}" device) [device "dev-lxd_{{.devName}}"] {{- if eq .bus "pci" "pcie"}} driver = "vfio-pci" bus = "{{.devBus}}" addr = "{{.devAddr}}" {{- end}} {{if eq .bus "ccw" -}} driver = "vfio-ccw" {{- end}} {{- if ne .vgpu "" -}} sysfsdev = "/sys/bus/mdev/devices/{{.vgpu}}" {{- else}} host = "{{.pciSlotName}}" {{if .vga -}} x-vga = "on" {{- end }} {{- end }} {{if .multifunction -}} multifunction = "on" {{- end }} `)) var qemuUSB = template.Must(template.New("qemuUSB").Parse(` # USB controller [device "qemu_usb"] driver = "qemu-xhci" bus = "{{.devBus}}" addr = "{{.devAddr}}" p2 = "{{.ports}}" p3 = "{{.ports}}" {{if .multifunction -}} multifunction = "on" {{- end }} [chardev "qemu_spice-usb-chardev1"] backend = "spicevmc" name = "usbredir" [chardev "qemu_spice-usb-chardev2"] backend = "spicevmc" name = "usbredir" [chardev "qemu_spice-usb-chardev3"] backend = "spicevmc" name = "usbredir" [device "qemu_spice-usb1"] driver = "usb-redir" chardev = "qemu_spice-usb-chardev1" [device "qemu_spice-usb2"] driver = "usb-redir" chardev = "qemu_spice-usb-chardev2" [device "qemu_spice-usb3"] driver = "usb-redir" chardev = "qemu_spice-usb-chardev3" `)) var qemuTPM = template.Must(template.New("qemuTPM").Parse(` [chardev "qemu_tpm-chardev_{{.devName}}"] backend = "socket" path = "{{.path}}" [tpmdev "qemu_tpm-tpmdev_{{.devName}}"] type = "emulator" chardev = "qemu_tpm-chardev_{{.devName}}" [device "dev-lxd_{{.devName}}"] driver = "tpm-crb" tpmdev = "qemu_tpm-tpmdev_{{.devName}}" `))
package gqt_test import ( "encoding/json" "fmt" "io" "io/ioutil" "net" "os" "os/exec" "path" "path/filepath" "strconv" "strings" "time" "code.cloudfoundry.org/garden" "code.cloudfoundry.org/guardian/gqt/cgrouper" "code.cloudfoundry.org/guardian/gqt/runner" . "code.cloudfoundry.org/guardian/matchers" . "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo/extensions/table" . "github.com/onsi/gomega" "github.com/onsi/gomega/gbytes" "github.com/onsi/gomega/gexec" ) var _ = Describe("Creating a Container", func() { var ( client *runner.RunningGarden container garden.Container initialSockets int initialPipes int ) JustBeforeEach(func() { client = runner.Start(config) initialSockets = numOpenSockets(client.Pid) initialPipes = numPipes(client.Pid) }) AfterEach(func() { Expect(client.DestroyAndStop()).To(Succeed()) }) It("has the expected device list allowed", func() { var err error container, err = client.Create(garden.ContainerSpec{}) Expect(err).NotTo(HaveOccurred()) parentPath, err := cgrouper.GetCGroupPath(client.CgroupsRootPath(), "devices", strconv.Itoa(GinkgoParallelNode()), false) Expect(err).NotTo(HaveOccurred()) cgroupPath := filepath.Join(parentPath, container.Handle()) content := readFileString(filepath.Join(cgroupPath, "devices.list")) expectedAllowedDevices := []string{ "c 1:3 rwm", "c 5:0 rwm", "c 1:8 rwm", "c 1:9 rwm", "c 1:5 rwm", "c 1:7 rwm", "c 10:229 rwm", "c *:* m", "b *:* m", "c 5:1 rwm", "c 136:* rwm", "c 5:2 rwm", "c 10:200 rwm", } contentLines := strings.Split(strings.TrimSpace(content), "\n") Expect(contentLines).To(HaveLen(len(expectedAllowedDevices))) Expect(contentLines).To(ConsistOf(expectedAllowedDevices)) }) Context("when creating fails", func() { // cause Create to fail by specifying an invalid network CIDR address var containerSpec = garden.ContainerSpec{ Network: "not-a-valid-network", } It("returns a nice error rather than timing out", func() { _, err := client.Create(containerSpec) Expect(err).To(MatchError(ContainSubstring("invalid CIDR 
address"))) }) It("cleans up the depot directory", func() { _, err := client.Create(containerSpec) Expect(err).To(HaveOccurred()) Expect(ioutil.ReadDir(client.DepotDir)).To(BeEmpty()) }) It("cleans up the groot store", func() { // pre-warm cache to avoid test pollution // i.e. ensure base layers that are never removed are already in the groot store _, err := client.Create(containerSpec) Expect(err).To(HaveOccurred()) prev, err := ioutil.ReadDir(filepath.Join(client.TmpDir, "groot_store", "images")) Expect(err).NotTo(HaveOccurred()) _, err = client.Create(containerSpec) Expect(err).To(HaveOccurred()) Eventually(func() int { num, err := ioutil.ReadDir(filepath.Join(client.TmpDir, "groot_store", "images")) Expect(err).NotTo(HaveOccurred()) return len(num) }).Should(Equal(len(prev))) }) Context("because runc doesn't exist", func() { BeforeEach(func() { skipIfContainerd() config.RuntimePluginBin = "/tmp/does/not/exist" }) It("returns a sensible error", func() { _, err := client.Create(garden.ContainerSpec{}) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("no such file or directory")) }) }) }) Context("after creating a container without a specified handle", func() { var ( privileged bool initProcPid int ) BeforeEach(func() { privileged = false }) JustBeforeEach(func() { var err error container, err = client.Create(garden.ContainerSpec{ Privileged: privileged, }) Expect(err).NotTo(HaveOccurred()) initProcPid = initProcessPID(container.Handle()) }) It("should create a depot subdirectory based on the container handle", func() { Expect(container.Handle()).NotTo(BeEmpty()) Expect(filepath.Join(client.DepotDir, container.Handle())).To(BeADirectory()) Expect(filepath.Join(client.DepotDir, container.Handle(), "config.json")).To(BeARegularFile()) }) It("should lookup the right container", func() { lookupContainer, lookupError := client.Lookup(container.Handle()) Expect(lookupError).NotTo(HaveOccurred()) Expect(lookupContainer).To(Equal(container)) }) 
It("should not leak pipes", func() { process, err := container.Run(garden.ProcessSpec{Path: "echo", Args: []string{"hello"}}, garden.ProcessIO{}) Expect(err).NotTo(HaveOccurred()) Expect(process.Wait()).To(Equal(0)) Expect(client.Destroy(container.Handle())).To(Succeed()) container = nil // avoid double-destroying Eventually(func() int { return numPipes(client.Pid) }).Should(Equal(initialPipes)) }) It("should not leak sockets", func() { Expect(client.Destroy(container.Handle())).To(Succeed()) container = nil // avoid double-destroying Eventually(func() int { return numOpenSockets(client.Pid) }).Should(Equal(initialSockets)) }) It("should avoid leaving zombie processes", func() { Expect(client.Destroy(container.Handle())).To(Succeed()) container = nil // avoid double-destroying Eventually(func() *gexec.Session { sess, err := gexec.Start(exec.Command("ps"), GinkgoWriter, GinkgoWriter) Expect(err).NotTo(HaveOccurred()) Eventually(sess).Should(gexec.Exit(0)) return sess }, "10s").ShouldNot(gbytes.Say("defunct")) // this is a pretty broad test since we're looking at all processes, so give it quite a while to see no defuncts }) DescribeTable("placing the container in to all namespaces", func(ns string) { hostNSInode, err := os.Readlink(fmt.Sprintf("/proc/1/ns/%s", ns)) Expect(err).NotTo(HaveOccurred()) containerNSInode, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/%s", initProcPid, ns)) Expect(err).NotTo(HaveOccurred()) Expect(hostNSInode).NotTo(Equal(containerNSInode)) }, Entry("should place the container in to the NET namespace", "net"), Entry("should place the container in to the IPC namespace", "ipc"), Entry("should place the container in to the UTS namespace", "uts"), Entry("should place the container in to the PID namespace", "pid"), Entry("should place the container in to the MNT namespace", "mnt"), Entry("should place the container in to the USER namespace", "user"), ) Context("which is privileged", func() { BeforeEach(func() { privileged = true }) It("should not 
place the container in its own user namespace", func() { hostNS, err := gexec.Start(exec.Command("ls", "-l", "/proc/1/ns/user"), GinkgoWriter, GinkgoWriter) Expect(err).NotTo(HaveOccurred()) containerNS, err := gexec.Start(exec.Command("ls", "-l", fmt.Sprintf("/proc/%d/ns/user", initProcPid)), GinkgoWriter, GinkgoWriter) Expect(err).NotTo(HaveOccurred()) Eventually(containerNS).Should(gexec.Exit(0)) Eventually(hostNS).Should(gexec.Exit(0)) hostFD := strings.Split(string(hostNS.Out.Contents()), ">")[1] containerFD := strings.Split(string(containerNS.Out.Contents()), ">")[1] Expect(hostFD).To(Equal(containerFD)) }) }) }) Context("after creating a container with a specified root filesystem", func() { var ( tmpDir string rootFSPath string ) JustBeforeEach(func() { var err error rootFSPath = createRootfsTar(func(unpackedRootfs string) { Expect(ioutil.WriteFile(filepath.Join(unpackedRootfs, "my-file"), []byte("some-content"), 0644)).To(Succeed()) Expect(os.Mkdir(path.Join(unpackedRootfs, "somedir"), 0777)).To(Succeed()) }) container, err = client.Create(garden.ContainerSpec{ RootFSPath: rootFSPath, }) Expect(err).NotTo(HaveOccurred()) }) AfterEach(func() { Expect(os.RemoveAll(tmpDir)).To(Succeed()) }) It("provides the containers with the right rootfs", func() { Expect(container).To(HaveFile("/my-file")) By("Isolating the filesystem propertly for multiple containers") runInContainer(container, "touch", []string{"/somedir/created-file"}) Expect(container).To(HaveFile("/somedir/created-file")) container2, err := client.Create(garden.ContainerSpec{ RootFSPath: rootFSPath, }) Expect(err).NotTo(HaveOccurred()) Expect(container2).To(HaveFile("/my-file")) Expect(container2).NotTo(HaveFile("/somedir/created-file")) }) }) Context("after creating a container with a specified handle", func() { It("should lookup the right container for the handle", func() { container, err := client.Create(garden.ContainerSpec{ Handle: "container-banana", }) Expect(err).NotTo(HaveOccurred()) 
lookupContainer, lookupError := client.Lookup("container-banana") Expect(lookupError).NotTo(HaveOccurred()) Expect(lookupContainer).To(Equal(container)) }) It("allow the container to be created with the same name after destroying", func() { container, err := client.Create(garden.ContainerSpec{ Handle: "another-banana", }) Expect(err).NotTo(HaveOccurred()) Expect(client.Destroy(container.Handle())).To(Succeed()) container, err = client.Create(garden.ContainerSpec{ Handle: "another-banana", }) Expect(err).NotTo(HaveOccurred()) }) }) //TODO why duplicate? Context("when creating a container fails", func() { It("should not leak networking configuration", func() { _, err := client.Create(garden.ContainerSpec{ Network: fmt.Sprintf("172.250.%d.20/24", GinkgoParallelNode()), RootFSPath: "/banana/does/not/exist", }) Expect(err).To(HaveOccurred()) session, err := gexec.Start( exec.Command("ifconfig"), GinkgoWriter, GinkgoWriter, ) Expect(err).NotTo(HaveOccurred()) Consistently(session).ShouldNot(gbytes.Say(fmt.Sprintf("172-250-%d-0", GinkgoParallelNode()))) }) }) Context("when creating a container with NetOut rules", func() { var container garden.Container JustBeforeEach(func() { config.DenyNetworks = []string{"0.0.0.0/0"} rules := []garden.NetOutRule{ garden.NetOutRule{ Protocol: garden.ProtocolTCP, Networks: []garden.IPRange{garden.IPRangeFromIP(net.ParseIP("8.8.8.8"))}, Ports: []garden.PortRange{garden.PortRangeFromPort(53)}, }, garden.NetOutRule{ Protocol: garden.ProtocolTCP, Networks: []garden.IPRange{garden.IPRangeFromIP(net.ParseIP("8.8.4.4"))}, Ports: []garden.PortRange{garden.PortRangeFromPort(53)}, }, } var err error container, err = client.Create(garden.ContainerSpec{ NetOut: rules, }) Expect(err).NotTo(HaveOccurred()) }) It("provides connectivity to the addresses provided", func() { Expect(checkConnectionWithRetries(container, "8.8.8.8", 53, DEFAULT_RETRIES)).To(Succeed()) Expect(checkConnectionWithRetries(container, "8.8.4.4", 53, DEFAULT_RETRIES)).To(Succeed()) 
}) }) Context("when creating a container with NetIn rules", func() { var container garden.Container JustBeforeEach(func() { netIn := []garden.NetIn{ garden.NetIn{HostPort: 9888, ContainerPort: 9080}, } var err error container, err = client.Create(garden.ContainerSpec{ NetIn: netIn, }) Expect(err).NotTo(HaveOccurred()) }) It("maps the provided host port to the container port", func() { Expect(listenInContainer(container, 9080)).To(Succeed()) externalIP := externalIP(container) Eventually(func() *gexec.Session { return sendRequest(externalIP, 9888).Wait() }). Should(gbytes.Say(fmt.Sprintf("%d", 9080))) }) }) Context("when creating a container and specifying CPU configuration", func() { createContainerWithCpuConfig := func(weight, shares uint64) (garden.Container, error) { limits := garden.Limits{ CPU: garden.CPULimits{ Weight: weight, LimitInShares: shares, }, } container, err := client.Create(garden.ContainerSpec{ Limits: limits, }) return container, err } checkCPUSharesInContainer := func(container garden.Container, clientPid int, expected int) { cpuset := strings.TrimSpace(readFileString(fmt.Sprintf("/proc/%d/cpuset", clientPid))) cpuset = strings.TrimLeft(cpuset, "/") cpuSharesPath := fmt.Sprintf("%s/cpu/%s/garden-%s/%s/cpu.shares", client.CgroupsRootPath(), cpuset, config.Tag, container.Handle()) cpuShares := strings.TrimSpace(readFileString(cpuSharesPath)) Expect(cpuShares).To(Equal(strconv.Itoa(expected))) } It("can set the cpu weight", func() { container, err := createContainerWithCpuConfig(2, 0) Expect(err).NotTo(HaveOccurred()) checkCPUSharesInContainer(container, client.Pid, 2) }) It("should return an error when the cpu shares is invalid", func() { _, err := createContainerWithCpuConfig(1, 0) Expect(err.Error()).To(ContainSubstring("The minimum allowed cpu-shares is 2")) }) It("should use the default weight value when neither the cpu share or weight are set", func() { container, err := createContainerWithCpuConfig(0, 0) Expect(err).NotTo(HaveOccurred()) 
checkCPUSharesInContainer(container, client.Pid, 1024) }) Context("when LimitInShares is set", func() { It("creates a container with the shares", func() { container, err := createContainerWithCpuConfig(0, 123) Expect(err).NotTo(HaveOccurred()) checkCPUSharesInContainer(container, client.Pid, 123) }) }) Context("when both Weight and LimitInShares are set", func() { It("Weight has precedence", func() { container, err := createContainerWithCpuConfig(123, 456) Expect(err).NotTo(HaveOccurred()) checkCPUSharesInContainer(container, client.Pid, 123) }) }) }) Describe("block IO weight", func() { BeforeEach(func() { config.DefaultBlkioWeight = uint64ptr(400) }) checkBlockIOWeightInContainer := func(container garden.Container, expected string) { parentCgroupPath, err := cgrouper.GetCGroup("blkio") Expect(err).NotTo(HaveOccurred()) parentCgroupPath = strings.TrimLeft(parentCgroupPath, "/") blkIOWeightPath := fmt.Sprintf("%s/blkio/%s/garden-%s/%s/blkio.weight", client.CgroupsRootPath(), parentCgroupPath, config.Tag, container.Handle()) blkIOWeight := strings.TrimSpace(readFileString(blkIOWeightPath)) Expect(blkIOWeight).To(Equal(expected)) } It("uses the specified block IO weight", func() { container, err := client.Create(garden.ContainerSpec{}) Expect(err).NotTo(HaveOccurred()) checkBlockIOWeightInContainer(container, "400") }) Context("when specifying a block IO weight of 0", func() { BeforeEach(func() { config.DefaultBlkioWeight = uint64ptr(0) }) It("uses the system default value of 500", func() { container, err := client.Create(garden.ContainerSpec{}) Expect(err).NotTo(HaveOccurred()) checkBlockIOWeightInContainer(container, "500") }) }) Context("when specifying block IO weight outside the range 10 - 1000", func() { BeforeEach(func() { config.DefaultBlkioWeight = uint64ptr(9) }) It("returns an out of range error", func() { _, err := client.Create(garden.ContainerSpec{}) Expect(err.Error()).To(ContainSubstring("numerical result out of range")) }) }) }) Context("when running 
with an external network plugin", func() { var pluginOutput string BeforeEach(func() { tmpFile := path.Join(tempDir("", "netplugtest"), "iwasrun.log") config.NetworkPluginBin = binaries.NetworkPlugin config.NetworkPluginExtraArgs = []string{tmpFile, "/dev/null"} }) Context("when the plugin returns a properties key", func() { BeforeEach(func() { pluginOutput = `{"properties": {"key":"value", "garden.network.container-ip":"10.10.24.3"}}` config.NetworkPluginExtraArgs = append(config.NetworkPluginExtraArgs, pluginOutput) }) It("does not run kawasaki", func() { container, err := client.Create(garden.ContainerSpec{}) Expect(err).NotTo(HaveOccurred()) out := gbytes.NewBuffer() process, err := container.Run(garden.ProcessSpec{ Path: "ip", Args: []string{ "-o", "link", "show", }, }, garden.ProcessIO{ Stdout: io.MultiWriter(GinkgoWriter, out), }) Expect(err).NotTo(HaveOccurred()) exitCode, err := process.Wait() Expect(err).NotTo(HaveOccurred()) Expect(exitCode).To(BeZero()) // ip link appends a new line on the end so let's trim that first contents := strings.TrimRight(string(out.Contents()), "\n") // Check that we only have 1 interface, the loopback interface Expect(strings.Split(contents, "\n")).To(HaveLen(1)) Expect(contents).To(ContainSubstring("LOOPBACK")) }) }) Context("when the external network plugin returns invalid JSON", func() { BeforeEach(func() { pluginOutput = "invalid-json" config.NetworkPluginExtraArgs = append(config.NetworkPluginExtraArgs, pluginOutput) }) It("returns a useful error message", func() { _, err := client.Create(garden.ContainerSpec{}) Expect(err).To(MatchError(ContainSubstring("unmarshaling result from external networker: invalid character"))) }) }) }) It("does not make containers available to lookup until creation is completed", func() { handle := "handlecake" assertionsComplete := make(chan struct{}) go func(done chan<- struct{}) { defer GinkgoRecover() defer close(done) var lookupContainer garden.Container Eventually(func() error { var err 
error lookupContainer, err = client.Lookup(handle) return err }, time.Second*20, time.Millisecond*200).ShouldNot(HaveOccurred()) // Properties used to be set after containers were available from lookup Expect(lookupContainer.Properties()).To(HaveKeyWithValue("somename", "somevalue")) }(assertionsComplete) _, err := client.Create(garden.ContainerSpec{ Handle: handle, Properties: garden.Properties{"somename": "somevalue"}, }) Expect(err).NotTo(HaveOccurred()) <-assertionsComplete }) Context("create more containers than the maxkeyring limit", func() { BeforeEach(func() { Expect(ioutil.WriteFile("/proc/sys/kernel/keys/maxkeys", []byte("1"), 0644)).To(Succeed()) }) AfterEach(func() { Expect(ioutil.WriteFile("/proc/sys/kernel/keys/maxkeys", []byte("200"), 0644)).To(Succeed()) }) It("works", func() { c1, err := client.Create(garden.ContainerSpec{}) Expect(err).NotTo(HaveOccurred()) c2, err := client.Create(garden.ContainerSpec{}) Expect(err).NotTo(HaveOccurred()) Expect(client.Destroy(c1.Handle())).To(Succeed()) Expect(client.Destroy(c2.Handle())).To(Succeed()) }) }) Context("when creating more than --max-containers containers", func() { BeforeEach(func() { config.MaxContainers = uint64ptr(1) }) JustBeforeEach(func() { _, err := client.Create(garden.ContainerSpec{}) Expect(err).NotTo(HaveOccurred()) }) It("returns an error", func() { _, err := client.Create(garden.ContainerSpec{}) Expect(err).To(HaveOccurred()) Expect(err).To(MatchError(("max containers reached"))) }) }) Describe("creating privileged containers", func() { Context("when --disable-privileged-containers is not specified", func() { It("can create privileged containers", func() { _, err := client.Create(garden.ContainerSpec{Privileged: true}) Expect(err).NotTo(HaveOccurred()) }) }) Context("when --disable-privileged-containers is set", func() { BeforeEach(func() { config.DisablePrivilegedContainers = boolptr(true) }) It("cannot create privileged containers, even when gdn runs as root", func() { _, err := 
client.Create(garden.ContainerSpec{Privileged: true}) Expect(err).To(MatchError("privileged container creation is disabled")) }) }) }) }) func initProcessPID(handle string) int { Eventually(fmt.Sprintf("%s/%s/state.json", getRuncRoot(), handle)).Should(BeAnExistingFile()) state := struct { Pid int `json:"init_process_pid"` }{} Eventually(func() error { stateFile, err := os.Open(fmt.Sprintf("%s/%s/state.json", getRuncRoot(), handle)) Expect(err).NotTo(HaveOccurred()) defer stateFile.Close() // state.json is sometimes empty immediately after creation, so keep // trying until it's valid json return json.NewDecoder(stateFile).Decode(&state) }).Should(Succeed()) return state.Pid } func runInContainer(container garden.Container, path string, args []string) { proc, err := container.Run( garden.ProcessSpec{ Path: path, Args: args, }, ginkgoIO) Expect(err).NotTo(HaveOccurred()) exitCode, err := proc.Wait() Expect(err).NotTo(HaveOccurred()) Expect(exitCode).To(Equal(0)) } func numOpenSockets(pid int) (num int) { stdout := runCommand(exec.Command("sh", "-c", fmt.Sprintf("lsof -p %d | grep sock", pid))) return strings.Count(stdout, "\n") } func numPipes(pid int) (num int) { stdout := runCommand(exec.Command("sh", "-c", fmt.Sprintf("lsof -p %d | grep pipe", pid))) return strings.Count(stdout, "\n") } Tests now rely on systemd cgroups [#163941676] package gqt_test import ( "encoding/json" "fmt" "io" "io/ioutil" "net" "os" "os/exec" "path" "path/filepath" "strconv" "strings" "time" "code.cloudfoundry.org/garden" "code.cloudfoundry.org/guardian/gqt/cgrouper" "code.cloudfoundry.org/guardian/gqt/runner" . "code.cloudfoundry.org/guardian/matchers" . "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo/extensions/table" . 
"github.com/onsi/gomega" "github.com/onsi/gomega/gbytes" "github.com/onsi/gomega/gexec" ) var _ = Describe("Creating a Container", func() { var ( client *runner.RunningGarden container garden.Container initialSockets int initialPipes int ) JustBeforeEach(func() { client = runner.Start(config) initialSockets = numOpenSockets(client.Pid) initialPipes = numPipes(client.Pid) }) AfterEach(func() { Expect(client.DestroyAndStop()).To(Succeed()) }) It("has the expected device list allowed", func() { var err error container, err = client.Create(garden.ContainerSpec{}) Expect(err).NotTo(HaveOccurred()) parentPath, err := cgrouper.GetCGroupPath(client.CgroupsRootPath(), "devices", strconv.Itoa(GinkgoParallelNode()), false) Expect(err).NotTo(HaveOccurred()) cgroupPath := filepath.Join(parentPath, container.Handle()) content := readFileString(filepath.Join(cgroupPath, "devices.list")) expectedAllowedDevices := []string{ "c 1:3 rwm", "c 5:0 rwm", "c 1:8 rwm", "c 1:9 rwm", "c 1:5 rwm", "c 1:7 rwm", "c 10:229 rwm", "c *:* m", "b *:* m", "c 5:1 rwm", "c 136:* rwm", "c 5:2 rwm", "c 10:200 rwm", } contentLines := strings.Split(strings.TrimSpace(content), "\n") Expect(contentLines).To(HaveLen(len(expectedAllowedDevices))) Expect(contentLines).To(ConsistOf(expectedAllowedDevices)) }) Context("when creating fails", func() { // cause Create to fail by specifying an invalid network CIDR address var containerSpec = garden.ContainerSpec{ Network: "not-a-valid-network", } It("returns a nice error rather than timing out", func() { _, err := client.Create(containerSpec) Expect(err).To(MatchError(ContainSubstring("invalid CIDR address"))) }) It("cleans up the depot directory", func() { _, err := client.Create(containerSpec) Expect(err).To(HaveOccurred()) Expect(ioutil.ReadDir(client.DepotDir)).To(BeEmpty()) }) It("cleans up the groot store", func() { // pre-warm cache to avoid test pollution // i.e. 
ensure base layers that are never removed are already in the groot store _, err := client.Create(containerSpec) Expect(err).To(HaveOccurred()) prev, err := ioutil.ReadDir(filepath.Join(client.TmpDir, "groot_store", "images")) Expect(err).NotTo(HaveOccurred()) _, err = client.Create(containerSpec) Expect(err).To(HaveOccurred()) Eventually(func() int { num, err := ioutil.ReadDir(filepath.Join(client.TmpDir, "groot_store", "images")) Expect(err).NotTo(HaveOccurred()) return len(num) }).Should(Equal(len(prev))) }) Context("because runc doesn't exist", func() { BeforeEach(func() { skipIfContainerd() config.RuntimePluginBin = "/tmp/does/not/exist" }) It("returns a sensible error", func() { _, err := client.Create(garden.ContainerSpec{}) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("no such file or directory")) }) }) }) Context("after creating a container without a specified handle", func() { var ( privileged bool initProcPid int ) BeforeEach(func() { privileged = false }) JustBeforeEach(func() { var err error container, err = client.Create(garden.ContainerSpec{ Privileged: privileged, }) Expect(err).NotTo(HaveOccurred()) initProcPid = initProcessPID(container.Handle()) }) It("should create a depot subdirectory based on the container handle", func() { Expect(container.Handle()).NotTo(BeEmpty()) Expect(filepath.Join(client.DepotDir, container.Handle())).To(BeADirectory()) Expect(filepath.Join(client.DepotDir, container.Handle(), "config.json")).To(BeARegularFile()) }) It("should lookup the right container", func() { lookupContainer, lookupError := client.Lookup(container.Handle()) Expect(lookupError).NotTo(HaveOccurred()) Expect(lookupContainer).To(Equal(container)) }) It("should not leak pipes", func() { process, err := container.Run(garden.ProcessSpec{Path: "echo", Args: []string{"hello"}}, garden.ProcessIO{}) Expect(err).NotTo(HaveOccurred()) Expect(process.Wait()).To(Equal(0)) Expect(client.Destroy(container.Handle())).To(Succeed()) container 
= nil // avoid double-destroying Eventually(func() int { return numPipes(client.Pid) }).Should(Equal(initialPipes)) }) It("should not leak sockets", func() { Expect(client.Destroy(container.Handle())).To(Succeed()) container = nil // avoid double-destroying Eventually(func() int { return numOpenSockets(client.Pid) }).Should(Equal(initialSockets)) }) It("should avoid leaving zombie processes", func() { Expect(client.Destroy(container.Handle())).To(Succeed()) container = nil // avoid double-destroying Eventually(func() *gexec.Session { sess, err := gexec.Start(exec.Command("ps"), GinkgoWriter, GinkgoWriter) Expect(err).NotTo(HaveOccurred()) Eventually(sess).Should(gexec.Exit(0)) return sess }, "10s").ShouldNot(gbytes.Say("defunct")) // this is a pretty broad test since we're looking at all processes, so give it quite a while to see no defuncts }) DescribeTable("placing the container in to all namespaces", func(ns string) { hostNSInode, err := os.Readlink(fmt.Sprintf("/proc/1/ns/%s", ns)) Expect(err).NotTo(HaveOccurred()) containerNSInode, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/%s", initProcPid, ns)) Expect(err).NotTo(HaveOccurred()) Expect(hostNSInode).NotTo(Equal(containerNSInode)) }, Entry("should place the container in to the NET namespace", "net"), Entry("should place the container in to the IPC namespace", "ipc"), Entry("should place the container in to the UTS namespace", "uts"), Entry("should place the container in to the PID namespace", "pid"), Entry("should place the container in to the MNT namespace", "mnt"), Entry("should place the container in to the USER namespace", "user"), ) Context("which is privileged", func() { BeforeEach(func() { privileged = true }) It("should not place the container in its own user namespace", func() { hostNS, err := gexec.Start(exec.Command("ls", "-l", "/proc/1/ns/user"), GinkgoWriter, GinkgoWriter) Expect(err).NotTo(HaveOccurred()) containerNS, err := gexec.Start(exec.Command("ls", "-l", fmt.Sprintf("/proc/%d/ns/user", 
initProcPid)), GinkgoWriter, GinkgoWriter) Expect(err).NotTo(HaveOccurred()) Eventually(containerNS).Should(gexec.Exit(0)) Eventually(hostNS).Should(gexec.Exit(0)) hostFD := strings.Split(string(hostNS.Out.Contents()), ">")[1] containerFD := strings.Split(string(containerNS.Out.Contents()), ">")[1] Expect(hostFD).To(Equal(containerFD)) }) }) }) Context("after creating a container with a specified root filesystem", func() { var ( tmpDir string rootFSPath string ) JustBeforeEach(func() { var err error rootFSPath = createRootfsTar(func(unpackedRootfs string) { Expect(ioutil.WriteFile(filepath.Join(unpackedRootfs, "my-file"), []byte("some-content"), 0644)).To(Succeed()) Expect(os.Mkdir(path.Join(unpackedRootfs, "somedir"), 0777)).To(Succeed()) }) container, err = client.Create(garden.ContainerSpec{ RootFSPath: rootFSPath, }) Expect(err).NotTo(HaveOccurred()) }) AfterEach(func() { Expect(os.RemoveAll(tmpDir)).To(Succeed()) }) It("provides the containers with the right rootfs", func() { Expect(container).To(HaveFile("/my-file")) By("Isolating the filesystem propertly for multiple containers") runInContainer(container, "touch", []string{"/somedir/created-file"}) Expect(container).To(HaveFile("/somedir/created-file")) container2, err := client.Create(garden.ContainerSpec{ RootFSPath: rootFSPath, }) Expect(err).NotTo(HaveOccurred()) Expect(container2).To(HaveFile("/my-file")) Expect(container2).NotTo(HaveFile("/somedir/created-file")) }) }) Context("after creating a container with a specified handle", func() { It("should lookup the right container for the handle", func() { container, err := client.Create(garden.ContainerSpec{ Handle: "container-banana", }) Expect(err).NotTo(HaveOccurred()) lookupContainer, lookupError := client.Lookup("container-banana") Expect(lookupError).NotTo(HaveOccurred()) Expect(lookupContainer).To(Equal(container)) }) It("allow the container to be created with the same name after destroying", func() { container, err := 
client.Create(garden.ContainerSpec{ Handle: "another-banana", }) Expect(err).NotTo(HaveOccurred()) Expect(client.Destroy(container.Handle())).To(Succeed()) container, err = client.Create(garden.ContainerSpec{ Handle: "another-banana", }) Expect(err).NotTo(HaveOccurred()) }) }) //TODO why duplicate? Context("when creating a container fails", func() { It("should not leak networking configuration", func() { _, err := client.Create(garden.ContainerSpec{ Network: fmt.Sprintf("172.250.%d.20/24", GinkgoParallelNode()), RootFSPath: "/banana/does/not/exist", }) Expect(err).To(HaveOccurred()) session, err := gexec.Start( exec.Command("ifconfig"), GinkgoWriter, GinkgoWriter, ) Expect(err).NotTo(HaveOccurred()) Consistently(session).ShouldNot(gbytes.Say(fmt.Sprintf("172-250-%d-0", GinkgoParallelNode()))) }) }) Context("when creating a container with NetOut rules", func() { var container garden.Container JustBeforeEach(func() { config.DenyNetworks = []string{"0.0.0.0/0"} rules := []garden.NetOutRule{ garden.NetOutRule{ Protocol: garden.ProtocolTCP, Networks: []garden.IPRange{garden.IPRangeFromIP(net.ParseIP("8.8.8.8"))}, Ports: []garden.PortRange{garden.PortRangeFromPort(53)}, }, garden.NetOutRule{ Protocol: garden.ProtocolTCP, Networks: []garden.IPRange{garden.IPRangeFromIP(net.ParseIP("8.8.4.4"))}, Ports: []garden.PortRange{garden.PortRangeFromPort(53)}, }, } var err error container, err = client.Create(garden.ContainerSpec{ NetOut: rules, }) Expect(err).NotTo(HaveOccurred()) }) It("provides connectivity to the addresses provided", func() { Expect(checkConnectionWithRetries(container, "8.8.8.8", 53, DEFAULT_RETRIES)).To(Succeed()) Expect(checkConnectionWithRetries(container, "8.8.4.4", 53, DEFAULT_RETRIES)).To(Succeed()) }) }) Context("when creating a container with NetIn rules", func() { var container garden.Container JustBeforeEach(func() { netIn := []garden.NetIn{ garden.NetIn{HostPort: 9888, ContainerPort: 9080}, } var err error container, err = 
client.Create(garden.ContainerSpec{ NetIn: netIn, }) Expect(err).NotTo(HaveOccurred()) }) It("maps the provided host port to the container port", func() { Expect(listenInContainer(container, 9080)).To(Succeed()) externalIP := externalIP(container) Eventually(func() *gexec.Session { return sendRequest(externalIP, 9888).Wait() }). Should(gbytes.Say(fmt.Sprintf("%d", 9080))) }) }) Context("when creating a container and specifying CPU configuration", func() { createContainerWithCpuConfig := func(weight, shares uint64) (garden.Container, error) { limits := garden.Limits{ CPU: garden.CPULimits{ Weight: weight, LimitInShares: shares, }, } container, err := client.Create(garden.ContainerSpec{ Limits: limits, }) return container, err } checkCPUSharesInContainer := func(container garden.Container, clientPid int, expected int) { cpuset := strings.TrimSpace(readFileString(fmt.Sprintf("/proc/%d/cpuset", clientPid))) cpuset = strings.TrimLeft(cpuset, "/") cpuSharesPath := fmt.Sprintf("%s/cpu/system.slice/garden.service/%s/garden-%s/%s/cpu.shares", client.CgroupsRootPath(), cpuset, config.Tag, container.Handle()) cpuShares := strings.TrimSpace(readFileString(cpuSharesPath)) Expect(cpuShares).To(Equal(strconv.Itoa(expected))) } It("can set the cpu weight", func() { container, err := createContainerWithCpuConfig(2, 0) Expect(err).NotTo(HaveOccurred()) checkCPUSharesInContainer(container, client.Pid, 2) }) It("should return an error when the cpu shares is invalid", func() { _, err := createContainerWithCpuConfig(1, 0) Expect(err.Error()).To(ContainSubstring("The minimum allowed cpu-shares is 2")) }) It("should use the default weight value when neither the cpu share or weight are set", func() { container, err := createContainerWithCpuConfig(0, 0) Expect(err).NotTo(HaveOccurred()) checkCPUSharesInContainer(container, client.Pid, 1024) }) Context("when LimitInShares is set", func() { It("creates a container with the shares", func() { container, err := createContainerWithCpuConfig(0, 
123) Expect(err).NotTo(HaveOccurred()) checkCPUSharesInContainer(container, client.Pid, 123) }) }) Context("when both Weight and LimitInShares are set", func() { It("Weight has precedence", func() { container, err := createContainerWithCpuConfig(123, 456) Expect(err).NotTo(HaveOccurred()) checkCPUSharesInContainer(container, client.Pid, 123) }) }) }) Describe("block IO weight", func() { BeforeEach(func() { config.DefaultBlkioWeight = uint64ptr(400) }) checkBlockIOWeightInContainer := func(container garden.Container, expected string) { parentCgroupPath, err := cgrouper.GetCGroup("blkio") Expect(err).NotTo(HaveOccurred()) parentCgroupPath = strings.TrimLeft(parentCgroupPath, "/") blkIOWeightPath := fmt.Sprintf("%s/blkio/%s/garden-%s/%s/blkio.weight", client.CgroupsRootPath(), parentCgroupPath, config.Tag, container.Handle()) blkIOWeight := strings.TrimSpace(readFileString(blkIOWeightPath)) Expect(blkIOWeight).To(Equal(expected)) } It("uses the specified block IO weight", func() { container, err := client.Create(garden.ContainerSpec{}) Expect(err).NotTo(HaveOccurred()) checkBlockIOWeightInContainer(container, "400") }) Context("when specifying a block IO weight of 0", func() { BeforeEach(func() { config.DefaultBlkioWeight = uint64ptr(0) }) It("uses the system default value of 500", func() { container, err := client.Create(garden.ContainerSpec{}) Expect(err).NotTo(HaveOccurred()) checkBlockIOWeightInContainer(container, "500") }) }) Context("when specifying block IO weight outside the range 10 - 1000", func() { BeforeEach(func() { config.DefaultBlkioWeight = uint64ptr(9) }) It("returns an out of range error", func() { _, err := client.Create(garden.ContainerSpec{}) Expect(err.Error()).To(ContainSubstring("numerical result out of range")) }) }) }) Context("when running with an external network plugin", func() { var pluginOutput string BeforeEach(func() { tmpFile := path.Join(tempDir("", "netplugtest"), "iwasrun.log") config.NetworkPluginBin = binaries.NetworkPlugin 
config.NetworkPluginExtraArgs = []string{tmpFile, "/dev/null"} }) Context("when the plugin returns a properties key", func() { BeforeEach(func() { pluginOutput = `{"properties": {"key":"value", "garden.network.container-ip":"10.10.24.3"}}` config.NetworkPluginExtraArgs = append(config.NetworkPluginExtraArgs, pluginOutput) }) It("does not run kawasaki", func() { container, err := client.Create(garden.ContainerSpec{}) Expect(err).NotTo(HaveOccurred()) out := gbytes.NewBuffer() process, err := container.Run(garden.ProcessSpec{ Path: "ip", Args: []string{ "-o", "link", "show", }, }, garden.ProcessIO{ Stdout: io.MultiWriter(GinkgoWriter, out), }) Expect(err).NotTo(HaveOccurred()) exitCode, err := process.Wait() Expect(err).NotTo(HaveOccurred()) Expect(exitCode).To(BeZero()) // ip link appends a new line on the end so let's trim that first contents := strings.TrimRight(string(out.Contents()), "\n") // Check that we only have 1 interface, the loopback interface Expect(strings.Split(contents, "\n")).To(HaveLen(1)) Expect(contents).To(ContainSubstring("LOOPBACK")) }) }) Context("when the external network plugin returns invalid JSON", func() { BeforeEach(func() { pluginOutput = "invalid-json" config.NetworkPluginExtraArgs = append(config.NetworkPluginExtraArgs, pluginOutput) }) It("returns a useful error message", func() { _, err := client.Create(garden.ContainerSpec{}) Expect(err).To(MatchError(ContainSubstring("unmarshaling result from external networker: invalid character"))) }) }) }) It("does not make containers available to lookup until creation is completed", func() { handle := "handlecake" assertionsComplete := make(chan struct{}) go func(done chan<- struct{}) { defer GinkgoRecover() defer close(done) var lookupContainer garden.Container Eventually(func() error { var err error lookupContainer, err = client.Lookup(handle) return err }, time.Second*20, time.Millisecond*200).ShouldNot(HaveOccurred()) // Properties used to be set after containers were available from 
lookup Expect(lookupContainer.Properties()).To(HaveKeyWithValue("somename", "somevalue")) }(assertionsComplete) _, err := client.Create(garden.ContainerSpec{ Handle: handle, Properties: garden.Properties{"somename": "somevalue"}, }) Expect(err).NotTo(HaveOccurred()) <-assertionsComplete }) Context("create more containers than the maxkeyring limit", func() { BeforeEach(func() { Expect(ioutil.WriteFile("/proc/sys/kernel/keys/maxkeys", []byte("1"), 0644)).To(Succeed()) }) AfterEach(func() { Expect(ioutil.WriteFile("/proc/sys/kernel/keys/maxkeys", []byte("200"), 0644)).To(Succeed()) }) It("works", func() { c1, err := client.Create(garden.ContainerSpec{}) Expect(err).NotTo(HaveOccurred()) c2, err := client.Create(garden.ContainerSpec{}) Expect(err).NotTo(HaveOccurred()) Expect(client.Destroy(c1.Handle())).To(Succeed()) Expect(client.Destroy(c2.Handle())).To(Succeed()) }) }) Context("when creating more than --max-containers containers", func() { BeforeEach(func() { config.MaxContainers = uint64ptr(1) }) JustBeforeEach(func() { _, err := client.Create(garden.ContainerSpec{}) Expect(err).NotTo(HaveOccurred()) }) It("returns an error", func() { _, err := client.Create(garden.ContainerSpec{}) Expect(err).To(HaveOccurred()) Expect(err).To(MatchError(("max containers reached"))) }) }) Describe("creating privileged containers", func() { Context("when --disable-privileged-containers is not specified", func() { It("can create privileged containers", func() { _, err := client.Create(garden.ContainerSpec{Privileged: true}) Expect(err).NotTo(HaveOccurred()) }) }) Context("when --disable-privileged-containers is set", func() { BeforeEach(func() { config.DisablePrivilegedContainers = boolptr(true) }) It("cannot create privileged containers, even when gdn runs as root", func() { _, err := client.Create(garden.ContainerSpec{Privileged: true}) Expect(err).To(MatchError("privileged container creation is disabled")) }) }) }) }) func initProcessPID(handle string) int { 
Eventually(fmt.Sprintf("%s/%s/state.json", getRuncRoot(), handle)).Should(BeAnExistingFile()) state := struct { Pid int `json:"init_process_pid"` }{} Eventually(func() error { stateFile, err := os.Open(fmt.Sprintf("%s/%s/state.json", getRuncRoot(), handle)) Expect(err).NotTo(HaveOccurred()) defer stateFile.Close() // state.json is sometimes empty immediately after creation, so keep // trying until it's valid json return json.NewDecoder(stateFile).Decode(&state) }).Should(Succeed()) return state.Pid } func runInContainer(container garden.Container, path string, args []string) { proc, err := container.Run( garden.ProcessSpec{ Path: path, Args: args, }, ginkgoIO) Expect(err).NotTo(HaveOccurred()) exitCode, err := proc.Wait() Expect(err).NotTo(HaveOccurred()) Expect(exitCode).To(Equal(0)) } func numOpenSockets(pid int) (num int) { stdout := runCommand(exec.Command("sh", "-c", fmt.Sprintf("lsof -p %d | grep sock", pid))) return strings.Count(stdout, "\n") } func numPipes(pid int) (num int) { stdout := runCommand(exec.Command("sh", "-c", fmt.Sprintf("lsof -p %d | grep pipe", pid))) return strings.Count(stdout, "\n") }
// Package graceful implements graceful shutdown for servers: registered
// listeners are closed and drained, and user hooks run before and after.
package graceful

import (
	"os"
	"os/signal"
	"sync"
	"sync/atomic"
	"time"

	"github.com/zenazn/goji/graceful/listener"
)

var mu sync.Mutex // protects everything that follows

// listeners holds every listener this package manages during shutdown.
var listeners = make([]*listener.T, 0)

// prehooks and posthooks run (in registration order, from a single goroutine)
// before and after the listener close/drain phase, respectively.
var prehooks = make([]func(), 0)
var posthooks = make([]func(), 0)

// closing is flipped to 1 (via atomic.StoreInt32 in closeListeners) once
// shutdown has begun.
var closing int32

// doubleKick is the window in which two back-to-back signals count as a
// forceful exit; timeout bounds how long a graceful drain may take before a
// forced shutdown is triggered. Zero disables either feature.
var doubleKick, timeout time.Duration

// wait is closed once shutdown has fully completed; Wait() blocks on it.
var wait = make(chan struct{})

var stdSignals = []os.Signal{os.Interrupt}
var sigchan = make(chan os.Signal, 1)

// HandleSignals installs signal handlers for a set of standard signals. By
// default, this set only includes keyboard interrupts, however when the package
// detects that it is running under Einhorn, a SIGUSR2 handler is installed as
// well.
func HandleSignals() {
	AddSignal(stdSignals...)
}

// AddSignal adds the given signal to the set of signals that trigger a graceful
// shutdown.
func AddSignal(sig ...os.Signal) {
	signal.Notify(sigchan, sig...)
}

// ResetSignals resets the list of signals that trigger a graceful shutdown.
func ResetSignals() {
	signal.Stop(sigchan)
}

// PreHook registers a function to be called before any of this package's normal
// shutdown actions. All listeners will be called in the order they were added,
// from a single goroutine.
func PreHook(f func()) {
	mu.Lock()
	defer mu.Unlock()
	prehooks = append(prehooks, f)
}

// PostHook registers a function to be called after all of this package's normal
// shutdown actions. All listeners will be called in the order they were added,
// from a single goroutine, and are guaranteed to be called after all listening
// connections have been closed, but before Wait() returns.
//
// If you've Hijacked any connections that must be gracefully shut down in some
// other way (since this library disowns all hijacked connections), it's
// reasonable to use a PostHook to signal and wait for them.
func PostHook(f func()) {
	mu.Lock()
	defer mu.Unlock()
	posthooks = append(posthooks, f)
}

// Shutdown manually triggers a shutdown from your application. Like Wait,
// blocks until all connections have gracefully shut down.
func Shutdown() {
	shutdown(false)
}

// ShutdownNow triggers an immediate shutdown from your application. All
// connections (not just those that are idle) are immediately closed, even if
// they are in the middle of serving a request.
func ShutdownNow() {
	shutdown(true)
}

// DoubleKickWindow sets the length of the window during which two back-to-back
// signals are treated as an especially urgent or forceful request to exit
// (i.e., ShutdownNow instead of Shutdown). Signals delivered more than this
// duration apart are treated as separate requests to exit gracefully as usual.
//
// Setting DoubleKickWindow to 0 disables the feature.
func DoubleKickWindow(d time.Duration) {
	if d < 0 {
		// Negative windows are silently ignored.
		return
	}
	mu.Lock()
	defer mu.Unlock()
	doubleKick = d
}

// Timeout sets the maximum amount of time package graceful will wait for
// connections to gracefully shut down after receiving a signal. After this
// timeout, connections will be forcefully shut down (similar to calling
// ShutdownNow).
//
// Setting Timeout to 0 disables the feature.
func Timeout(d time.Duration) {
	if d < 0 {
		// Negative timeouts are silently ignored.
		return
	}
	mu.Lock()
	defer mu.Unlock()
	timeout = d
}

// Wait for all connections to gracefully shut down. This is commonly called at
// the bottom of the main() function to prevent the program from exiting
// prematurely.
func Wait() {
	<-wait
}

func init() {
	go sigLoop()
}

// sigLoop services sigchan for the lifetime of the process, translating each
// incoming signal into a shutdown. Two signals arriving within the doubleKick
// window force an immediate shutdown; a non-zero timeout schedules a forced
// shutdown in case the graceful one does not finish in time.
func sigLoop() {
	var last time.Time // arrival time of the previous signal
	for {
		<-sigchan
		now := time.Now()
		mu.Lock()
		force := doubleKick != 0 && now.Sub(last) < doubleKick
		if t := timeout; t != 0 && !force {
			go func() {
				time.Sleep(t)
				shutdown(true)
			}()
		}
		mu.Unlock()
		go shutdown(force)
		last = now
	}
}

// Each phase of shutdown runs at most once, no matter how many times shutdown
// is invoked (gracefully, forcefully, or both).
var preOnce, closeOnce, forceOnce, postOnce, notifyOnce sync.Once

// shutdown runs the full shutdown sequence: pre-hooks, listener close/drain,
// post-hooks, and finally the notification that unblocks Wait().
func shutdown(force bool) {
	preOnce.Do(func() {
		mu.Lock()
		defer mu.Unlock()
		for _, f := range prehooks {
			f()
		}
	})

	// A forced shutdown can follow an in-progress graceful one (timeout or
	// double kick), so the two listener-closing phases are guarded by
	// separate Onces.
	if force {
		forceOnce.Do(func() {
			closeListeners(force)
		})
	} else {
		closeOnce.Do(func() {
			closeListeners(force)
		})
	}

	postOnce.Do(func() {
		mu.Lock()
		defer mu.Unlock()
		for _, f := range posthooks {
			f()
		}
	})

	notifyOnce.Do(func() {
		close(wait)
	})
}

// closeListeners closes every registered listener and waits for all of them to
// finish draining. With force set, in-flight connections are torn down
// immediately (DrainAll); otherwise they are drained gracefully (Drain).
func closeListeners(force bool) {
	atomic.StoreInt32(&closing, 1)

	var wg sync.WaitGroup
	// Deferred before mu.Unlock, so (LIFO order) the mutex is released
	// before we block waiting for the drain goroutines.
	defer wg.Wait()

	mu.Lock()
	defer mu.Unlock()
	wg.Add(len(listeners))
	for _, l := range listeners {
		go func(l *listener.T) {
			defer wg.Done()
			l.Close()
			if force {
				l.DrainAll()
			} else {
				l.Drain()
			}
		}(l)
	}
}

graceful: add {Pre,Post}HookWithSignal functions

// Package graceful implements graceful shutdown for servers: registered
// listeners are closed and drained, and user hooks run before and after.
package graceful

import (
	"os"
	"os/signal"
	"sync"
	"sync/atomic"
	"time"

	"github.com/zenazn/goji/graceful/listener"
)

var mu sync.Mutex // protects everything that follows

// listeners holds every listener this package manages during shutdown.
var listeners = make([]*listener.T, 0)

// Hooks receive the os.Signal that triggered the shutdown (nil for manual
// shutdowns); they run in registration order from a single goroutine.
var prehooks = make([]func(os.Signal), 0)
var posthooks = make([]func(os.Signal), 0)

// closing is flipped to 1 (via atomic.StoreInt32 in closeListeners) once
// shutdown has begun.
var closing int32

// doubleKick is the window in which two back-to-back signals count as a
// forceful exit; timeout bounds how long a graceful drain may take. Zero
// disables either feature.
var doubleKick, timeout time.Duration

// wait is closed once shutdown has fully completed; Wait() blocks on it.
var wait = make(chan struct{})

var stdSignals = []os.Signal{os.Interrupt}
var sigchan = make(chan os.Signal, 1)

// HandleSignals installs signal handlers for a set of standard signals. By
// default, this set only includes keyboard interrupts, however when the package
// detects that it is running under Einhorn, a SIGUSR2 handler is installed as
// well.
func HandleSignals() {
	AddSignal(stdSignals...)
}

// AddSignal adds the given signal to the set of signals that trigger a graceful
// shutdown.
func AddSignal(sig ...os.Signal) {
	signal.Notify(sigchan, sig...)
}

// ResetSignals resets the list of signals that trigger a graceful shutdown.
func ResetSignals() {
	signal.Stop(sigchan)
}

// PreHookWithSignal registers a function to be called before any of this
// package's normal shutdown actions, which receives the signal that caused the
// shutdown (or nil for manual shutdowns). All listeners will be called in the
// order they were added, from a single goroutine.
func PreHookWithSignal(f func(os.Signal)) {
	mu.Lock()
	defer mu.Unlock()
	prehooks = append(prehooks, f)
}

// PreHook registers a function to be called before any of this package's normal
// shutdown actions. All listeners will be called in the order they were added,
// from a single goroutine.
func PreHook(f func()) {
	PreHookWithSignal(func(_ os.Signal) {
		f()
	})
}

// PostHookWithSignal registers a function to be called after all of this
// package's normal shutdown actions, which receives the signal that caused the
// shutdown (or nil for manual shutdowns). All listeners will be called in the
// order they were added, from a single goroutine, and are guaranteed to be
// called after all listening connections have been closed, but before Wait()
// returns.
//
// If you've Hijacked any connections that must be gracefully shut down in some
// other way (since this library disowns all hijacked connections), it's
// reasonable to use a PostHook to signal and wait for them.
func PostHookWithSignal(f func(os.Signal)) {
	mu.Lock()
	defer mu.Unlock()
	posthooks = append(posthooks, f)
}

// PostHook registers a function to be called after all of this package's normal
// shutdown actions. All listeners will be called in the order they were added,
// from a single goroutine, and are guaranteed to be called after all listening
// connections have been closed, but before Wait() returns.
//
// If you've Hijacked any connections that must be gracefully shut down in some
// other way (since this library disowns all hijacked connections), it's
// reasonable to use a PostHook to signal and wait for them.
func PostHook(f func()) {
	PostHookWithSignal(func(_ os.Signal) {
		f()
	})
}

// Shutdown manually triggers a shutdown from your application. Like Wait,
// blocks until all connections have gracefully shut down.
func Shutdown() {
	shutdown(false, nil)
}

// ShutdownNow triggers an immediate shutdown from your application. All
// connections (not just those that are idle) are immediately closed, even if
// they are in the middle of serving a request.
func ShutdownNow() {
	shutdown(true, nil)
}

// DoubleKickWindow sets the length of the window during which two back-to-back
// signals are treated as an especially urgent or forceful request to exit
// (i.e., ShutdownNow instead of Shutdown). Signals delivered more than this
// duration apart are treated as separate requests to exit gracefully as usual.
//
// Setting DoubleKickWindow to 0 disables the feature.
func DoubleKickWindow(d time.Duration) {
	if d < 0 {
		// Negative windows are silently ignored.
		return
	}
	mu.Lock()
	defer mu.Unlock()
	doubleKick = d
}

// Timeout sets the maximum amount of time package graceful will wait for
// connections to gracefully shut down after receiving a signal. After this
// timeout, connections will be forcefully shut down (similar to calling
// ShutdownNow).
//
// Setting Timeout to 0 disables the feature.
func Timeout(d time.Duration) {
	if d < 0 {
		// Negative timeouts are silently ignored.
		return
	}
	mu.Lock()
	defer mu.Unlock()
	timeout = d
}

// Wait for all connections to gracefully shut down. This is commonly called at
// the bottom of the main() function to prevent the program from exiting
// prematurely.
func Wait() {
	<-wait
}

func init() {
	go sigLoop()
}

// sigLoop services sigchan for the lifetime of the process, translating each
// incoming signal into a shutdown. Two signals arriving within the doubleKick
// window force an immediate shutdown; a non-zero timeout schedules a forced
// shutdown in case the graceful one does not finish in time. The triggering
// signal is passed through to the shutdown hooks.
func sigLoop() {
	var last time.Time // arrival time of the previous signal
	for {
		sig := <-sigchan
		now := time.Now()
		mu.Lock()
		force := doubleKick != 0 && now.Sub(last) < doubleKick
		if t := timeout; t != 0 && !force {
			go func() {
				time.Sleep(t)
				shutdown(true, sig)
			}()
		}
		mu.Unlock()
		go shutdown(force, sig)
		last = now
	}
}

// Each phase of shutdown runs at most once, no matter how many times shutdown
// is invoked (gracefully, forcefully, or both).
var preOnce, closeOnce, forceOnce, postOnce, notifyOnce sync.Once

// shutdown runs the full shutdown sequence: pre-hooks, listener close/drain,
// post-hooks, and finally the notification that unblocks Wait(). sig is the
// signal that triggered the shutdown, or nil for manual shutdowns; it is
// handed to every hook.
func shutdown(force bool, sig os.Signal) {
	preOnce.Do(func() {
		mu.Lock()
		defer mu.Unlock()
		for _, f := range prehooks {
			f(sig)
		}
	})

	// A forced shutdown can follow an in-progress graceful one (timeout or
	// double kick), so the two listener-closing phases are guarded by
	// separate Onces.
	if force {
		forceOnce.Do(func() {
			closeListeners(force)
		})
	} else {
		closeOnce.Do(func() {
			closeListeners(force)
		})
	}

	postOnce.Do(func() {
		mu.Lock()
		defer mu.Unlock()
		for _, f := range posthooks {
			f(sig)
		}
	})

	notifyOnce.Do(func() {
		close(wait)
	})
}

// closeListeners closes every registered listener and waits for all of them to
// finish draining. With force set, in-flight connections are torn down
// immediately (DrainAll); otherwise they are drained gracefully (Drain).
func closeListeners(force bool) {
	atomic.StoreInt32(&closing, 1)

	var wg sync.WaitGroup
	// Deferred before mu.Unlock, so (LIFO order) the mutex is released
	// before we block waiting for the drain goroutines.
	defer wg.Wait()

	mu.Lock()
	defer mu.Unlock()
	wg.Add(len(listeners))
	for _, l := range listeners {
		go func(l *listener.T) {
			defer wg.Done()
			l.Close()
			if force {
				l.DrainAll()
			} else {
				l.Drain()
			}
		}(l)
	}
}
package libkbfs

import (
	"errors"
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/keybase/client/go/libkb"
	keybase1 "github.com/keybase/client/go/protocol"
	"golang.org/x/net/context"
)

// keyManagerInit builds a mock-backed config with a real KeyManagerStandard
// installed, plus a background context, for the unit tests below.
func keyManagerInit(t *testing.T) (mockCtrl *gomock.Controller,
	config *ConfigMock, ctx context.Context) {
	ctr := NewSafeTestReporter(t)
	mockCtrl = gomock.NewController(ctr)
	config = NewConfigMock(mockCtrl, ctr)
	keyman := NewKeyManagerStandard(config)
	config.SetKeyManager(keyman)
	ctx = context.Background()
	return
}

// keyManagerShutdown verifies all mock expectations registered during a test.
func keyManagerShutdown(mockCtrl *gomock.Controller, config *ConfigMock) {
	config.ctr.CheckForFailures()
	mockCtrl.Finish()
}

// expectCachedGetTLFCryptKey expects a key-cache hit for the given keygen.
func expectCachedGetTLFCryptKey(config *ConfigMock, rmd *RootMetadata, keyGen KeyGen) {
	config.mockKcache.EXPECT().GetTLFCryptKey(rmd.ID, keyGen).Return(TLFCryptKey{}, nil)
}

// expectUncachedGetTLFCryptKey expects a cache miss followed by a full key
// derivation using the current device's key. When encrypt is false the
// derived key is additionally expected to be written back to the cache.
func expectUncachedGetTLFCryptKey(config *ConfigMock, rmd *RootMetadata,
	keyGen KeyGen, uid keybase1.UID, subkey CryptPublicKey, encrypt bool) {
	config.mockKcache.EXPECT().GetTLFCryptKey(rmd.ID, keyGen).
		Return(TLFCryptKey{}, errors.New("NONE"))

	// get the xor'd key out of the metadata
	config.mockKbpki.EXPECT().GetCurrentCryptPublicKey(gomock.Any()).
		Return(subkey, nil)
	config.mockCrypto.EXPECT().DecryptTLFCryptKeyClientHalf(gomock.Any(),
		TLFEphemeralPublicKey{}, gomock.Any()).
		Return(TLFCryptKeyClientHalf{}, nil)

	// get the server-side half and retrieve the real secret key
	config.mockKops.EXPECT().GetTLFCryptKeyServerHalf(gomock.Any(),
		gomock.Any(), gomock.Any()).Return(TLFCryptKeyServerHalf{}, nil)
	config.mockCrypto.EXPECT().UnmaskTLFCryptKey(TLFCryptKeyServerHalf{},
		TLFCryptKeyClientHalf{}).Return(TLFCryptKey{}, nil)

	// now put the key into the cache
	if !encrypt {
		config.mockKcache.EXPECT().PutTLFCryptKey(rmd.ID, keyGen, TLFCryptKey{}).
			Return(nil)
	}
}

// expectUncachedGetTLFCryptKeyAnyDevice is like expectUncachedGetTLFCryptKey
// but expects the decryption to be attempted against any of the user's
// devices rather than just the current one.
func expectUncachedGetTLFCryptKeyAnyDevice(config *ConfigMock,
	rmd *RootMetadata, keyGen KeyGen, uid keybase1.UID,
	subkey CryptPublicKey, encrypt bool) {
	config.mockKcache.EXPECT().GetTLFCryptKey(rmd.ID, keyGen).
		Return(TLFCryptKey{}, errors.New("NONE"))

	// get the xor'd key out of the metadata
	config.mockKbpki.EXPECT().GetCryptPublicKeys(gomock.Any(), uid).
		Return([]CryptPublicKey{subkey}, nil)
	config.mockCrypto.EXPECT().DecryptTLFCryptKeyClientHalfAny(gomock.Any(),
		gomock.Any()).Return(TLFCryptKeyClientHalf{}, 0, nil)

	// get the server-side half and retrieve the real secret key
	config.mockKops.EXPECT().GetTLFCryptKeyServerHalf(gomock.Any(),
		gomock.Any(), gomock.Any()).Return(TLFCryptKeyServerHalf{}, nil)
	config.mockCrypto.EXPECT().UnmaskTLFCryptKey(TLFCryptKeyServerHalf{},
		TLFCryptKeyClientHalf{}).Return(TLFCryptKey{}, nil)

	// now put the key into the cache
	if !encrypt {
		config.mockKcache.EXPECT().PutTLFCryptKey(rmd.ID, keyGen, TLFCryptKey{}).
			Return(nil)
	}
}

// expectRekey sets up the full set of mock expectations for one rekey pass
// that adds keys for a single fake device.
func expectRekey(config *ConfigMock, rmd *RootMetadata) {
	// generate new keys
	config.mockCrypto.EXPECT().MakeRandomTLFKeys().Return(TLFPublicKey{},
		TLFPrivateKey{}, TLFEphemeralPublicKey{}, TLFEphemeralPrivateKey{},
		TLFCryptKey{}, nil)
	config.mockCrypto.EXPECT().MakeRandomTLFCryptKeyServerHalf().
		Return(TLFCryptKeyServerHalf{}, nil)

	subkey := MakeFakeCryptPublicKeyOrBust("crypt public key")
	config.mockKbpki.EXPECT().GetCryptPublicKeys(gomock.Any(), gomock.Any()).
		Return([]CryptPublicKey{subkey}, nil)

	// make keys for the one device
	config.mockCrypto.EXPECT().MaskTLFCryptKey(TLFCryptKeyServerHalf{},
		TLFCryptKey{}).Return(TLFCryptKeyClientHalf{}, nil)
	config.mockCrypto.EXPECT().EncryptTLFCryptKeyClientHalf(
		TLFEphemeralPrivateKey{}, subkey, TLFCryptKeyClientHalf{}).
		Return(EncryptedTLFCryptKeyClientHalf{}, nil)
	config.mockKops.EXPECT().PutTLFCryptKeyServerHalves(gomock.Any(),
		gomock.Any()).Return(nil)
	config.mockCrypto.EXPECT().GetTLFCryptKeyServerHalfID(gomock.Any(),
		gomock.Any(), gomock.Any()).Return(TLFCryptKeyServerHalfID{}, nil)

	// Ignore Notify calls for now
	config.mockRep.EXPECT().Notify(gomock.Any(), gomock.Any()).AnyTimes()
}

// TestKeyManagerPublicTLFCryptKey checks that a public TLF always yields the
// well-known PublicTLFCryptKey from every key-fetching entry point.
func TestKeyManagerPublicTLFCryptKey(t *testing.T) {
	mockCtrl, config, ctx := keyManagerInit(t)
	defer keyManagerShutdown(mockCtrl, config)

	id, h, _ := newDir(t, config, 1, false, true)
	rmd := NewRootMetadataForTest(h, id)

	tlfCryptKey, err := config.KeyManager().
		GetTLFCryptKeyForEncryption(ctx, rmd)
	if err != nil {
		t.Error(err)
	}

	if tlfCryptKey != PublicTLFCryptKey {
		t.Errorf("got %v, expected %v", tlfCryptKey, PublicTLFCryptKey)
	}

	tlfCryptKey, err = config.KeyManager().
		GetTLFCryptKeyForMDDecryption(ctx, rmd)
	if err != nil {
		t.Error(err)
	}

	if tlfCryptKey != PublicTLFCryptKey {
		t.Errorf("got %v, expected %v", tlfCryptKey, PublicTLFCryptKey)
	}

	tlfCryptKey, err = config.KeyManager().
		GetTLFCryptKeyForBlockDecryption(ctx, rmd, BlockPointer{})
	if err != nil {
		t.Error(err)
	}

	if tlfCryptKey != PublicTLFCryptKey {
		t.Errorf("got %v, expected %v", tlfCryptKey, PublicTLFCryptKey)
	}
}

// TestKeyManagerCachedSecretKeyForEncryptionSuccess: encryption key fetch
// succeeds straight from the key cache.
func TestKeyManagerCachedSecretKeyForEncryptionSuccess(t *testing.T) {
	mockCtrl, config, ctx := keyManagerInit(t)
	defer keyManagerShutdown(mockCtrl, config)

	_, id, h := makeID(t, config, false)
	rmd := NewRootMetadataForTest(h, id)
	AddNewKeysOrBust(t, rmd, *NewTLFKeyBundle())

	expectCachedGetTLFCryptKey(config, rmd, rmd.LatestKeyGeneration())

	if _, err := config.KeyManager().
		GetTLFCryptKeyForEncryption(ctx, rmd); err != nil {
		t.Errorf("Got error on GetTLFCryptKeyForEncryption: %v", err)
	}
}

// TestKeyManagerCachedSecretKeyForMDDecryptionSuccess: MD-decryption key
// fetch succeeds straight from the key cache.
func TestKeyManagerCachedSecretKeyForMDDecryptionSuccess(t *testing.T) {
	mockCtrl, config, ctx := keyManagerInit(t)
	defer keyManagerShutdown(mockCtrl, config)

	_, id, h := makeID(t, config, false)
	rmd := NewRootMetadataForTest(h, id)
	AddNewKeysOrBust(t, rmd, *NewTLFKeyBundle())

	expectCachedGetTLFCryptKey(config, rmd, rmd.LatestKeyGeneration())

	if _, err := config.KeyManager().
		GetTLFCryptKeyForMDDecryption(ctx, rmd); err != nil {
		t.Errorf("Got error on GetTLFCryptKeyForMDDecryption: %v", err)
	}
}

// TestKeyManagerCachedSecretKeyForBlockDecryptionSuccess: a block pinned to
// an older keygen is decrypted using that keygen's cached key.
func TestKeyManagerCachedSecretKeyForBlockDecryptionSuccess(t *testing.T) {
	mockCtrl, config, ctx := keyManagerInit(t)
	defer keyManagerShutdown(mockCtrl, config)

	_, id, h := makeID(t, config, false)
	rmd := NewRootMetadataForTest(h, id)
	// Two key bundles so the latest generation is one past the block's.
	AddNewKeysOrBust(t, rmd, *NewTLFKeyBundle())
	AddNewKeysOrBust(t, rmd, *NewTLFKeyBundle())

	keyGen := rmd.LatestKeyGeneration() - 1
	expectCachedGetTLFCryptKey(config, rmd, keyGen)

	if _, err := config.KeyManager().GetTLFCryptKeyForBlockDecryption(
		ctx, rmd, BlockPointer{KeyGen: keyGen}); err != nil {
		t.Errorf("Got error on GetTLFCryptKeyForBlockDecryption: %v", err)
	}
}

// TestKeyManagerUncachedSecretKeyForEncryptionSuccess: cache miss forces a
// full derivation via the current device's key.
func TestKeyManagerUncachedSecretKeyForEncryptionSuccess(t *testing.T) {
	mockCtrl, config, ctx := keyManagerInit(t)
	defer keyManagerShutdown(mockCtrl, config)

	uid, id, h := makeID(t, config, false)
	rmd := NewRootMetadataForTest(h, id)
	subkey := MakeFakeCryptPublicKeyOrBust("crypt public key")
	AddNewKeysOrBust(t, rmd, MakeDirRKeyBundle(uid, subkey))

	expectUncachedGetTLFCryptKey(config, rmd, rmd.LatestKeyGeneration(), uid, subkey, true)

	if _, err := config.KeyManager().
		GetTLFCryptKeyForEncryption(ctx, rmd); err != nil {
		t.Errorf("Got error on GetTLFCryptKeyForEncryption: %v", err)
	}
}

// TestKeyManagerUncachedSecretKeyForMDDecryptionSuccess: cache miss forces a
// derivation that may use any of the user's devices.
func TestKeyManagerUncachedSecretKeyForMDDecryptionSuccess(t *testing.T) {
	mockCtrl, config, ctx := keyManagerInit(t)
	defer keyManagerShutdown(mockCtrl, config)

	uid, id, h := makeID(t, config, false)
	rmd := NewRootMetadataForTest(h, id)
	subkey := MakeFakeCryptPublicKeyOrBust("crypt public key")
	AddNewKeysOrBust(t, rmd, MakeDirRKeyBundle(uid, subkey))

	expectUncachedGetTLFCryptKeyAnyDevice(config, rmd,
		rmd.LatestKeyGeneration(), uid, subkey, false)

	if _, err := config.KeyManager().
		GetTLFCryptKeyForMDDecryption(ctx, rmd); err != nil {
		t.Errorf("Got error on GetTLFCryptKeyForMDDecryption: %v", err)
	}
}

// TestKeyManagerUncachedSecretKeyForBlockDecryptionSuccess: cache miss on an
// older keygen forces a derivation for that generation.
func TestKeyManagerUncachedSecretKeyForBlockDecryptionSuccess(t *testing.T) {
	mockCtrl, config, ctx := keyManagerInit(t)
	defer keyManagerShutdown(mockCtrl, config)

	uid, id, h := makeID(t, config, false)
	rmd := NewRootMetadataForTest(h, id)
	subkey := MakeFakeCryptPublicKeyOrBust("crypt public key")
	AddNewKeysOrBust(t, rmd, MakeDirRKeyBundle(uid, subkey))
	AddNewKeysOrBust(t, rmd, MakeDirRKeyBundle(uid, subkey))

	keyGen := rmd.LatestKeyGeneration() - 1
	expectUncachedGetTLFCryptKey(config, rmd, keyGen, uid, subkey, false)

	if _, err := config.KeyManager().GetTLFCryptKeyForBlockDecryption(
		ctx, rmd, BlockPointer{KeyGen: keyGen}); err != nil {
		t.Errorf("Got error on GetTLFCryptKeyForBlockDecryption: %v", err)
	}
}

// TestKeyManagerRekeyFailurePublic: rekeying a public TLF must fail with
// InvalidPublicTLFOperation and leave the keygen untouched.
func TestKeyManagerRekeyFailurePublic(t *testing.T) {
	mockCtrl, config, ctx := keyManagerInit(t)
	defer keyManagerShutdown(mockCtrl, config)

	_, id, h := makeID(t, config, true)
	rmd := NewRootMetadataForTest(h, id)
	if rmd.LatestKeyGeneration() != PublicKeyGen {
		t.Errorf("Expected %d, got %d", rmd.LatestKeyGeneration(), PublicKeyGen)
	}

	if _, _, err := config.KeyManager().
		Rekey(ctx, rmd); err != (InvalidPublicTLFOperation{id, "rekey"}) {
		t.Errorf("Got unexpected error on rekey: %v", err)
	}

	if rmd.LatestKeyGeneration() != PublicKeyGen {
		t.Errorf("Expected %d, got %d", rmd.LatestKeyGeneration(), PublicKeyGen)
	}
}

// TestKeyManagerRekeySuccessPrivate: a successful rekey of a private TLF
// bumps the key generation by one.
func TestKeyManagerRekeySuccessPrivate(t *testing.T) {
	mockCtrl, config, ctx := keyManagerInit(t)
	defer keyManagerShutdown(mockCtrl, config)

	_, id, h := makeID(t, config, false)
	rmd := NewRootMetadataForTest(h, id)
	oldKeyGen := rmd.LatestKeyGeneration()

	expectRekey(config, rmd)

	if done, _, err := config.KeyManager().Rekey(ctx, rmd); !done || err != nil {
		t.Errorf("Got error on rekey: %t, %v", done, err)
	} else if rmd.LatestKeyGeneration() != oldKeyGen+1 {
		t.Errorf("Bad key generation after rekey: %d", rmd.LatestKeyGeneration())
	}
}

// TestKeyManagerRekeyAddAndRevokeDevice is an end-to-end (local-server) test:
// add a device for user 2, rekey so it can read; then revoke a device, rekey
// again, and check read access across all of user 2's devices.
func TestKeyManagerRekeyAddAndRevokeDevice(t *testing.T) {
	var u1, u2 libkb.NormalizedUsername = "u1", "u2"
	config1, _, ctx := kbfsOpsConcurInit(t, u1, u2)
	defer CheckConfigAndShutdown(t, config1)

	config2 := ConfigAsUser(config1.(*ConfigLocal), u2)
	defer CheckConfigAndShutdown(t, config2)
	uid2, err := config2.KBPKI().GetCurrentUID(context.Background())
	if err != nil {
		t.Fatal(err)
	}

	// Create a shared folder
	name := u1.String() + "," + u2.String()

	kbfsOps1 := config1.KBFSOps()
	rootNode1, _, err := kbfsOps1.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if err != nil {
		t.Fatalf("Couldn't create folder: %v", err)
	}

	// user 1 creates a file
	_, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "a", false)
	if err != nil {
		t.Fatalf("Couldn't create file: %v", err)
	}

	config2Dev2 := ConfigAsUser(config1.(*ConfigLocal), u2)
	defer CheckConfigAndShutdown(t, config2Dev2)

	// Now give u2 a new device.  The configs don't share a Keybase
	// Daemon so we have to do it in all places.
	AddDeviceForLocalUserOrBust(t, config1, uid2)
	AddDeviceForLocalUserOrBust(t, config2, uid2)
	devIndex := AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
	SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)

	// user 2 should be unable to read the data now since its device
	// wasn't registered when the folder was originally created.
	kbfsOps2Dev2 := config2Dev2.KBFSOps()
	_, _, err = kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if _, ok := err.(ReadAccessError); !ok {
		t.Fatalf("Got unexpected error when reading with new key: %v", err)
	}

	// Set the KBPKI so we can count the identify calls
	localDaemon := config1.KeybaseDaemon()
	measuredDaemon := NewKeybaseDaemonMeasured(localDaemon, config1.MetricsRegistry())
	config1.SetKeybaseDaemon(measuredDaemon)

	// Force the FBO to forget about its previous identify.
	kbfsOps1.(*KBFSOpsStandard).getOps(
		rootNode1.GetFolderBranch()).identifyDone = false

	// now user 1 should rekey
	err = kbfsOps1.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
	if err != nil {
		t.Fatalf("Couldn't rekey: %v", err)
	}

	// Set the local daemon back since there are later dependencies on it.
	config1.SetKeybaseDaemon(localDaemon)

	// Only u2 should be identified as part of the rekey.
	if g, e := measuredDaemon.identifyTimer.Count(), int64(1); g != e {
		t.Errorf("Expected %d identify calls, but got %d", e, g)
	}

	// this device should be able to read now
	root2Dev2, _, err := kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if err != nil {
		t.Fatalf("Got unexpected error after rekey: %v", err)
	}

	// add a third device for user 2
	config2Dev3 := ConfigAsUser(config1.(*ConfigLocal), u2)
	defer CheckConfigAndShutdown(t, config2Dev3)
	AddDeviceForLocalUserOrBust(t, config1, uid2)
	AddDeviceForLocalUserOrBust(t, config2, uid2)
	AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
	devIndex = AddDeviceForLocalUserOrBust(t, config2Dev3, uid2)
	SwitchDeviceForLocalUserOrBust(t, config2Dev3, devIndex)

	// Now revoke the original user 2 device
	RevokeDeviceForLocalUserOrBust(t, config1, uid2, 0)
	RevokeDeviceForLocalUserOrBust(t, config2Dev2, uid2, 0)
	RevokeDeviceForLocalUserOrBust(t, config2Dev3, uid2, 0)

	// rekey again
	config1.SetKeybaseDaemon(measuredDaemon)
	err = kbfsOps1.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
	if err != nil {
		t.Fatalf("Couldn't rekey: %v", err)
	}
	config1.SetKeybaseDaemon(localDaemon)

	// Only u2 should be identified again as part of the rekey.
	if g, e := measuredDaemon.identifyTimer.Count(), int64(2); g != e {
		t.Errorf("Expected %d identify calls, but got %d", e, g)
	}

	// force re-encryption of the root dir
	_, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "b", false)
	if err != nil {
		t.Fatalf("Couldn't create file: %v", err)
	}

	err = kbfsOps2Dev2.SyncFromServer(ctx, root2Dev2.GetFolderBranch())
	if err != nil {
		t.Fatalf("Couldn't sync from server: %v", err)
	}

	// device 2 should still work
	rootNode2, _, err := kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if err != nil {
		t.Fatalf("Got unexpected error after rekey: %v", err)
	}
	// NOTE(review): err from GetDirChildren is not checked before indexing
	// into children; a failed call would surface as the "couldn't see" error
	// instead of the real cause.
	children, err := kbfsOps2Dev2.GetDirChildren(ctx, rootNode2)
	if _, ok := children["b"]; !ok {
		t.Fatalf("Device 2 couldn't see the new dir entry")
	}

	// but device 1 should now fail
	kbfsOps2 := config2.KBFSOps()
	_, _, err = kbfsOps2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if _, ok := err.(ReadAccessError); !ok {
		t.Fatalf("Got unexpected error when reading with revoked key: %v", err)
	}

	// meanwhile, device 3 should be able to read both the new and the
	// old files
	kbfsOps2Dev3 := config2Dev3.KBFSOps()
	rootNode2Dev3, _, err := kbfsOps2Dev3.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if err != nil {
		t.Fatalf("Device 3 couldn't read root: %v", err)
	}
	aNode, _, err := kbfsOps2Dev3.Lookup(ctx, rootNode2Dev3, "a")
	if err != nil {
		t.Fatalf("Device 3 couldn't lookup a: %v", err)
	}
	buf := []byte{0}
	_, err = kbfsOps2Dev3.Read(ctx, aNode, buf, 0)
	if err != nil {
		t.Fatalf("Device 3 couldn't read a: %v", err)
	}
	bNode, _, err := kbfsOps2Dev3.Lookup(ctx, rootNode2Dev3, "b")
	if err != nil {
		t.Fatalf("Device 3 couldn't lookup b: %v", err)
	}
	_, err = kbfsOps2Dev3.Read(ctx, bNode, buf, 0)
	if err != nil {
		t.Fatalf("Device 3 couldn't read b: %v", err)
	}

	// Make sure the server-side keys for the revoked device are gone
	// for all keygens.
	rmd, err := config1.MDOps().GetForTLF(ctx, rootNode1.GetFolderBranch().Tlf)
	if err != nil {
		t.Fatalf("Couldn't get latest md: %v", err)
	}
	currKeyGen := rmd.LatestKeyGeneration()
	// clear the key cache
	config2.SetKeyCache(NewKeyCacheStandard(5000))
	km2, ok := config2.KeyManager().(*KeyManagerStandard)
	if !ok {
		t.Fatal("Wrong kind of key manager for config2")
	}
	// With the original device revoked and the cache cleared, every keygen's
	// key fetch from the current device must fail.
	for keyGen := KeyGen(FirstValidKeyGen); keyGen <= currKeyGen; keyGen++ {
		_, err = km2.getTLFCryptKeyUsingCurrentDevice(ctx, rmd, keyGen, true)
		if err == nil {
			t.Errorf("User 2 could still fetch a key for keygen %d", keyGen)
		}
	}
}

// TestKeyManagerRekeyAddWriterAndReaderDevice: both a writer (u2) and a
// reader (u3) gain a device; a single rekey by u1 must grant both access
// while identifying exactly the two of them.
func TestKeyManagerRekeyAddWriterAndReaderDevice(t *testing.T) {
	var u1, u2, u3 libkb.NormalizedUsername = "u1", "u2", "u3"
	config1, _, ctx := kbfsOpsConcurInit(t, u1, u2, u3)
	defer CheckConfigAndShutdown(t, config1)

	config2 := ConfigAsUser(config1.(*ConfigLocal), u2)
	defer CheckConfigAndShutdown(t, config2)
	uid2, err := config2.KBPKI().GetCurrentUID(context.Background())
	if err != nil {
		t.Fatal(err)
	}

	config3 := ConfigAsUser(config1.(*ConfigLocal), u3)
	defer CheckConfigAndShutdown(t, config3)
	uid3, err := config3.KBPKI().GetCurrentUID(context.Background())
	if err != nil {
		t.Fatal(err)
	}

	// Create a shared folder
	name := u1.String() + "," + u2.String() + ReaderSep + u3.String()

	kbfsOps1 := config1.KBFSOps()
	rootNode1, _, err := kbfsOps1.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if err != nil {
		t.Fatalf("Couldn't create folder: %v", err)
	}

	// user 1 creates a file
	_, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "a", false)
	if err != nil {
		t.Fatalf("Couldn't create file: %v", err)
	}

	config2Dev2 := ConfigAsUser(config1.(*ConfigLocal), u2)
	defer CheckConfigAndShutdown(t, config2Dev2)

	config3Dev2 := ConfigAsUser(config1.(*ConfigLocal), u3)
	defer CheckConfigAndShutdown(t, config3Dev2)

	// Now give u2 and u3 new devices.  The configs don't share a
	// Keybase Daemon so we have to do it in all places.
	AddDeviceForLocalUserOrBust(t, config1, uid2)
	AddDeviceForLocalUserOrBust(t, config2, uid2)
	AddDeviceForLocalUserOrBust(t, config3, uid2)
	devIndex := AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
	SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)

	AddDeviceForLocalUserOrBust(t, config1, uid3)
	AddDeviceForLocalUserOrBust(t, config2, uid3)
	AddDeviceForLocalUserOrBust(t, config3, uid3)
	devIndex = AddDeviceForLocalUserOrBust(t, config3Dev2, uid3)
	SwitchDeviceForLocalUserOrBust(t, config3Dev2, devIndex)

	// Users 2 and 3 should be unable to read the data now since its
	// device wasn't registered when the folder was originally
	// created.
	kbfsOps2Dev2 := config2Dev2.KBFSOps()
	_, _, err = kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if _, ok := err.(ReadAccessError); !ok {
		t.Fatalf("Got unexpected error when reading with new key: %v", err)
	}
	kbfsOps3Dev2 := config3Dev2.KBFSOps()
	_, _, err = kbfsOps3Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if _, ok := err.(ReadAccessError); !ok {
		t.Fatalf("Got unexpected error when reading with new key: %v", err)
	}

	// Set the KBPKI so we can count the identify calls
	localDaemon := config1.KeybaseDaemon()
	measuredDaemon := NewKeybaseDaemonMeasured(localDaemon, config1.MetricsRegistry())
	config1.SetKeybaseDaemon(measuredDaemon)

	// Force the FBO to forget about its previous identify.
	kbfsOps1.(*KBFSOpsStandard).getOps(
		rootNode1.GetFolderBranch()).identifyDone = false

	// now user 1 should rekey
	err = kbfsOps1.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
	if err != nil {
		t.Fatalf("Couldn't rekey: %v", err)
	}

	// Set the local daemon back since there are later dependencies on it.
	config1.SetKeybaseDaemon(localDaemon)

	// u2 and u3 should be identified as part of the rekey.
	if g, e := measuredDaemon.identifyTimer.Count(), int64(2); g != e {
		t.Errorf("Expected %d identify calls, but got %d", e, g)
	}

	// The new devices should be able to read now.
	_, _, err = kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if err != nil {
		t.Fatalf("Got unexpected error after rekey: %v", err)
	}

	_, _, err = kbfsOps3Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if err != nil {
		t.Fatalf("Got unexpected error after rekey: %v", err)
	}
}

// TestKeyManagerSelfRekeyAcrossDevices: user 2 rekeys from an existing device
// of their own to grant their new device access, with no help from user 1.
func TestKeyManagerSelfRekeyAcrossDevices(t *testing.T) {
	var u1, u2 libkb.NormalizedUsername = "u1", "u2"
	config1, _, ctx := kbfsOpsConcurInit(t, u1, u2)
	defer CheckConfigAndShutdown(t, config1)

	config2 := ConfigAsUser(config1.(*ConfigLocal), u2)
	defer CheckConfigAndShutdown(t, config2)
	uid2, err := config2.KBPKI().GetCurrentUID(context.Background())
	if err != nil {
		t.Fatal(err)
	}

	t.Log("Create a shared folder")
	name := u1.String() + "," + u2.String()

	kbfsOps1 := config1.KBFSOps()
	rootNode1, _, err := kbfsOps1.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if err != nil {
		t.Fatalf("Couldn't create folder: %v", err)
	}

	t.Log("User 1 creates a file")
	_, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "a", false)
	if err != nil {
		t.Fatalf("Couldn't create file: %v", err)
	}

	t.Log("User 2 adds a device")
	// The configs don't share a Keybase Daemon so we have to do it in all
	// places.
	AddDeviceForLocalUserOrBust(t, config1, uid2)
	devIndex := AddDeviceForLocalUserOrBust(t, config2, uid2)
	config2Dev2 := ConfigAsUser(config2, u2)
	defer CheckConfigAndShutdown(t, config2Dev2)
	SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)

	t.Log("Check that user 2 device 2 is unable to read the file")
	// user 2 device 2 should be unable to read the data now since its device
	// wasn't registered when the folder was originally created.
	kbfsOps2Dev2 := config2Dev2.KBFSOps()
	_, _, err = kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if _, ok := err.(ReadAccessError); !ok {
		t.Fatalf("Got unexpected error when reading with new key: %v", err)
	}

	t.Log("User 2 rekeys from device 1")
	kbfsOps2 := config2.KBFSOps()
	root2dev1, _, err := kbfsOps2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if err != nil {
		t.Fatalf("Couldn't obtain folder: %#v", err)
	}

	err = kbfsOps2.Rekey(ctx, root2dev1.GetFolderBranch().Tlf)
	if err != nil {
		t.Fatalf("Couldn't rekey: %v", err)
	}

	t.Log("User 2 device 2 should be able to read now")
	root2dev2, _, err := kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if err != nil {
		t.Fatalf("Got unexpected error after rekey: %v", err)
	}

	t.Log("User 2 device 2 reads user 1's file")
	// NOTE(review): err from GetDirChildren is not checked before indexing
	// into children2.
	children2, err := kbfsOps2Dev2.GetDirChildren(ctx, root2dev2)
	if _, ok := children2["a"]; !ok {
		t.Fatalf("Device 2 couldn't see user 1's dir entry")
	}

	t.Log("User 2 device 2 creates a file")
	_, _, err = kbfsOps2Dev2.CreateFile(ctx, root2dev2, "b", false)
	if err != nil {
		t.Fatalf("Couldn't create file: %v", err)
	}

	t.Log("User 1 syncs from the server")
	err = kbfsOps1.SyncFromServer(ctx, rootNode1.GetFolderBranch())
	if err != nil {
		t.Fatalf("Couldn't sync from server: %v", err)
	}

	t.Log("User 1 should be able to read the file that user 2 device 2 created")
	// NOTE(review): err from GetDirChildren is not checked here either.
	children1, err := kbfsOps1.GetDirChildren(ctx, rootNode1)
	if _, ok := children1["b"]; !ok {
		t.Fatalf("Device 1 couldn't see the new dir entry")
	}
}

// This tests 2 variations of the situation where clients w/o the folder key set the rekey bit.
// In one case the client is a writer and in the other a reader. They both blindly copy the existing
// metadata and simply set the rekey bit. Then another participant rekeys the folder and they try to read.
func TestKeyManagerRekeyBit(t *testing.T) {
	var u1, u2, u3 libkb.NormalizedUsername = "u1", "u2", "u3"
	config1, _, ctx := kbfsOpsConcurInit(t, u1, u2, u3)
	// doShutdown1 lets the test run config1's checks explicitly near the end,
	// before the other deferred shutdowns tear down the shared mdserver.
	doShutdown1 := true
	defer func() {
		if doShutdown1 {
			CheckConfigAndShutdown(t, config1)
		}
	}()

	config1.MDServer().DisableRekeyUpdatesForTesting()

	config2 := ConfigAsUser(config1.(*ConfigLocal), u2)
	defer CheckConfigAndShutdown(t, config2)
	uid2, err := config2.KBPKI().GetCurrentUID(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	config2.MDServer().DisableRekeyUpdatesForTesting()

	config3 := ConfigAsUser(config1.(*ConfigLocal), u3)
	defer CheckConfigAndShutdown(t, config3)
	uid3, err := config3.KBPKI().GetCurrentUID(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	config3.MDServer().DisableRekeyUpdatesForTesting()

	// 2 writers 1 reader
	name := u1.String() + "," + u2.String() + "#" + u3.String()

	kbfsOps1 := config1.KBFSOps()
	rootNode1, _, err := kbfsOps1.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if err != nil {
		t.Fatalf("Couldn't create folder: %v", err)
	}

	// user 1 creates a file
	_, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "a", false)
	if err != nil {
		t.Fatalf("Couldn't create file: %v", err)
	}

	config2Dev2 := ConfigAsUser(config1.(*ConfigLocal), u2)
	// we don't check the config because this device can't read all of the md blocks.
	defer config2Dev2.Shutdown()
	config2Dev2.MDServer().DisableRekeyUpdatesForTesting()

	// Now give u2 a new device.  The configs don't share a Keybase
	// Daemon so we have to do it in all places.
	AddDeviceForLocalUserOrBust(t, config1, uid2)
	AddDeviceForLocalUserOrBust(t, config2, uid2)
	AddDeviceForLocalUserOrBust(t, config3, uid2)
	devIndex := AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
	SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)

	// user 2 should be unable to read the data now since its device
	// wasn't registered when the folder was originally created.
	kbfsOps2Dev2 := config2Dev2.KBFSOps()
	_, _, err = kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if _, ok := err.(ReadAccessError); !ok {
		t.Fatalf("Got unexpected error when reading with new key: %v", err)
	}

	// now user 2 should set the rekey bit
	err = kbfsOps2Dev2.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
	if err != nil {
		t.Fatalf("Couldn't rekey: %v", err)
	}

	// user 1 syncs from server
	err = kbfsOps1.SyncFromServer(ctx, rootNode1.GetFolderBranch())
	if err != nil {
		t.Fatalf("Couldn't sync from server: %v", err)
	}

	// user 1 should try to rekey
	err = kbfsOps1.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
	if err != nil {
		t.Fatalf("Couldn't rekey: %v", err)
	}

	// user 2 syncs from server
	err = kbfsOps2Dev2.SyncFromServer(ctx, rootNode1.GetFolderBranch())
	if err != nil {
		t.Fatalf("Couldn't sync from server: %v", err)
	}

	// this device should be able to read now
	rootNode2Dev2, _, err := kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if err != nil {
		t.Fatalf("Got unexpected error after rekey: %v", err)
	}

	// look for the file
	aNode, _, err := kbfsOps2Dev2.Lookup(ctx, rootNode2Dev2, "a")
	if err != nil {
		t.Fatalf("Device 2 couldn't lookup a: %v", err)
	}

	// read it
	buf := []byte{0}
	_, err = kbfsOps2Dev2.Read(ctx, aNode, buf, 0)
	if err != nil {
		t.Fatalf("Device 2 couldn't read a: %v", err)
	}

	config3Dev2 := ConfigAsUser(config1.(*ConfigLocal), u3)
	// we don't check the config because this device can't read all of the md blocks.
	defer config3Dev2.Shutdown()
	config3Dev2.MDServer().DisableRekeyUpdatesForTesting()

	// Now give u3 a new device.
	AddDeviceForLocalUserOrBust(t, config1, uid3)
	AddDeviceForLocalUserOrBust(t, config2, uid3)
	AddDeviceForLocalUserOrBust(t, config2Dev2, uid3)
	AddDeviceForLocalUserOrBust(t, config3, uid3)
	devIndex = AddDeviceForLocalUserOrBust(t, config3Dev2, uid3)
	SwitchDeviceForLocalUserOrBust(t, config3Dev2, devIndex)

	// user 3 dev 2 should be unable to read the data now since its device
	// wasn't registered when the folder was originally created.
	kbfsOps3Dev2 := config3Dev2.KBFSOps()
	_, _, err = kbfsOps3Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if _, ok := err.(ReadAccessError); !ok {
		t.Fatalf("Got unexpected error when reading with new key: %v", err)
	}

	// now user 3 dev 2 should set the rekey bit
	err = kbfsOps3Dev2.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
	if err != nil {
		t.Fatalf("Couldn't rekey: %v", err)
	}

	// user 2 dev 2 syncs from server
	err = kbfsOps2Dev2.SyncFromServer(ctx, rootNode1.GetFolderBranch())
	if err != nil {
		t.Fatalf("Couldn't sync from server: %v", err)
	}

	// user 2 dev 2 should try to rekey
	err = kbfsOps2Dev2.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
	if err != nil {
		t.Fatalf("Couldn't rekey: %v", err)
	}

	// user 3 dev 2 syncs from server
	err = kbfsOps3Dev2.SyncFromServer(ctx, rootNode1.GetFolderBranch())
	if err != nil {
		t.Fatalf("Couldn't sync from server: %v", err)
	}

	// this device should be able to read now
	rootNode3Dev2, _, err := kbfsOps3Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if err != nil {
		t.Fatalf("Got unexpected error after rekey: %v", err)
	}

	// look for the file
	a2Node, _, err := kbfsOps3Dev2.Lookup(ctx, rootNode3Dev2, "a")
	if err != nil {
		t.Fatalf("Device 3 couldn't lookup a: %v", err)
	}

	// read it
	buf = []byte{0}
	_, err = kbfsOps3Dev2.Read(ctx, a2Node, buf, 0)
	if err != nil {
		t.Fatalf("Device 3 couldn't read a: %v", err)
	}

	// Explicitly run the checks with config1 before the deferred shutdowns begin.
	// This way the shared mdserver hasn't been shutdown.
	CheckConfigAndShutdown(t, config1)
	doShutdown1 = false
}

// Two devices conflict when revoking a 3rd device.
// Test that after this both can still read the latest version of the folder.
func TestKeyManagerRekeyAddAndRevokeDeviceWithConflict(t *testing.T) {
	var u1, u2 libkb.NormalizedUsername = "u1", "u2"
	config1, _, ctx := kbfsOpsConcurInit(t, u1, u2)
	defer CheckConfigAndShutdown(t, config1)

	config2 := ConfigAsUser(config1.(*ConfigLocal), u2)
	defer CheckConfigAndShutdown(t, config2)
	uid2, err := config2.KBPKI().GetCurrentUID(context.Background())
	if err != nil {
		t.Fatal(err)
	}

	// create a shared folder
	name := u1.String() + "," + u2.String()

	kbfsOps1 := config1.KBFSOps()
	rootNode1, _, err := kbfsOps1.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if err != nil {
		t.Fatalf("Couldn't create folder: %v", err)
	}

	// user 1 creates a file
	_, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "a", false)
	if err != nil {
		t.Fatalf("Couldn't create file: %v", err)
	}

	config2Dev2 := ConfigAsUser(config1.(*ConfigLocal), u2)
	defer CheckConfigAndShutdown(t, config2Dev2)

	// give user 2 a new device
	AddDeviceForLocalUserOrBust(t, config1, uid2)
	AddDeviceForLocalUserOrBust(t, config2, uid2)
	devIndex := AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
	SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)

	// user 2 should be unable to read the data now since its device
	// wasn't registered when the folder was originally created.
	kbfsOps2Dev2 := config2Dev2.KBFSOps()
	root2Dev2, _, err := kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if _, ok := err.(ReadAccessError); !ok {
		t.Fatalf("Got unexpected error when reading with new key: %v", err)
	}

	// now user 1 should rekey
	err = kbfsOps1.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
	if err != nil {
		t.Fatalf("Couldn't rekey: %v", err)
	}

	// this device should be able to read now
	root2Dev2, _, err = kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if err != nil {
		t.Fatalf("Got unexpected error after rekey: %v", err)
	}

	// Now revoke the original user 2 device
	RevokeDeviceForLocalUserOrBust(t, config1, uid2, 0)
	RevokeDeviceForLocalUserOrBust(t, config2Dev2, uid2, 0)

	// disable updates on user 1
	c, err := DisableUpdatesForTesting(config1, rootNode1.GetFolderBranch())
	if err != nil {
		t.Fatalf("Couldn't disable updates: %v", err)
	}

	// rekey again but with user 2 device 2
	err = kbfsOps2Dev2.Rekey(ctx, root2Dev2.GetFolderBranch().Tlf)
	if err != nil {
		t.Fatalf("Couldn't rekey: %v", err)
	}

	// have user 1 also try to rekey but fail due to conflict
	err = kbfsOps1.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
	if _, isConflict := err.(MDServerErrorConflictRevision); !isConflict {
		t.Fatalf("Expected failure due to conflict")
	}

	// device 1 re-enables updates
	c <- struct{}{}
	err = kbfsOps1.SyncFromServer(ctx, rootNode1.GetFolderBranch())
	if err != nil {
		t.Fatalf("Couldn't sync from server: %v", err)
	}

	err = kbfsOps2Dev2.SyncFromServer(ctx, root2Dev2.GetFolderBranch())
	if err != nil {
		t.Fatalf("Couldn't sync from server: %v", err)
	}

	// force re-encryption of the root dir
	_, _, err = kbfsOps2Dev2.CreateFile(ctx, root2Dev2, "b", false)
	if err != nil {
		t.Fatalf("Couldn't create file: %v", err)
	}

	// device 1 should still work
	err = kbfsOps1.SyncFromServer(ctx, rootNode1.GetFolderBranch())
	if err != nil {
		t.Fatalf("Couldn't sync from server: %v", err)
	}

	rootNode1, _, err = kbfsOps1.GetOrCreateRootNode(ctx, name, false, MasterBranch)
	if err
!= nil {
    t.Fatalf("Got unexpected error after rekey: %v", err)
}
children, err := kbfsOps1.GetDirChildren(ctx, rootNode1)
if _, ok := children["b"]; !ok {
    t.Fatalf("Device 1 couldn't see the new dir entry")
}
}

key_manager_test: don't use keybase_measured_daemon for counting It might be going away. Requested by @akalin-keybase. Issue: #676

package libkbfs

import (
    "errors"
    "testing"

    "github.com/golang/mock/gomock"
    "github.com/keybase/client/go/libkb"
    keybase1 "github.com/keybase/client/go/protocol"
    "golang.org/x/net/context"
)

// keyManagerInit builds the standard fixture for key-manager tests: a gomock
// controller backed by a SafeTestReporter, a mock config wired with a real
// KeyManagerStandard, and a background context.
func keyManagerInit(t *testing.T) (mockCtrl *gomock.Controller, config *ConfigMock, ctx context.Context) {
    ctr := NewSafeTestReporter(t)
    mockCtrl = gomock.NewController(ctr)
    config = NewConfigMock(mockCtrl, ctr)
    keyman := NewKeyManagerStandard(config)
    config.SetKeyManager(keyman)
    ctx = context.Background()
    return
}

// keyManagerShutdown flushes any deferred mock failures and verifies that all
// EXPECT()ed calls happened.  Intended to be deferred right after keyManagerInit.
func keyManagerShutdown(mockCtrl *gomock.Controller, config *ConfigMock) {
    config.ctr.CheckForFailures()
    mockCtrl.Finish()
}

// expectCachedGetTLFCryptKey arranges for the key cache to report a hit for
// the given TLF and key generation, so no crypto/server calls are needed.
func expectCachedGetTLFCryptKey(config *ConfigMock, rmd *RootMetadata, keyGen KeyGen) {
    config.mockKcache.EXPECT().GetTLFCryptKey(rmd.ID, keyGen).Return(TLFCryptKey{}, nil)
}

// expectUncachedGetTLFCryptKey arranges a cache miss followed by the full
// fetch path using the current device's key: decrypt the client half from the
// metadata, fetch the server half, and unmask the TLF key.  When encrypt is
// false the resulting key is also expected to be put back into the cache.
// uid is unused here but kept for signature parity with the AnyDevice variant.
func expectUncachedGetTLFCryptKey(config *ConfigMock, rmd *RootMetadata, keyGen KeyGen, uid keybase1.UID, subkey CryptPublicKey, encrypt bool) {
    config.mockKcache.EXPECT().GetTLFCryptKey(rmd.ID, keyGen).
        Return(TLFCryptKey{}, errors.New("NONE"))

    // get the xor'd key out of the metadata
    config.mockKbpki.EXPECT().GetCurrentCryptPublicKey(gomock.Any()).
        Return(subkey, nil)
    config.mockCrypto.EXPECT().DecryptTLFCryptKeyClientHalf(gomock.Any(), TLFEphemeralPublicKey{}, gomock.Any()).
        Return(TLFCryptKeyClientHalf{}, nil)

    // get the server-side half and retrieve the real secret key
    config.mockKops.EXPECT().GetTLFCryptKeyServerHalf(gomock.Any(), gomock.Any(), gomock.Any()).Return(TLFCryptKeyServerHalf{}, nil)
    config.mockCrypto.EXPECT().UnmaskTLFCryptKey(TLFCryptKeyServerHalf{}, TLFCryptKeyClientHalf{}).Return(TLFCryptKey{}, nil)

    // now put the key into the cache
    if !encrypt {
        config.mockKcache.EXPECT().PutTLFCryptKey(rmd.ID, keyGen, TLFCryptKey{}).
            Return(nil)
    }
}

// expectUncachedGetTLFCryptKeyAnyDevice is like expectUncachedGetTLFCryptKey
// but goes through the try-all-devices decryption path
// (DecryptTLFCryptKeyClientHalfAny) for the given uid.
func expectUncachedGetTLFCryptKeyAnyDevice(config *ConfigMock, rmd *RootMetadata, keyGen KeyGen, uid keybase1.UID, subkey CryptPublicKey, encrypt bool) {
    config.mockKcache.EXPECT().GetTLFCryptKey(rmd.ID, keyGen).
        Return(TLFCryptKey{}, errors.New("NONE"))

    // get the xor'd key out of the metadata
    config.mockKbpki.EXPECT().GetCryptPublicKeys(gomock.Any(), uid).
        Return([]CryptPublicKey{subkey}, nil)
    config.mockCrypto.EXPECT().DecryptTLFCryptKeyClientHalfAny(gomock.Any(), gomock.Any()).Return(TLFCryptKeyClientHalf{}, 0, nil)

    // get the server-side half and retrieve the real secret key
    config.mockKops.EXPECT().GetTLFCryptKeyServerHalf(gomock.Any(), gomock.Any(), gomock.Any()).Return(TLFCryptKeyServerHalf{}, nil)
    config.mockCrypto.EXPECT().UnmaskTLFCryptKey(TLFCryptKeyServerHalf{}, TLFCryptKeyClientHalf{}).Return(TLFCryptKey{}, nil)

    // now put the key into the cache
    if !encrypt {
        config.mockKcache.EXPECT().PutTLFCryptKey(rmd.ID, keyGen, TLFCryptKey{}).
            Return(nil)
    }
}

// expectRekey sets up all mock expectations for one full rekey of a private
// TLF with a single device: fresh TLF keys, a server half, per-device client
// halves, and the server-half upload.
func expectRekey(config *ConfigMock, rmd *RootMetadata) {
    // generate new keys
    config.mockCrypto.EXPECT().MakeRandomTLFKeys().Return(TLFPublicKey{}, TLFPrivateKey{}, TLFEphemeralPublicKey{}, TLFEphemeralPrivateKey{}, TLFCryptKey{}, nil)
    config.mockCrypto.EXPECT().MakeRandomTLFCryptKeyServerHalf().Return(TLFCryptKeyServerHalf{}, nil)
    subkey := MakeFakeCryptPublicKeyOrBust("crypt public key")
    config.mockKbpki.EXPECT().GetCryptPublicKeys(gomock.Any(), gomock.Any()).
        Return([]CryptPublicKey{subkey}, nil)

    // make keys for the one device
    config.mockCrypto.EXPECT().MaskTLFCryptKey(TLFCryptKeyServerHalf{}, TLFCryptKey{}).Return(TLFCryptKeyClientHalf{}, nil)
    config.mockCrypto.EXPECT().EncryptTLFCryptKeyClientHalf(TLFEphemeralPrivateKey{}, subkey, TLFCryptKeyClientHalf{}).Return(EncryptedTLFCryptKeyClientHalf{}, nil)
    config.mockKops.EXPECT().PutTLFCryptKeyServerHalves(gomock.Any(), gomock.Any()).Return(nil)
    config.mockCrypto.EXPECT().GetTLFCryptKeyServerHalfID(gomock.Any(), gomock.Any(), gomock.Any()).Return(TLFCryptKeyServerHalfID{}, nil)

    // Ignore Notify calls for now
    config.mockRep.EXPECT().Notify(gomock.Any(), gomock.Any()).AnyTimes()
}

// TestKeyManagerPublicTLFCryptKey checks that all three key-fetch entry
// points return the well-known PublicTLFCryptKey for a public folder.
func TestKeyManagerPublicTLFCryptKey(t *testing.T) {
    mockCtrl, config, ctx := keyManagerInit(t)
    defer keyManagerShutdown(mockCtrl, config)

    id, h, _ := newDir(t, config, 1, false, true)
    rmd := NewRootMetadataForTest(h, id)

    tlfCryptKey, err := config.KeyManager().
        GetTLFCryptKeyForEncryption(ctx, rmd)
    if err != nil {
        t.Error(err)
    }
    if tlfCryptKey != PublicTLFCryptKey {
        t.Errorf("got %v, expected %v", tlfCryptKey, PublicTLFCryptKey)
    }

    tlfCryptKey, err = config.KeyManager().
        GetTLFCryptKeyForMDDecryption(ctx, rmd)
    if err != nil {
        t.Error(err)
    }
    if tlfCryptKey != PublicTLFCryptKey {
        t.Errorf("got %v, expected %v", tlfCryptKey, PublicTLFCryptKey)
    }

    tlfCryptKey, err = config.KeyManager().
        GetTLFCryptKeyForBlockDecryption(ctx, rmd, BlockPointer{})
    if err != nil {
        t.Error(err)
    }
    if tlfCryptKey != PublicTLFCryptKey {
        t.Errorf("got %v, expected %v", tlfCryptKey, PublicTLFCryptKey)
    }
}

// TestKeyManagerCachedSecretKeyForEncryptionSuccess: a cache hit for the
// latest key generation satisfies GetTLFCryptKeyForEncryption.
func TestKeyManagerCachedSecretKeyForEncryptionSuccess(t *testing.T) {
    mockCtrl, config, ctx := keyManagerInit(t)
    defer keyManagerShutdown(mockCtrl, config)

    _, id, h := makeID(t, config, false)
    rmd := NewRootMetadataForTest(h, id)
    AddNewKeysOrBust(t, rmd, *NewTLFKeyBundle())

    expectCachedGetTLFCryptKey(config, rmd, rmd.LatestKeyGeneration())

    if _, err := config.KeyManager().
        GetTLFCryptKeyForEncryption(ctx, rmd); err != nil {
        t.Errorf("Got error on GetTLFCryptKeyForEncryption: %v", err)
    }
}

// TestKeyManagerCachedSecretKeyForMDDecryptionSuccess: a cache hit for the
// latest key generation satisfies GetTLFCryptKeyForMDDecryption.
func TestKeyManagerCachedSecretKeyForMDDecryptionSuccess(t *testing.T) {
    mockCtrl, config, ctx := keyManagerInit(t)
    defer keyManagerShutdown(mockCtrl, config)

    _, id, h := makeID(t, config, false)
    rmd := NewRootMetadataForTest(h, id)
    AddNewKeysOrBust(t, rmd, *NewTLFKeyBundle())

    expectCachedGetTLFCryptKey(config, rmd, rmd.LatestKeyGeneration())

    if _, err := config.KeyManager().
        GetTLFCryptKeyForMDDecryption(ctx, rmd); err != nil {
        t.Errorf("Got error on GetTLFCryptKeyForMDDecryption: %v", err)
    }
}

// TestKeyManagerCachedSecretKeyForBlockDecryptionSuccess: block decryption
// may need an older key generation (two bundles are added; the block points
// at the previous one), still served from the cache.
func TestKeyManagerCachedSecretKeyForBlockDecryptionSuccess(t *testing.T) {
    mockCtrl, config, ctx := keyManagerInit(t)
    defer keyManagerShutdown(mockCtrl, config)

    _, id, h := makeID(t, config, false)
    rmd := NewRootMetadataForTest(h, id)
    AddNewKeysOrBust(t, rmd, *NewTLFKeyBundle())
    AddNewKeysOrBust(t, rmd, *NewTLFKeyBundle())

    keyGen := rmd.LatestKeyGeneration() - 1
    expectCachedGetTLFCryptKey(config, rmd, keyGen)

    if _, err := config.KeyManager().GetTLFCryptKeyForBlockDecryption(
        ctx, rmd, BlockPointer{KeyGen: keyGen}); err != nil {
        t.Errorf("Got error on GetTLFCryptKeyForBlockDecryption: %v", err)
    }
}

// TestKeyManagerUncachedSecretKeyForEncryptionSuccess: on a cache miss, the
// key is reconstructed via the current device's key (encrypt=true, so no
// cache put is expected).
func TestKeyManagerUncachedSecretKeyForEncryptionSuccess(t *testing.T) {
    mockCtrl, config, ctx := keyManagerInit(t)
    defer keyManagerShutdown(mockCtrl, config)

    uid, id, h := makeID(t, config, false)
    rmd := NewRootMetadataForTest(h, id)
    subkey := MakeFakeCryptPublicKeyOrBust("crypt public key")
    AddNewKeysOrBust(t, rmd, MakeDirRKeyBundle(uid, subkey))

    expectUncachedGetTLFCryptKey(config, rmd, rmd.LatestKeyGeneration(), uid, subkey, true)

    if _, err := config.KeyManager().
        GetTLFCryptKeyForEncryption(ctx, rmd); err != nil {
        t.Errorf("Got error on GetTLFCryptKeyForEncryption: %v", err)
    }
}

// TestKeyManagerUncachedSecretKeyForMDDecryptionSuccess: on a cache miss, MD
// decryption goes through the any-device path and caches the result.
func TestKeyManagerUncachedSecretKeyForMDDecryptionSuccess(t *testing.T) {
    mockCtrl, config, ctx := keyManagerInit(t)
    defer keyManagerShutdown(mockCtrl, config)

    uid, id, h := makeID(t, config, false)
    rmd := NewRootMetadataForTest(h, id)
    subkey := MakeFakeCryptPublicKeyOrBust("crypt public key")
    AddNewKeysOrBust(t, rmd, MakeDirRKeyBundle(uid, subkey))

    expectUncachedGetTLFCryptKeyAnyDevice(config, rmd, rmd.LatestKeyGeneration(), uid, subkey, false)

    if _, err := config.KeyManager().
        GetTLFCryptKeyForMDDecryption(ctx, rmd); err != nil {
        t.Errorf("Got error on GetTLFCryptKeyForMDDecryption: %v", err)
    }
}

// TestKeyManagerUncachedSecretKeyForBlockDecryptionSuccess: on a cache miss,
// an older key generation for block decryption is reconstructed and cached.
func TestKeyManagerUncachedSecretKeyForBlockDecryptionSuccess(t *testing.T) {
    mockCtrl, config, ctx := keyManagerInit(t)
    defer keyManagerShutdown(mockCtrl, config)

    uid, id, h := makeID(t, config, false)
    rmd := NewRootMetadataForTest(h, id)
    subkey := MakeFakeCryptPublicKeyOrBust("crypt public key")
    AddNewKeysOrBust(t, rmd, MakeDirRKeyBundle(uid, subkey))
    AddNewKeysOrBust(t, rmd, MakeDirRKeyBundle(uid, subkey))

    keyGen := rmd.LatestKeyGeneration() - 1
    expectUncachedGetTLFCryptKey(config, rmd, keyGen, uid, subkey, false)

    if _, err := config.KeyManager().GetTLFCryptKeyForBlockDecryption(
        ctx, rmd, BlockPointer{KeyGen: keyGen}); err != nil {
        t.Errorf("Got error on GetTLFCryptKeyForBlockDecryption: %v", err)
    }
}

// TestKeyManagerRekeyFailurePublic: rekeying a public folder must fail with
// InvalidPublicTLFOperation and leave the key generation untouched.
func TestKeyManagerRekeyFailurePublic(t *testing.T) {
    mockCtrl, config, ctx := keyManagerInit(t)
    defer keyManagerShutdown(mockCtrl, config)

    _, id, h := makeID(t, config, true)
    rmd := NewRootMetadataForTest(h, id)
    if rmd.LatestKeyGeneration() != PublicKeyGen {
        t.Errorf("Expected %d, got %d", rmd.LatestKeyGeneration(), PublicKeyGen)
    }

    if _, _, err := config.KeyManager().
        Rekey(ctx, rmd); err != (InvalidPublicTLFOperation{id, "rekey"}) {
        t.Errorf("Got unexpected error on rekey: %v", err)
    }

    if rmd.LatestKeyGeneration() != PublicKeyGen {
        t.Errorf("Expected %d, got %d", rmd.LatestKeyGeneration(), PublicKeyGen)
    }
}

// TestKeyManagerRekeySuccessPrivate: a successful rekey of a private folder
// reports done and bumps the key generation by exactly one.
func TestKeyManagerRekeySuccessPrivate(t *testing.T) {
    mockCtrl, config, ctx := keyManagerInit(t)
    defer keyManagerShutdown(mockCtrl, config)

    _, id, h := makeID(t, config, false)
    rmd := NewRootMetadataForTest(h, id)
    oldKeyGen := rmd.LatestKeyGeneration()

    expectRekey(config, rmd)

    if done, _, err := config.KeyManager().Rekey(ctx, rmd); !done || err != nil {
        t.Errorf("Got error on rekey: %t, %v", done, err)
    } else if rmd.LatestKeyGeneration() != oldKeyGen+1 {
        t.Errorf("Bad key generation after rekey: %d", rmd.LatestKeyGeneration())
    }
}

// TestKeyManagerRekeyAddAndRevokeDevice: end-to-end scenario where user 2
// gains a new device (readable only after user 1 rekeys), later has devices
// revoked, and the test verifies reads, identify-call counts, and that
// revoked devices lose access to server-side key halves.
func TestKeyManagerRekeyAddAndRevokeDevice(t *testing.T) {
    var u1, u2 libkb.NormalizedUsername = "u1", "u2"
    config1, _, ctx := kbfsOpsConcurInit(t, u1, u2)
    defer CheckConfigAndShutdown(t, config1)

    config2 := ConfigAsUser(config1.(*ConfigLocal), u2)
    defer CheckConfigAndShutdown(t, config2)
    uid2, err := config2.KBPKI().GetCurrentUID(context.Background())
    if err != nil {
        t.Fatal(err)
    }

    // Create a shared folder
    name := u1.String() + "," + u2.String()

    kbfsOps1 := config1.KBFSOps()
    rootNode1, _, err := kbfsOps1.GetOrCreateRootNode(ctx, name, false, MasterBranch)
    if err != nil {
        t.Fatalf("Couldn't create folder: %v", err)
    }

    // user 1 creates a file
    _, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "a", false)
    if err != nil {
        t.Fatalf("Couldn't create file: %v", err)
    }

    config2Dev2 := ConfigAsUser(config1.(*ConfigLocal), u2)
    defer CheckConfigAndShutdown(t, config2Dev2)

    // Now give u2 a new device.  The configs don't share a Keybase
    // Daemon so we have to do it in all places.
    AddDeviceForLocalUserOrBust(t, config1, uid2)
    AddDeviceForLocalUserOrBust(t, config2, uid2)
    devIndex := AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
    SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)

    // user 2 should be unable to read the data now since its device
    // wasn't registered when the folder was originally created.
    kbfsOps2Dev2 := config2Dev2.KBFSOps()
    _, _, err = kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
    if _, ok := err.(ReadAccessError); !ok {
        t.Fatalf("Got unexpected error when reading with new key: %v", err)
    }

    // Set the KBPKI so we can count the identify calls
    countKBPKI := &daemonKBPKI{
        KBPKI:  config1.KBPKI(),
        daemon: config1.KeybaseDaemon(),
    }
    config1.SetKBPKI(countKBPKI)
    // Force the FBO to forget about its previous identify.
    kbfsOps1.(*KBFSOpsStandard).getOps(
        rootNode1.GetFolderBranch()).identifyDone = false

    // now user 1 should rekey
    err = kbfsOps1.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
    if err != nil {
        t.Fatalf("Couldn't rekey: %v", err)
    }

    // Only u2 should be identified as part of the rekey.
    if g, e := countKBPKI.getIdentifyCalls(), 1; g != e {
        t.Errorf("Expected %d identify calls, but got %d", e, g)
    }

    // this device should be able to read now
    root2Dev2, _, err := kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
    if err != nil {
        t.Fatalf("Got unexpected error after rekey: %v", err)
    }

    // add a third device for user 2
    config2Dev3 := ConfigAsUser(config1.(*ConfigLocal), u2)
    defer CheckConfigAndShutdown(t, config2Dev3)
    AddDeviceForLocalUserOrBust(t, config1, uid2)
    AddDeviceForLocalUserOrBust(t, config2, uid2)
    AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
    devIndex = AddDeviceForLocalUserOrBust(t, config2Dev3, uid2)
    SwitchDeviceForLocalUserOrBust(t, config2Dev3, devIndex)

    // Now revoke the original user 2 device
    RevokeDeviceForLocalUserOrBust(t, config1, uid2, 0)
    RevokeDeviceForLocalUserOrBust(t, config2Dev2, uid2, 0)
    RevokeDeviceForLocalUserOrBust(t, config2Dev3, uid2, 0)

    // rekey again
    err = kbfsOps1.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
    if err != nil {
        t.Fatalf("Couldn't rekey: %v", err)
    }

    // Only u2 should be identified again as part of the rekey.
if g, e := countKBPKI.getIdentifyCalls(), 2; g != e { t.Errorf("Expected %d identify calls, but got %d", e, g) } // force re-encryption of the root dir _, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "b", false) if err != nil { t.Fatalf("Couldn't create file: %v", err) } err = kbfsOps2Dev2.SyncFromServer(ctx, root2Dev2.GetFolderBranch()) if err != nil { t.Fatalf("Couldn't sync from server: %v", err) } // device 2 should still work rootNode2, _, err := kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch) if err != nil { t.Fatalf("Got unexpected error after rekey: %v", err) } children, err := kbfsOps2Dev2.GetDirChildren(ctx, rootNode2) if _, ok := children["b"]; !ok { t.Fatalf("Device 2 couldn't see the new dir entry") } // but device 1 should now fail kbfsOps2 := config2.KBFSOps() _, _, err = kbfsOps2.GetOrCreateRootNode(ctx, name, false, MasterBranch) if _, ok := err.(ReadAccessError); !ok { t.Fatalf("Got unexpected error when reading with revoked key: %v", err) } // meanwhile, device 3 should be able to read both the new and the // old files kbfsOps2Dev3 := config2Dev3.KBFSOps() rootNode2Dev3, _, err := kbfsOps2Dev3.GetOrCreateRootNode(ctx, name, false, MasterBranch) if err != nil { t.Fatalf("Device 3 couldn't read root: %v", err) } aNode, _, err := kbfsOps2Dev3.Lookup(ctx, rootNode2Dev3, "a") if err != nil { t.Fatalf("Device 3 couldn't lookup a: %v", err) } buf := []byte{0} _, err = kbfsOps2Dev3.Read(ctx, aNode, buf, 0) if err != nil { t.Fatalf("Device 3 couldn't read a: %v", err) } bNode, _, err := kbfsOps2Dev3.Lookup(ctx, rootNode2Dev3, "b") if err != nil { t.Fatalf("Device 3 couldn't lookup b: %v", err) } _, err = kbfsOps2Dev3.Read(ctx, bNode, buf, 0) if err != nil { t.Fatalf("Device 3 couldn't read b: %v", err) } // Make sure the server-side keys for the revoked device are gone // for all keygens. 
    rmd, err := config1.MDOps().GetForTLF(ctx, rootNode1.GetFolderBranch().Tlf)
    if err != nil {
        t.Fatalf("Couldn't get latest md: %v", err)
    }
    currKeyGen := rmd.LatestKeyGeneration()
    // clear the key cache
    config2.SetKeyCache(NewKeyCacheStandard(5000))
    km2, ok := config2.KeyManager().(*KeyManagerStandard)
    if !ok {
        t.Fatal("Wrong kind of key manager for config2")
    }
    // Every key generation must now be unfetchable from the revoked device.
    for keyGen := KeyGen(FirstValidKeyGen); keyGen <= currKeyGen; keyGen++ {
        _, err = km2.getTLFCryptKeyUsingCurrentDevice(ctx, rmd, keyGen, true)
        if err == nil {
            t.Errorf("User 2 could still fetch a key for keygen %d", keyGen)
        }
    }
}

// TestKeyManagerRekeyAddWriterAndReaderDevice: both a writer (u2) and a
// reader (u3) get new devices; a single rekey by u1 must let both new
// devices read, and exactly two identify calls (u2 and u3) are expected.
func TestKeyManagerRekeyAddWriterAndReaderDevice(t *testing.T) {
    var u1, u2, u3 libkb.NormalizedUsername = "u1", "u2", "u3"
    config1, _, ctx := kbfsOpsConcurInit(t, u1, u2, u3)
    defer CheckConfigAndShutdown(t, config1)

    config2 := ConfigAsUser(config1.(*ConfigLocal), u2)
    defer CheckConfigAndShutdown(t, config2)
    uid2, err := config2.KBPKI().GetCurrentUID(context.Background())
    if err != nil {
        t.Fatal(err)
    }

    config3 := ConfigAsUser(config1.(*ConfigLocal), u3)
    defer CheckConfigAndShutdown(t, config3)
    uid3, err := config3.KBPKI().GetCurrentUID(context.Background())
    if err != nil {
        t.Fatal(err)
    }

    // Create a shared folder
    name := u1.String() + "," + u2.String() + ReaderSep + u3.String()

    kbfsOps1 := config1.KBFSOps()
    rootNode1, _, err := kbfsOps1.GetOrCreateRootNode(ctx, name, false, MasterBranch)
    if err != nil {
        t.Fatalf("Couldn't create folder: %v", err)
    }

    // user 1 creates a file
    _, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "a", false)
    if err != nil {
        t.Fatalf("Couldn't create file: %v", err)
    }

    config2Dev2 := ConfigAsUser(config1.(*ConfigLocal), u2)
    defer CheckConfigAndShutdown(t, config2Dev2)

    config3Dev2 := ConfigAsUser(config1.(*ConfigLocal), u3)
    defer CheckConfigAndShutdown(t, config3Dev2)

    // Now give u2 and u3 new devices.  The configs don't share a
    // Keybase Daemon so we have to do it in all places.
    AddDeviceForLocalUserOrBust(t, config1, uid2)
    AddDeviceForLocalUserOrBust(t, config2, uid2)
    AddDeviceForLocalUserOrBust(t, config3, uid2)
    devIndex := AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
    SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)
    AddDeviceForLocalUserOrBust(t, config1, uid3)
    AddDeviceForLocalUserOrBust(t, config2, uid3)
    AddDeviceForLocalUserOrBust(t, config3, uid3)
    devIndex = AddDeviceForLocalUserOrBust(t, config3Dev2, uid3)
    SwitchDeviceForLocalUserOrBust(t, config3Dev2, devIndex)

    // Users 2 and 3 should be unable to read the data now since its
    // device wasn't registered when the folder was originally
    // created.
    kbfsOps2Dev2 := config2Dev2.KBFSOps()
    _, _, err = kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
    if _, ok := err.(ReadAccessError); !ok {
        t.Fatalf("Got unexpected error when reading with new key: %v", err)
    }
    kbfsOps3Dev2 := config3Dev2.KBFSOps()
    _, _, err = kbfsOps3Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
    if _, ok := err.(ReadAccessError); !ok {
        t.Fatalf("Got unexpected error when reading with new key: %v", err)
    }

    // Set the KBPKI so we can count the identify calls
    countKBPKI := &daemonKBPKI{
        KBPKI:  config1.KBPKI(),
        daemon: config1.KeybaseDaemon(),
    }
    config1.SetKBPKI(countKBPKI)
    // Force the FBO to forget about its previous identify.
    kbfsOps1.(*KBFSOpsStandard).getOps(
        rootNode1.GetFolderBranch()).identifyDone = false

    // now user 1 should rekey
    err = kbfsOps1.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
    if err != nil {
        t.Fatalf("Couldn't rekey: %v", err)
    }

    // u2 and u3 should be identified as part of the rekey.
    if g, e := countKBPKI.getIdentifyCalls(), 2; g != e {
        t.Errorf("Expected %d identify calls, but got %d", e, g)
    }

    // The new devices should be able to read now.
    _, _, err = kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
    if err != nil {
        t.Fatalf("Got unexpected error after rekey: %v", err)
    }

    _, _, err = kbfsOps3Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
    if err != nil {
        t.Fatalf("Got unexpected error after rekey: %v", err)
    }
}

// TestKeyManagerSelfRekeyAcrossDevices: user 2 rekeys from their original
// device so their own second device gains access, then both sides exchange
// files to prove read access in each direction.
func TestKeyManagerSelfRekeyAcrossDevices(t *testing.T) {
    var u1, u2 libkb.NormalizedUsername = "u1", "u2"
    config1, _, ctx := kbfsOpsConcurInit(t, u1, u2)
    defer CheckConfigAndShutdown(t, config1)

    config2 := ConfigAsUser(config1.(*ConfigLocal), u2)
    defer CheckConfigAndShutdown(t, config2)
    uid2, err := config2.KBPKI().GetCurrentUID(context.Background())
    if err != nil {
        t.Fatal(err)
    }

    t.Log("Create a shared folder")
    name := u1.String() + "," + u2.String()

    kbfsOps1 := config1.KBFSOps()
    rootNode1, _, err := kbfsOps1.GetOrCreateRootNode(ctx, name, false, MasterBranch)
    if err != nil {
        t.Fatalf("Couldn't create folder: %v", err)
    }

    t.Log("User 1 creates a file")
    _, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "a", false)
    if err != nil {
        t.Fatalf("Couldn't create file: %v", err)
    }

    t.Log("User 2 adds a device")
    // The configs don't share a Keybase Daemon so we have to do it in all
    // places.
    AddDeviceForLocalUserOrBust(t, config1, uid2)
    devIndex := AddDeviceForLocalUserOrBust(t, config2, uid2)
    config2Dev2 := ConfigAsUser(config2, u2)
    defer CheckConfigAndShutdown(t, config2Dev2)
    SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)

    t.Log("Check that user 2 device 2 is unable to read the file")
    // user 2 device 2 should be unable to read the data now since its device
    // wasn't registered when the folder was originally created.
kbfsOps2Dev2 := config2Dev2.KBFSOps() _, _, err = kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch) if _, ok := err.(ReadAccessError); !ok { t.Fatalf("Got unexpected error when reading with new key: %v", err) } t.Log("User 2 rekeys from device 1") kbfsOps2 := config2.KBFSOps() root2dev1, _, err := kbfsOps2.GetOrCreateRootNode(ctx, name, false, MasterBranch) if err != nil { t.Fatalf("Couldn't obtain folder: %#v", err) } err = kbfsOps2.Rekey(ctx, root2dev1.GetFolderBranch().Tlf) if err != nil { t.Fatalf("Couldn't rekey: %v", err) } t.Log("User 2 device 2 should be able to read now") root2dev2, _, err := kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch) if err != nil { t.Fatalf("Got unexpected error after rekey: %v", err) } t.Log("User 2 device 2 reads user 1's file") children2, err := kbfsOps2Dev2.GetDirChildren(ctx, root2dev2) if _, ok := children2["a"]; !ok { t.Fatalf("Device 2 couldn't see user 1's dir entry") } t.Log("User 2 device 2 creates a file") _, _, err = kbfsOps2Dev2.CreateFile(ctx, root2dev2, "b", false) if err != nil { t.Fatalf("Couldn't create file: %v", err) } t.Log("User 1 syncs from the server") err = kbfsOps1.SyncFromServer(ctx, rootNode1.GetFolderBranch()) if err != nil { t.Fatalf("Couldn't sync from server: %v", err) } t.Log("User 1 should be able to read the file that user 2 device 2 created") children1, err := kbfsOps1.GetDirChildren(ctx, rootNode1) if _, ok := children1["b"]; !ok { t.Fatalf("Device 1 couldn't see the new dir entry") } } // This tests 2 variations of the situation where clients w/o the folder key set the rekey bit. // In one case the client is a writer and in the other a reader. They both blindly copy the existing // metadata and simply set the rekey bit. Then another participant rekeys the folder and they try to read. 
// TestKeyManagerRekeyBit: devices without the folder key set the rekey bit
// (rekey updates are disabled on the mdservers so the bit isn't auto-handled),
// then a keyed participant performs the real rekey and the new devices verify
// they can read.  Exercised once for a writer (u2) and once for a reader (u3).
func TestKeyManagerRekeyBit(t *testing.T) {
    var u1, u2, u3 libkb.NormalizedUsername = "u1", "u2", "u3"
    config1, _, ctx := kbfsOpsConcurInit(t, u1, u2, u3)
    doShutdown1 := true
    // Shutdown of config1 may happen early (see the end of the test), so the
    // deferred shutdown is guarded by doShutdown1.
    defer func() {
        if doShutdown1 {
            CheckConfigAndShutdown(t, config1)
        }
    }()

    config1.MDServer().DisableRekeyUpdatesForTesting()

    config2 := ConfigAsUser(config1.(*ConfigLocal), u2)
    defer CheckConfigAndShutdown(t, config2)
    uid2, err := config2.KBPKI().GetCurrentUID(context.Background())
    if err != nil {
        t.Fatal(err)
    }
    config2.MDServer().DisableRekeyUpdatesForTesting()

    config3 := ConfigAsUser(config1.(*ConfigLocal), u3)
    defer CheckConfigAndShutdown(t, config3)
    uid3, err := config3.KBPKI().GetCurrentUID(context.Background())
    if err != nil {
        t.Fatal(err)
    }
    config3.MDServer().DisableRekeyUpdatesForTesting()

    // 2 writers 1 reader
    name := u1.String() + "," + u2.String() + "#" + u3.String()

    kbfsOps1 := config1.KBFSOps()
    rootNode1, _, err := kbfsOps1.GetOrCreateRootNode(ctx, name, false, MasterBranch)
    if err != nil {
        t.Fatalf("Couldn't create folder: %v", err)
    }

    // user 1 creates a file
    _, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "a", false)
    if err != nil {
        t.Fatalf("Couldn't create file: %v", err)
    }

    config2Dev2 := ConfigAsUser(config1.(*ConfigLocal), u2)
    // we don't check the config because this device can't read all of the md blocks.
    defer config2Dev2.Shutdown()
    config2Dev2.MDServer().DisableRekeyUpdatesForTesting()

    // Now give u2 a new device.  The configs don't share a Keybase
    // Daemon so we have to do it in all places.
    AddDeviceForLocalUserOrBust(t, config1, uid2)
    AddDeviceForLocalUserOrBust(t, config2, uid2)
    AddDeviceForLocalUserOrBust(t, config3, uid2)
    devIndex := AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
    SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)

    // user 2 should be unable to read the data now since its device
    // wasn't registered when the folder was originally created.
    kbfsOps2Dev2 := config2Dev2.KBFSOps()
    _, _, err = kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
    if _, ok := err.(ReadAccessError); !ok {
        t.Fatalf("Got unexpected error when reading with new key: %v", err)
    }

    // now user 2 should set the rekey bit
    err = kbfsOps2Dev2.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
    if err != nil {
        t.Fatalf("Couldn't rekey: %v", err)
    }

    // user 1 syncs from server
    err = kbfsOps1.SyncFromServer(ctx, rootNode1.GetFolderBranch())
    if err != nil {
        t.Fatalf("Couldn't sync from server: %v", err)
    }

    // user 1 should try to rekey
    err = kbfsOps1.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
    if err != nil {
        t.Fatalf("Couldn't rekey: %v", err)
    }

    // user 2 syncs from server
    err = kbfsOps2Dev2.SyncFromServer(ctx, rootNode1.GetFolderBranch())
    if err != nil {
        t.Fatalf("Couldn't sync from server: %v", err)
    }

    // this device should be able to read now
    rootNode2Dev2, _, err := kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
    if err != nil {
        t.Fatalf("Got unexpected error after rekey: %v", err)
    }

    // look for the file
    aNode, _, err := kbfsOps2Dev2.Lookup(ctx, rootNode2Dev2, "a")
    if err != nil {
        t.Fatalf("Device 2 couldn't lookup a: %v", err)
    }

    // read it
    buf := []byte{0}
    _, err = kbfsOps2Dev2.Read(ctx, aNode, buf, 0)
    if err != nil {
        t.Fatalf("Device 2 couldn't read a: %v", err)
    }

    config3Dev2 := ConfigAsUser(config1.(*ConfigLocal), u3)
    // we don't check the config because this device can't read all of the md blocks.
    defer config3Dev2.Shutdown()
    config3Dev2.MDServer().DisableRekeyUpdatesForTesting()

    // Now give u3 a new device.
    AddDeviceForLocalUserOrBust(t, config1, uid3)
    AddDeviceForLocalUserOrBust(t, config2, uid3)
    AddDeviceForLocalUserOrBust(t, config2Dev2, uid3)
    AddDeviceForLocalUserOrBust(t, config3, uid3)
    devIndex = AddDeviceForLocalUserOrBust(t, config3Dev2, uid3)
    SwitchDeviceForLocalUserOrBust(t, config3Dev2, devIndex)

    // user 3 dev 2 should be unable to read the data now since its device
    // wasn't registered when the folder was originally created.
    kbfsOps3Dev2 := config3Dev2.KBFSOps()
    _, _, err = kbfsOps3Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
    if _, ok := err.(ReadAccessError); !ok {
        t.Fatalf("Got unexpected error when reading with new key: %v", err)
    }

    // now user 3 dev 2 should set the rekey bit
    err = kbfsOps3Dev2.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
    if err != nil {
        t.Fatalf("Couldn't rekey: %v", err)
    }

    // user 2 dev 2 syncs from server
    err = kbfsOps2Dev2.SyncFromServer(ctx, rootNode1.GetFolderBranch())
    if err != nil {
        t.Fatalf("Couldn't sync from server: %v", err)
    }

    // user 2 dev 2 should try to rekey
    err = kbfsOps2Dev2.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
    if err != nil {
        t.Fatalf("Couldn't rekey: %v", err)
    }

    // user 3 dev 2 syncs from server
    err = kbfsOps3Dev2.SyncFromServer(ctx, rootNode1.GetFolderBranch())
    if err != nil {
        t.Fatalf("Couldn't sync from server: %v", err)
    }

    // this device should be able to read now
    rootNode3Dev2, _, err := kbfsOps3Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
    if err != nil {
        t.Fatalf("Got unexpected error after rekey: %v", err)
    }

    // look for the file
    a2Node, _, err := kbfsOps3Dev2.Lookup(ctx, rootNode3Dev2, "a")
    if err != nil {
        t.Fatalf("Device 3 couldn't lookup a: %v", err)
    }

    // read it
    buf = []byte{0}
    _, err = kbfsOps3Dev2.Read(ctx, a2Node, buf, 0)
    if err != nil {
        t.Fatalf("Device 3 couldn't read a: %v", err)
    }

    // Explicitly run the checks with config1 before the deferred shutdowns begin.
    // This way the shared mdserver hasn't been shutdown.
    CheckConfigAndShutdown(t, config1)
    doShutdown1 = false
}

// Two devices conflict when revoking a 3rd device.
// Test that after this both can still read the latest version of the folder.
func TestKeyManagerRekeyAddAndRevokeDeviceWithConflict(t *testing.T) {
    var u1, u2 libkb.NormalizedUsername = "u1", "u2"
    config1, _, ctx := kbfsOpsConcurInit(t, u1, u2)
    defer CheckConfigAndShutdown(t, config1)

    config2 := ConfigAsUser(config1.(*ConfigLocal), u2)
    defer CheckConfigAndShutdown(t, config2)
    uid2, err := config2.KBPKI().GetCurrentUID(context.Background())
    if err != nil {
        t.Fatal(err)
    }

    // create a shared folder
    name := u1.String() + "," + u2.String()

    kbfsOps1 := config1.KBFSOps()
    rootNode1, _, err := kbfsOps1.GetOrCreateRootNode(ctx, name, false, MasterBranch)
    if err != nil {
        t.Fatalf("Couldn't create folder: %v", err)
    }

    // user 1 creates a file
    _, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "a", false)
    if err != nil {
        t.Fatalf("Couldn't create file: %v", err)
    }

    config2Dev2 := ConfigAsUser(config1.(*ConfigLocal), u2)
    defer CheckConfigAndShutdown(t, config2Dev2)

    // give user 2 a new device
    AddDeviceForLocalUserOrBust(t, config1, uid2)
    AddDeviceForLocalUserOrBust(t, config2, uid2)
    devIndex := AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
    SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)

    // user 2 should be unable to read the data now since its device
    // wasn't registered when the folder was originally created.
    kbfsOps2Dev2 := config2Dev2.KBFSOps()
    root2Dev2, _, err := kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
    if _, ok := err.(ReadAccessError); !ok {
        t.Fatalf("Got unexpected error when reading with new key: %v", err)
    }

    // now user 1 should rekey
    err = kbfsOps1.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
    if err != nil {
        t.Fatalf("Couldn't rekey: %v", err)
    }

    // this device should be able to read now
    root2Dev2, _, err = kbfsOps2Dev2.GetOrCreateRootNode(ctx, name, false, MasterBranch)
    if err != nil {
        t.Fatalf("Got unexpected error after rekey: %v", err)
    }

    // Now revoke the original user 2 device
    RevokeDeviceForLocalUserOrBust(t, config1, uid2, 0)
    RevokeDeviceForLocalUserOrBust(t, config2Dev2, uid2, 0)

    // disable updates on user 1
    c, err := DisableUpdatesForTesting(config1, rootNode1.GetFolderBranch())
    if err != nil {
        t.Fatalf("Couldn't disable updates: %v", err)
    }

    // rekey again but with user 2 device 2
    err = kbfsOps2Dev2.Rekey(ctx, root2Dev2.GetFolderBranch().Tlf)
    if err != nil {
        t.Fatalf("Couldn't rekey: %v", err)
    }

    // have user 1 also try to rekey but fail due to conflict
    err = kbfsOps1.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
    if _, isConflict := err.(MDServerErrorConflictRevision); !isConflict {
        t.Fatalf("Expected failure due to conflict")
    }

    // device 1 re-enables updates
    c <- struct{}{}

    err = kbfsOps1.SyncFromServer(ctx, rootNode1.GetFolderBranch())
    if err != nil {
        t.Fatalf("Couldn't sync from server: %v", err)
    }

    err = kbfsOps2Dev2.SyncFromServer(ctx, root2Dev2.GetFolderBranch())
    if err != nil {
        t.Fatalf("Couldn't sync from server: %v", err)
    }

    // force re-encryption of the root dir
    _, _, err = kbfsOps2Dev2.CreateFile(ctx, root2Dev2, "b", false)
    if err != nil {
        t.Fatalf("Couldn't create file: %v", err)
    }

    // device 1 should still work
    err = kbfsOps1.SyncFromServer(ctx, rootNode1.GetFolderBranch())
    if err != nil {
        t.Fatalf("Couldn't sync from server: %v", err)
    }

    rootNode1, _, err = kbfsOps1.GetOrCreateRootNode(ctx, name, false, MasterBranch)
    if err
!= nil {
        t.Fatalf("Got unexpected error after rekey: %v", err)
    }

    children, err := kbfsOps1.GetDirChildren(ctx, rootNode1)
    // Fix: the error from GetDirChildren was silently dropped, so a listing
    // failure would be misreported as a missing dir entry.
    if err != nil {
        t.Fatalf("Couldn't get children: %v", err)
    }
    if _, ok := children["b"]; !ok {
        t.Fatalf("Device 1 couldn't see the new dir entry")
    }
}
// Copyright ©2014 The gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package optimize

import "github.com/gonum/floats"

// GradientDescent is a Method that performs gradient-based optimization.
// Gradient Descent performs successive steps along the direction of the
// gradient. The Linesearcher specifies the kind of linesearch to be done, and
// StepSizer determines the initial step size of each direction. If either
// Linesearcher or StepSizer are nil, a reasonable value will be chosen.
type GradientDescent struct {
    Linesearcher Linesearcher
    StepSizer    StepSizer

    ls *LinesearchMethod
}

// Init fills in defaults for any nil fields and delegates initialization to
// the underlying linesearch method, registering g as its NextDirectioner.
func (g *GradientDescent) Init(loc *Location) (Operation, error) {
    if g.StepSizer == nil {
        g.StepSizer = &QuadraticStepSize{}
    }
    if g.Linesearcher == nil {
        g.Linesearcher = &Backtracking{}
    }
    if g.ls == nil {
        g.ls = &LinesearchMethod{}
    }
    g.ls.Linesearcher = g.Linesearcher
    g.ls.NextDirectioner = g

    return g.ls.Init(loc)
}

// Iterate performs one iteration by delegating to the linesearch method.
func (g *GradientDescent) Iterate(loc *Location) (Operation, error) {
    return g.ls.Iterate(loc)
}

// InitDirection sets dir to the negative gradient at loc and returns the
// initial step size from the StepSizer.
func (g *GradientDescent) InitDirection(loc *Location, dir []float64) (stepSize float64) {
    copy(dir, loc.Gradient)
    floats.Scale(-1, dir)
    return g.StepSizer.Init(loc, dir)
}

// NextDirection sets dir to the negative gradient at loc and returns the
// next step size from the StepSizer.
func (g *GradientDescent) NextDirection(loc *Location, dir []float64) (stepSize float64) {
    copy(dir, loc.Gradient)
    floats.Scale(-1, dir)
    return g.StepSizer.StepSize(loc, dir)
}

// Needs reports that the method requires the gradient but not the Hessian.
func (*GradientDescent) Needs() struct {
    Gradient bool
    Hessian  bool
} {
    return struct {
        Gradient bool
        Hessian  bool
    }{true, false}
}

Clean up docs for GradientDescent

// Copyright ©2014 The gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package optimize

import "github.com/gonum/floats"

// GradientDescent implements the steepest descent optimization method that
// performs successive steps along the direction of the negative gradient.
type GradientDescent struct {
    // Linesearcher selects suitable steps along the descent direction.
    // If Linesearcher is nil, a reasonable default will be chosen.
    Linesearcher Linesearcher
    // StepSizer determines the initial step size along each direction.
    // If StepSizer is nil, a reasonable default will be chosen.
    StepSizer StepSizer

    ls *LinesearchMethod
}

// Init fills in defaults for any nil fields and delegates initialization to
// the underlying linesearch method, registering g as its NextDirectioner.
func (g *GradientDescent) Init(loc *Location) (Operation, error) {
    if g.Linesearcher == nil {
        g.Linesearcher = &Backtracking{}
    }
    if g.StepSizer == nil {
        g.StepSizer = &QuadraticStepSize{}
    }
    if g.ls == nil {
        g.ls = &LinesearchMethod{}
    }
    g.ls.Linesearcher = g.Linesearcher
    g.ls.NextDirectioner = g

    return g.ls.Init(loc)
}

// Iterate performs one iteration by delegating to the linesearch method.
func (g *GradientDescent) Iterate(loc *Location) (Operation, error) {
    return g.ls.Iterate(loc)
}

// InitDirection sets dir to the negative gradient at loc and returns the
// initial step size from the StepSizer.
func (g *GradientDescent) InitDirection(loc *Location, dir []float64) (stepSize float64) {
    copy(dir, loc.Gradient)
    floats.Scale(-1, dir)
    return g.StepSizer.Init(loc, dir)
}

// NextDirection sets dir to the negative gradient at loc and returns the
// next step size from the StepSizer.
func (g *GradientDescent) NextDirection(loc *Location, dir []float64) (stepSize float64) {
    copy(dir, loc.Gradient)
    floats.Scale(-1, dir)
    return g.StepSizer.StepSize(loc, dir)
}

// Needs reports that the method requires the gradient but not the Hessian.
func (*GradientDescent) Needs() struct {
    Gradient bool
    Hessian  bool
} {
    return struct {
        Gradient bool
        Hessian  bool
    }{true, false}
}
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "sort" "sync" "time" "github.com/keybase/client/go/logger" keybase1 "github.com/keybase/client/go/protocol" "golang.org/x/net/context" ) // TlfEditNotificationType indicates what type of edit happened to a // file. type TlfEditNotificationType int const ( // FileCreated indicates a new file. FileCreated TlfEditNotificationType = iota // FileModified indicates an existing file that was written to. FileModified // FileDeleted indicates an existing file that was deleted. It // doesn't appear in the edit history, only in individual edit // updates. FileDeleted ) // TlfEdit represents an individual update about a file edit within a // TLF. type TlfEdit struct { Filepath string // relative to the TLF root Type TlfEditNotificationType LocalTime time.Time // reflects difference between server and local clock } const ( // How many edits per writer we want to return in the complete history? desiredEditsPerWriter = 20 // How far back we're willing to go to get the complete history. maxMDsToInspect = 1000 ) // TlfEditList is a list of edits by a particular user, that can be // sort by increasing timestamp. type TlfEditList []TlfEdit // Len implements sort.Interface for TlfEditList func (tel TlfEditList) Len() int { return len(tel) } // Less implements sort.Interface for TlfEditList func (tel TlfEditList) Less(i, j int) bool { return tel[i].LocalTime.Before(tel[j].LocalTime) } // Swap implements sort.Interface for TlfEditList func (tel TlfEditList) Swap(i, j int) { tel[j], tel[i] = tel[i], tel[j] } // TlfWriterEdits is a map of a writer name to the most recent file // edits in a given folder by that writer. 
type TlfWriterEdits map[keybase1.UID]TlfEditList func (we TlfWriterEdits) isComplete() bool { for _, edits := range we { if len(edits) < desiredEditsPerWriter { return false } } return true } type writerEditEstimates map[keybase1.UID]int func (wee writerEditEstimates) isComplete() bool { for _, count := range wee { if count < desiredEditsPerWriter { return false } } return true } func (wee *writerEditEstimates) update(rmds []ImmutableRootMetadata) { for i := len(rmds) - 1; i >= 0; i-- { rmd := rmds[i] if rmd.IsWriterMetadataCopiedSet() { continue } writer := rmd.LastModifyingWriter for _, op := range rmd.data.Changes.Ops { // Estimate the number of writes just based on operations // (without yet taking into account whether the same file // is being edited more than once). switch realOp := op.(type) { case *createOp: if realOp.Type == Dir || realOp.Type == Sym { continue } (*wee)[writer]++ case *syncOp: (*wee)[writer]++ } } } } func (wee *writerEditEstimates) reset(edits TlfWriterEdits) { for writer := range *wee { (*wee)[writer] = len(edits[writer]) } } // TlfEditHistory allows you to get the update history about a // particular TLF. 
type TlfEditHistory struct { config Config fbo *folderBranchOps log logger.Logger lock sync.Mutex edits TlfWriterEdits } func (teh *TlfEditHistory) getEditsCopy() TlfWriterEdits { teh.lock.Lock() defer teh.lock.Unlock() if teh.edits == nil { return nil } edits := make(TlfWriterEdits) for user, userEdits := range teh.edits { userEditsCopy := make([]TlfEdit, 0, len(userEdits)) copy(userEditsCopy, userEdits) edits[user] = userEditsCopy } return edits } func (teh *TlfEditHistory) updateRmds(rmds []ImmutableRootMetadata, olderRmds []ImmutableRootMetadata) ([]ImmutableRootMetadata, error) { newRmds := make([]ImmutableRootMetadata, len(olderRmds)+len(rmds)) copy(newRmds[:len(olderRmds)], olderRmds) copy(newRmds[len(olderRmds):], rmds) return newRmds, nil } func (teh *TlfEditHistory) calculateEditCounts(ctx context.Context, rmds []ImmutableRootMetadata) (TlfWriterEdits, error) { chains, err := newCRChains(ctx, teh.config, rmds, &teh.fbo.blocks, false) if err != nil { return nil, err } // Set the paths on all the ops _, err = chains.getPaths(ctx, &teh.fbo.blocks, teh.config.MakeLogger(""), teh.fbo.nodeCache, teh.fbo.nodeCache, false) if err != nil { return nil, err } edits := make(TlfWriterEdits) for _, writer := range rmds[len(rmds)-1].GetTlfHandle().ResolvedWriters() { edits[writer] = nil } outer: for ptr, chain := range chains.byOriginal { if chains.isDeleted(ptr) { continue } for _, op := range chain.ops { // Is this a create? switch realOp := op.(type) { case *createOp: if realOp.renamed { // Ignore renames for now. TODO: notify about renames? continue } if realOp.Type == Dir || realOp.Type == Sym { // Ignore directories and symlinks. Because who // wants notifications for those? continue } // The pointer is actually the newly-referenced Block for _, ref := range op.Refs() { ptr = ref break } // If a chain exists for the file, ignore this create. 
if _, ok := chains.byOriginal[ptr]; ok { continue } writer := op.getWriterInfo().uid createdPath := op.getFinalPath().ChildPathNoPtr(realOp.NewName) edits[writer] = append(edits[writer], TlfEdit{ Filepath: createdPath.String(), Type: FileCreated, LocalTime: op.getLocalTimestamp(), }) case *syncOp: // Only the final writer matters. lastOp := chain.ops[len(chain.ops)-1] writer := lastOp.getWriterInfo().uid t := FileModified if chains.isCreated(ptr) { t = FileCreated } edits[writer] = append(edits[writer], TlfEdit{ Filepath: lastOp.getFinalPath().String(), Type: t, LocalTime: op.getLocalTimestamp(), }) continue outer default: continue } } } return edits, nil } // GetComplete returns the most recently known set of clustered edit // history for this TLF. func (teh *TlfEditHistory) GetComplete(ctx context.Context) ( TlfWriterEdits, error) { currEdits := teh.getEditsCopy() if currEdits != nil { return currEdits, nil } // We have no history -- fetch from the server until we have a // complete history. // Get current head for this folder. head, err := teh.fbo.getMDForExternalUse(ctx) if err != nil { return nil, err } estimates := make(writerEditEstimates) for _, writer := range head.GetTlfHandle().ResolvedWriters() { estimates[writer] = 0 } rmds := []ImmutableRootMetadata{head} estimates.update(rmds) // If unmerged, get all the unmerged updates. 
if head.MergedStatus() == Unmerged { _, unmergedRmds, err := getUnmergedMDUpdates(ctx, teh.config, head.ID, head.BID, head.Revision-1) if err != nil { return nil, err } estimates.update(unmergedRmds) rmds, err = teh.updateRmds(rmds, unmergedRmds) if err != nil { return nil, err } } for (currEdits == nil || !currEdits.isComplete()) && len(rmds) < maxMDsToInspect && rmds[0].Revision > MetadataRevisionInitial { teh.log.CDebugf(ctx, "Edits not complete after %d revisions", len(rmds)) if estimates.isComplete() { // Once the estimate hits the threshold for each writer, // calculate the chains using all those MDs, and build the // real edit map (discounting deleted files, etc). currEdits, err = teh.calculateEditCounts(ctx, rmds) if err != nil { return nil, err } if currEdits.isComplete() { break } // Set the estimates to their exact known values estimates.reset(currEdits) } for !estimates.isComplete() && len(rmds) < maxMDsToInspect && rmds[0].Revision > MetadataRevisionInitial { // Starting from the head/branchpoint, work backwards // mdMax revisions at a time. endRev := rmds[0].Revision - 1 startRev := endRev - maxMDsAtATime + 1 if startRev < MetadataRevisionInitial { startRev = MetadataRevisionInitial } olderRmds, err := getMDRange(ctx, teh.config, head.ID, NullBranchID, startRev, endRev, Merged) if err != nil { return nil, err } // Estimate the number of per-writer file operations by // keeping a count of the createOps and syncOps found. estimates.update(olderRmds) rmds, err = teh.updateRmds(rmds, olderRmds) if err != nil { return nil, err } } } if currEdits == nil { // We broke out of the loop early. 
currEdits, err = teh.calculateEditCounts(ctx, rmds) if err != nil { return nil, err } } // Sort each of the edit lists by timestamp for w, list := range currEdits { sort.Sort(list) if len(list) > desiredEditsPerWriter { list = list[len(list)-desiredEditsPerWriter:] } currEdits[w] = list } teh.log.CDebugf(ctx, "Edits complete: %d revisions, starting from "+ "revision %d", len(rmds), rmds[0].Revision) teh.lock.Lock() defer teh.lock.Unlock() teh.edits = currEdits return currEdits, nil } tlf_edit_history: disable cached edit history for now Until we listen for and respond to updates. Issue: KBFS-1301 // Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "sort" "sync" "time" "github.com/keybase/client/go/logger" keybase1 "github.com/keybase/client/go/protocol" "golang.org/x/net/context" ) // TlfEditNotificationType indicates what type of edit happened to a // file. type TlfEditNotificationType int const ( // FileCreated indicates a new file. FileCreated TlfEditNotificationType = iota // FileModified indicates an existing file that was written to. FileModified // FileDeleted indicates an existing file that was deleted. It // doesn't appear in the edit history, only in individual edit // updates. FileDeleted ) // TlfEdit represents an individual update about a file edit within a // TLF. type TlfEdit struct { Filepath string // relative to the TLF root Type TlfEditNotificationType LocalTime time.Time // reflects difference between server and local clock } const ( // How many edits per writer we want to return in the complete history? desiredEditsPerWriter = 20 // How far back we're willing to go to get the complete history. maxMDsToInspect = 1000 ) // TlfEditList is a list of edits by a particular user, that can be // sort by increasing timestamp. 
type TlfEditList []TlfEdit // Len implements sort.Interface for TlfEditList func (tel TlfEditList) Len() int { return len(tel) } // Less implements sort.Interface for TlfEditList func (tel TlfEditList) Less(i, j int) bool { return tel[i].LocalTime.Before(tel[j].LocalTime) } // Swap implements sort.Interface for TlfEditList func (tel TlfEditList) Swap(i, j int) { tel[j], tel[i] = tel[i], tel[j] } // TlfWriterEdits is a map of a writer name to the most recent file // edits in a given folder by that writer. type TlfWriterEdits map[keybase1.UID]TlfEditList func (we TlfWriterEdits) isComplete() bool { for _, edits := range we { if len(edits) < desiredEditsPerWriter { return false } } return true } type writerEditEstimates map[keybase1.UID]int func (wee writerEditEstimates) isComplete() bool { for _, count := range wee { if count < desiredEditsPerWriter { return false } } return true } func (wee *writerEditEstimates) update(rmds []ImmutableRootMetadata) { for i := len(rmds) - 1; i >= 0; i-- { rmd := rmds[i] if rmd.IsWriterMetadataCopiedSet() { continue } writer := rmd.LastModifyingWriter for _, op := range rmd.data.Changes.Ops { // Estimate the number of writes just based on operations // (without yet taking into account whether the same file // is being edited more than once). switch realOp := op.(type) { case *createOp: if realOp.Type == Dir || realOp.Type == Sym { continue } (*wee)[writer]++ case *syncOp: (*wee)[writer]++ } } } } func (wee *writerEditEstimates) reset(edits TlfWriterEdits) { for writer := range *wee { (*wee)[writer] = len(edits[writer]) } } // TlfEditHistory allows you to get the update history about a // particular TLF. 
type TlfEditHistory struct { config Config fbo *folderBranchOps log logger.Logger lock sync.Mutex edits TlfWriterEdits } func (teh *TlfEditHistory) getEditsCopy() TlfWriterEdits { teh.lock.Lock() defer teh.lock.Unlock() if teh.edits == nil { return nil } edits := make(TlfWriterEdits) for user, userEdits := range teh.edits { userEditsCopy := make([]TlfEdit, 0, len(userEdits)) copy(userEditsCopy, userEdits) edits[user] = userEditsCopy } return edits } func (teh *TlfEditHistory) updateRmds(rmds []ImmutableRootMetadata, olderRmds []ImmutableRootMetadata) ([]ImmutableRootMetadata, error) { newRmds := make([]ImmutableRootMetadata, len(olderRmds)+len(rmds)) copy(newRmds[:len(olderRmds)], olderRmds) copy(newRmds[len(olderRmds):], rmds) return newRmds, nil } func (teh *TlfEditHistory) calculateEditCounts(ctx context.Context, rmds []ImmutableRootMetadata) (TlfWriterEdits, error) { chains, err := newCRChains(ctx, teh.config, rmds, &teh.fbo.blocks, false) if err != nil { return nil, err } // Set the paths on all the ops _, err = chains.getPaths(ctx, &teh.fbo.blocks, teh.config.MakeLogger(""), teh.fbo.nodeCache, teh.fbo.nodeCache, false) if err != nil { return nil, err } edits := make(TlfWriterEdits) for _, writer := range rmds[len(rmds)-1].GetTlfHandle().ResolvedWriters() { edits[writer] = nil } outer: for ptr, chain := range chains.byOriginal { if chains.isDeleted(ptr) { continue } for _, op := range chain.ops { // Is this a create? switch realOp := op.(type) { case *createOp: if realOp.renamed { // Ignore renames for now. TODO: notify about renames? continue } if realOp.Type == Dir || realOp.Type == Sym { // Ignore directories and symlinks. Because who // wants notifications for those? continue } // The pointer is actually the newly-referenced Block for _, ref := range op.Refs() { ptr = ref break } // If a chain exists for the file, ignore this create. 
if _, ok := chains.byOriginal[ptr]; ok { continue } writer := op.getWriterInfo().uid createdPath := op.getFinalPath().ChildPathNoPtr(realOp.NewName) edits[writer] = append(edits[writer], TlfEdit{ Filepath: createdPath.String(), Type: FileCreated, LocalTime: op.getLocalTimestamp(), }) case *syncOp: // Only the final writer matters. lastOp := chain.ops[len(chain.ops)-1] writer := lastOp.getWriterInfo().uid t := FileModified if chains.isCreated(ptr) { t = FileCreated } edits[writer] = append(edits[writer], TlfEdit{ Filepath: lastOp.getFinalPath().String(), Type: t, LocalTime: op.getLocalTimestamp(), }) continue outer default: continue } } } return edits, nil } // GetComplete returns the most recently known set of clustered edit // history for this TLF. func (teh *TlfEditHistory) GetComplete(ctx context.Context) ( TlfWriterEdits, error) { var currEdits TlfWriterEdits /** * Once we update currEdits based on notifications, we can uncomment this. currEdits := teh.getEditsCopy() if currEdits != nil { return currEdits, nil } */ // We have no history -- fetch from the server until we have a // complete history. // Get current head for this folder. head, err := teh.fbo.getMDForExternalUse(ctx) if err != nil { return nil, err } estimates := make(writerEditEstimates) for _, writer := range head.GetTlfHandle().ResolvedWriters() { estimates[writer] = 0 } rmds := []ImmutableRootMetadata{head} estimates.update(rmds) // If unmerged, get all the unmerged updates. 
if head.MergedStatus() == Unmerged { _, unmergedRmds, err := getUnmergedMDUpdates(ctx, teh.config, head.ID, head.BID, head.Revision-1) if err != nil { return nil, err } estimates.update(unmergedRmds) rmds, err = teh.updateRmds(rmds, unmergedRmds) if err != nil { return nil, err } } for (currEdits == nil || !currEdits.isComplete()) && len(rmds) < maxMDsToInspect && rmds[0].Revision > MetadataRevisionInitial { teh.log.CDebugf(ctx, "Edits not complete after %d revisions", len(rmds)) if estimates.isComplete() { // Once the estimate hits the threshold for each writer, // calculate the chains using all those MDs, and build the // real edit map (discounting deleted files, etc). currEdits, err = teh.calculateEditCounts(ctx, rmds) if err != nil { return nil, err } if currEdits.isComplete() { break } // Set the estimates to their exact known values estimates.reset(currEdits) } for !estimates.isComplete() && len(rmds) < maxMDsToInspect && rmds[0].Revision > MetadataRevisionInitial { // Starting from the head/branchpoint, work backwards // mdMax revisions at a time. endRev := rmds[0].Revision - 1 startRev := endRev - maxMDsAtATime + 1 if startRev < MetadataRevisionInitial { startRev = MetadataRevisionInitial } olderRmds, err := getMDRange(ctx, teh.config, head.ID, NullBranchID, startRev, endRev, Merged) if err != nil { return nil, err } // Estimate the number of per-writer file operations by // keeping a count of the createOps and syncOps found. estimates.update(olderRmds) rmds, err = teh.updateRmds(rmds, olderRmds) if err != nil { return nil, err } } } if currEdits == nil { // We broke out of the loop early. 
currEdits, err = teh.calculateEditCounts(ctx, rmds) if err != nil { return nil, err } } // Sort each of the edit lists by timestamp for w, list := range currEdits { sort.Sort(list) if len(list) > desiredEditsPerWriter { list = list[len(list)-desiredEditsPerWriter:] } currEdits[w] = list } teh.log.CDebugf(ctx, "Edits complete: %d revisions, starting from "+ "revision %d", len(rmds), rmds[0].Revision) teh.lock.Lock() defer teh.lock.Unlock() teh.edits = currEdits return currEdits, nil }
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "math" "os" "reflect" "sync" "testing" "time" "github.com/keybase/client/go/protocol/keybase1" "github.com/keybase/kbfs/ioutil" "github.com/keybase/kbfs/kbfsblock" "github.com/keybase/kbfs/kbfscodec" "github.com/keybase/kbfs/kbfscrypto" "github.com/keybase/kbfs/kbfshash" "github.com/keybase/kbfs/kbfsmd" "github.com/keybase/kbfs/tlf" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/net/context" ) // testBWDelegate is a delegate we pass to tlfJournal to get info // about its state transitions. type testBWDelegate struct { t *testing.T // Store a context so that the tlfJournal's background context // will also obey the test timeout. testCtx context.Context stateCh chan bwState shutdownCh chan struct{} } func (d testBWDelegate) GetBackgroundContext() context.Context { return d.testCtx } func (d testBWDelegate) OnNewState(ctx context.Context, bws bwState) { select { case d.stateCh <- bws: case <-ctx.Done(): assert.Fail(d.t, ctx.Err().Error()) } } func (d testBWDelegate) OnShutdown(ctx context.Context) { select { case d.shutdownCh <- struct{}{}: case <-ctx.Done(): assert.Fail(d.t, ctx.Err().Error()) } } func (d testBWDelegate) requireNextState( ctx context.Context, expectedState bwState) { select { case bws := <-d.stateCh: require.Equal(d.t, expectedState, bws) case <-ctx.Done(): assert.Fail(d.t, ctx.Err().Error()) } } // testTLFJournalConfig is the config we pass to the tlfJournal, and // also contains some helper functions for testing. 
type testTLFJournalConfig struct { codecGetter logMaker t *testing.T tlfID tlf.ID splitter BlockSplitter crypto CryptoLocal bcache BlockCache bops BlockOps mdcache MDCache ver MetadataVer reporter Reporter uid keybase1.UID verifyingKey kbfscrypto.VerifyingKey ekg singleEncryptionKeyGetter nug normalizedUsernameGetter mdserver MDServer dlTimeout time.Duration } func (c testTLFJournalConfig) BlockSplitter() BlockSplitter { return c.splitter } func (c testTLFJournalConfig) Clock() Clock { return wallClock{} } func (c testTLFJournalConfig) Crypto() Crypto { return c.crypto } func (c testTLFJournalConfig) BlockCache() BlockCache { return c.bcache } func (c testTLFJournalConfig) BlockOps() BlockOps { return c.bops } func (c testTLFJournalConfig) MDCache() MDCache { return c.mdcache } func (c testTLFJournalConfig) MetadataVersion() MetadataVer { return c.ver } func (c testTLFJournalConfig) Reporter() Reporter { return c.reporter } func (c testTLFJournalConfig) cryptoPure() cryptoPure { return c.crypto } func (c testTLFJournalConfig) encryptionKeyGetter() encryptionKeyGetter { return c.ekg } func (c testTLFJournalConfig) mdDecryptionKeyGetter() mdDecryptionKeyGetter { return c.ekg } func (c testTLFJournalConfig) usernameGetter() normalizedUsernameGetter { return c.nug } func (c testTLFJournalConfig) MDServer() MDServer { return c.mdserver } func (c testTLFJournalConfig) teamMembershipChecker() TeamMembershipChecker { // TODO: support team TLF tests. 
return nil } func (c testTLFJournalConfig) diskLimitTimeout() time.Duration { return c.dlTimeout } func (c testTLFJournalConfig) BGFlushDirOpBatchSize() int { return 1 } func (c testTLFJournalConfig) makeBlock(data []byte) ( kbfsblock.ID, kbfsblock.Context, kbfscrypto.BlockCryptKeyServerHalf) { id, err := kbfsblock.MakePermanentID(data) require.NoError(c.t, err) bCtx := kbfsblock.MakeFirstContext( c.uid.AsUserOrTeam(), keybase1.BlockType_DATA) serverHalf, err := kbfscrypto.MakeRandomBlockCryptKeyServerHalf() require.NoError(c.t, err) return id, bCtx, serverHalf } func (c testTLFJournalConfig) makeMD( revision kbfsmd.Revision, prevRoot kbfsmd.ID) *RootMetadata { return makeMDForTest(c.t, c.ver, c.tlfID, revision, c.uid, c.crypto, prevRoot) } func (c testTLFJournalConfig) checkMD(rmds *RootMetadataSigned, extra ExtraMetadata, expectedRevision kbfsmd.Revision, expectedPrevRoot kbfsmd.ID, expectedMergeStatus MergeStatus, expectedBranchID BranchID) { verifyingKey := c.crypto.SigningKeySigner.Key.GetVerifyingKey() checkBRMD(c.t, c.uid, verifyingKey, c.Codec(), c.Crypto(), rmds.MD, extra, expectedRevision, expectedPrevRoot, expectedMergeStatus, expectedBranchID) err := rmds.IsValidAndSigned( context.Background(), c.Codec(), c.Crypto(), nil, extra) require.NoError(c.t, err) err = rmds.IsLastModifiedBy(c.uid, verifyingKey) require.NoError(c.t, err) } func (c testTLFJournalConfig) checkRange(rmdses []rmdsWithExtra, firstRevision kbfsmd.Revision, firstPrevRoot kbfsmd.ID, mStatus MergeStatus, bid BranchID) { c.checkMD(rmdses[0].rmds, rmdses[0].extra, firstRevision, firstPrevRoot, mStatus, bid) for i := 1; i < len(rmdses); i++ { prevID, err := kbfsmd.MakeID(c.Codec(), rmdses[i-1].rmds.MD) require.NoError(c.t, err) c.checkMD(rmdses[i].rmds, rmdses[i].extra, firstRevision+kbfsmd.Revision(i), prevID, mStatus, bid) err = rmdses[i-1].rmds.MD.CheckValidSuccessor( prevID, rmdses[i].rmds.MD) require.NoError(c.t, err) } } func setupTLFJournalTest( t *testing.T, ver MetadataVer, bwStatus 
TLFJournalBackgroundWorkStatus) ( tempdir string, config *testTLFJournalConfig, ctx context.Context, cancel context.CancelFunc, tlfJournal *tlfJournal, delegate testBWDelegate) { // Set up config and dependencies. bsplitter := &BlockSplitterSimple{ 64 * 1024, int(64 * 1024 / bpSize), 8 * 1024} codec := kbfscodec.NewMsgpack() signingKey := kbfscrypto.MakeFakeSigningKeyOrBust("client sign") cryptPrivateKey := kbfscrypto.MakeFakeCryptPrivateKeyOrBust("client crypt private") crypto := NewCryptoLocal(codec, signingKey, cryptPrivateKey) uid := keybase1.MakeTestUID(1) verifyingKey := signingKey.GetVerifyingKey() ekg := singleEncryptionKeyGetter{kbfscrypto.MakeTLFCryptKey([32]byte{0x1})} cig := singleCurrentSessionGetter{ SessionInfo{ Name: "fake_user", UID: uid, VerifyingKey: verifyingKey, }, } mdserver, err := NewMDServerMemory(newTestMDServerLocalConfig(t, cig)) require.NoError(t, err) config = &testTLFJournalConfig{ newTestCodecGetter(), newTestLogMaker(t), t, tlf.FakeID(1, tlf.Private), bsplitter, crypto, nil, nil, NewMDCacheStandard(10), ver, NewReporterSimple(newTestClockNow(), 10), uid, verifyingKey, ekg, nil, mdserver, defaultDiskLimitMaxDelay + time.Second, } ctx, cancel = context.WithTimeout( context.Background(), individualTestTimeout) // Clean up the context if the rest of the setup fails. setupSucceeded := false defer func() { if !setupSucceeded { cancel() } }() delegate = testBWDelegate{ t: t, testCtx: ctx, stateCh: make(chan bwState), shutdownCh: make(chan struct{}), } tempdir, err = ioutil.TempDir(os.TempDir(), "tlf_journal") require.NoError(t, err) // Clean up the tempdir if anything in the rest of the setup // fails. 
defer func() { if !setupSucceeded { err := ioutil.RemoveAll(tempdir) assert.NoError(t, err) } }() delegateBlockServer := NewBlockServerMemory(config.MakeLogger("")) diskLimitSemaphore := newSemaphoreDiskLimiter( math.MaxInt64, math.MaxInt64, math.MaxInt64) tlfJournal, err = makeTLFJournal(ctx, uid, verifyingKey, tempdir, config.tlfID, uid.AsUserOrTeam(), config, delegateBlockServer, bwStatus, delegate, nil, nil, diskLimitSemaphore) require.NoError(t, err) switch bwStatus { case TLFJournalBackgroundWorkEnabled: // Same as the single op case. fallthrough case TLFJournalSingleOpBackgroundWorkEnabled: // Read the state changes triggered by the initial // work signal. delegate.requireNextState(ctx, bwIdle) delegate.requireNextState(ctx, bwBusy) delegate.requireNextState(ctx, bwIdle) case TLFJournalBackgroundWorkPaused: delegate.requireNextState(ctx, bwPaused) default: require.FailNow(t, "Unknown bwStatus %s", bwStatus) } setupSucceeded = true return tempdir, config, ctx, cancel, tlfJournal, delegate } func teardownTLFJournalTest( tempdir string, config *testTLFJournalConfig, ctx context.Context, cancel context.CancelFunc, tlfJournal *tlfJournal, delegate testBWDelegate) { // Shutdown first so we don't get the Done() signal (from the // cancel() call) spuriously. tlfJournal.shutdown(ctx) select { case <-delegate.shutdownCh: case <-ctx.Done(): assert.Fail(config.t, ctx.Err().Error()) } cancel() select { case bws := <-delegate.stateCh: assert.Fail(config.t, "Unexpected state %s", bws) default: } config.mdserver.Shutdown() tlfJournal.delegateBlockServer.Shutdown(ctx) err := ioutil.RemoveAll(tempdir) assert.NoError(config.t, err) } func putOneMD(ctx context.Context, config *testTLFJournalConfig, tlfJournal *tlfJournal) { md := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{}) _, err := tlfJournal.putMD(ctx, md, tlfJournal.key) require.NoError(config.t, err) } // The tests below primarily test the background work thread's // behavior. 
func testTLFJournalBasic(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) putOneMD(ctx, config, tlfJournal) // Wait for it to be processed. delegate.requireNextState(ctx, bwBusy) delegate.requireNextState(ctx, bwIdle) } func testTLFJournalPauseResume(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) tlfJournal.pauseBackgroundWork() delegate.requireNextState(ctx, bwPaused) putOneMD(ctx, config, tlfJournal) // Unpause and wait for it to be processed. tlfJournal.resumeBackgroundWork() delegate.requireNextState(ctx, bwIdle) delegate.requireNextState(ctx, bwBusy) delegate.requireNextState(ctx, bwIdle) } func testTLFJournalPauseShutdown(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) tlfJournal.pauseBackgroundWork() delegate.requireNextState(ctx, bwPaused) putOneMD(ctx, config, tlfJournal) // Should still be able to shut down while paused. } type hangingBlockServer struct { BlockServer // Closed on put. onPutCh chan struct{} } func (bs hangingBlockServer) Put( ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context, buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) error { close(bs.onPutCh) // Hang until the context is cancelled. 
<-ctx.Done() return ctx.Err() } func (bs hangingBlockServer) waitForPut(ctx context.Context, t *testing.T) { select { case <-bs.onPutCh: case <-ctx.Done(): require.FailNow(t, ctx.Err().Error()) } } func putBlock(ctx context.Context, t *testing.T, config *testTLFJournalConfig, tlfJournal *tlfJournal, data []byte) { id, bCtx, serverHalf := config.makeBlock(data) err := tlfJournal.putBlockData(ctx, id, bCtx, data, serverHalf) require.NoError(t, err) } func testTLFJournalBlockOpBasic(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4}) numFlushed, rev, converted, err := tlfJournal.flushBlockEntries(ctx, firstValidJournalOrdinal+1) require.NoError(t, err) require.Equal(t, 1, numFlushed) require.Equal(t, rev, kbfsmd.RevisionUninitialized) require.False(t, converted) } func testTLFJournalBlockOpBusyPause(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) bs := hangingBlockServer{tlfJournal.delegateBlockServer, make(chan struct{})} tlfJournal.delegateBlockServer = bs putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4}) bs.waitForPut(ctx, t) delegate.requireNextState(ctx, bwBusy) // Should still be able to pause while busy. 
tlfJournal.pauseBackgroundWork() delegate.requireNextState(ctx, bwPaused) } func testTLFJournalBlockOpBusyShutdown(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) bs := hangingBlockServer{tlfJournal.delegateBlockServer, make(chan struct{})} tlfJournal.delegateBlockServer = bs putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4}) bs.waitForPut(ctx, t) delegate.requireNextState(ctx, bwBusy) // Should still be able to shut down while busy. } func testTLFJournalSecondBlockOpWhileBusy(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) bs := hangingBlockServer{tlfJournal.delegateBlockServer, make(chan struct{})} tlfJournal.delegateBlockServer = bs putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4}) bs.waitForPut(ctx, t) delegate.requireNextState(ctx, bwBusy) // Should still be able to put a second block while busy. 
putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4, 5}) } func testTLFJournalBlockOpDiskByteLimit(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) tlfJournal.diskLimiter.onJournalEnable( ctx, math.MaxInt64-6, 0, 0, tlfJournal.uid.AsUserOrTeam()) putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4}) errCh := make(chan error, 1) go func() { data2 := []byte{5, 6, 7} id, bCtx, serverHalf := config.makeBlock(data2) errCh <- tlfJournal.putBlockData( ctx, id, bCtx, data2, serverHalf) }() numFlushed, rev, converted, err := tlfJournal.flushBlockEntries(ctx, firstValidJournalOrdinal+1) require.NoError(t, err) require.Equal(t, 1, numFlushed) require.Equal(t, rev, kbfsmd.RevisionUninitialized) require.False(t, converted) // Fake an MD flush. md := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{}) err = tlfJournal.doOnMDFlushAndRemoveFlushedMDEntry( ctx, kbfsmd.ID{}, &RootMetadataSigned{MD: md.bareMd}) select { case err := <-errCh: require.NoError(t, err) case <-ctx.Done(): t.Fatal(ctx.Err()) } } func testTLFJournalBlockOpDiskFileLimit(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) tlfJournal.diskLimiter.onJournalEnable( ctx, 0, 0, math.MaxInt64-2*filesPerBlockMax+1, tlfJournal.uid.AsUserOrTeam()) putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4}) errCh := make(chan error, 1) go func() { data2 := []byte{5, 6, 7} id, bCtx, serverHalf := config.makeBlock(data2) errCh <- tlfJournal.putBlockData( ctx, id, bCtx, data2, serverHalf) }() numFlushed, rev, converted, err := tlfJournal.flushBlockEntries(ctx, firstValidJournalOrdinal+1) require.NoError(t, err) require.Equal(t, 1, numFlushed) require.Equal(t, rev, 
kbfsmd.RevisionUninitialized) require.False(t, converted) // Fake an MD flush. md := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{}) err = tlfJournal.doOnMDFlushAndRemoveFlushedMDEntry( ctx, kbfsmd.ID{}, &RootMetadataSigned{MD: md.bareMd}) select { case err := <-errCh: require.NoError(t, err) case <-ctx.Done(): t.Fatal(ctx.Err()) } } func testTLFJournalBlockOpDiskQuotaLimit(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) tlfJournal.diskLimiter.onJournalEnable( ctx, 0, math.MaxInt64-6, 0, tlfJournal.uid.AsUserOrTeam()) data1 := []byte{1, 2, 3, 4} putBlock(ctx, t, config, tlfJournal, data1) usedQuotaBytes, quotaBytes := tlfJournal.diskLimiter.getQuotaInfo(tlfJournal.uid.AsUserOrTeam()) require.Equal(t, int64(math.MaxInt64-6)+int64(len(data1)), usedQuotaBytes) require.Equal(t, int64(math.MaxInt64), quotaBytes) data2 := []byte{5, 6, 7} errCh := make(chan error, 1) go func() { id, bCtx, serverHalf := config.makeBlock(data2) errCh <- tlfJournal.putBlockData( ctx, id, bCtx, data2, serverHalf) }() numFlushed, rev, converted, err := tlfJournal.flushBlockEntries(ctx, firstValidJournalOrdinal+1) require.NoError(t, err) require.Equal(t, 1, numFlushed) require.Equal(t, rev, kbfsmd.RevisionUninitialized) require.False(t, converted) select { case err := <-errCh: require.NoError(t, err) case <-ctx.Done(): t.Fatal(ctx.Err()) } usedQuotaBytes, quotaBytes = tlfJournal.diskLimiter.getQuotaInfo(tlfJournal.uid.AsUserOrTeam()) require.Equal(t, int64(math.MaxInt64-6)+int64(len(data2)), usedQuotaBytes) require.Equal(t, int64(math.MaxInt64), quotaBytes) } func testTLFJournalBlockOpDiskQuotaLimitResolve(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, 
		tlfJournal, delegate)

	// Leave only 6 bytes of quota free.
	tlfJournal.diskLimiter.onJournalEnable(
		ctx, 0, math.MaxInt64-6, 0, tlfJournal.uid.AsUserOrTeam())

	data1 := []byte{1, 2, 3, 4}
	id1, bCtx1, serverHalf1 := config.makeBlock(data1)
	err := tlfJournal.putBlockData(ctx, id1, bCtx1, data1, serverHalf1)
	require.NoError(t, err)

	usedQuotaBytes, quotaBytes :=
		tlfJournal.diskLimiter.getQuotaInfo(tlfJournal.uid.AsUserOrTeam())
	require.Equal(t,
		int64(math.MaxInt64-6)+int64(len(data1)), usedQuotaBytes)
	require.Equal(t, int64(math.MaxInt64), quotaBytes)

	// A second put that must wait for quota to be released.
	data2 := []byte{5, 6, 7}
	errCh := make(chan error, 1)
	go func() {
		id2, bCtx2, serverHalf2 := config.makeBlock(data2)
		errCh <- tlfJournal.putBlockData(
			ctx, id2, bCtx2, data2, serverHalf2)
	}()

	// Put an MD and convert the journal to a branch so the block can
	// be dropped via branch resolution.
	md1 := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{})
	irmd, err := tlfJournal.putMD(ctx, md1, tlfJournal.key)
	require.NoError(t, err)
	mdID1 := irmd.mdID

	err = tlfJournal.convertMDsToBranch(ctx)
	require.NoError(t, err)
	bid, err := tlfJournal.getBranchID()
	require.NoError(t, err)

	// Ignore the block instead of flushing it.
	md2 := config.makeMD(kbfsmd.RevisionInitial+1, mdID1)
	_, retry, err := tlfJournal.doResolveBranch(
		ctx, bid, []kbfsblock.ID{id1}, md2,
		unflushedPathMDInfo{}, unflushedPathsPerRevMap{}, tlfJournal.key)
	require.NoError(t, err)
	require.False(t, retry)

	// The ignored block's quota should have been released, letting the
	// pending put complete.
	select {
	case err := <-errCh:
		require.NoError(t, err)
	case <-ctx.Done():
		t.Fatal(ctx.Err())
	}

	usedQuotaBytes, quotaBytes =
		tlfJournal.diskLimiter.getQuotaInfo(tlfJournal.uid.AsUserOrTeam())
	require.Equal(t,
		int64(math.MaxInt64-6)+int64(len(data2)), usedQuotaBytes)
	require.Equal(t, int64(math.MaxInt64), quotaBytes)
}

// testTLFJournalBlockOpDiskLimitDuplicate checks that putting the same
// block twice releases the bytes/files acquired for the duplicate.
func testTLFJournalBlockOpDiskLimitDuplicate(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	// Leave room for exactly two puts' worth of bytes and files.
	tlfJournal.diskLimiter.onJournalEnable(
		ctx, math.MaxInt64-8, 0, math.MaxInt64-2*filesPerBlockMax,
		tlfJournal.uid.AsUserOrTeam())
	data := []byte{1, 2, 3, 4}
	id, bCtx, serverHalf := config.makeBlock(data)
	err := tlfJournal.putBlockData(ctx, id, bCtx, data, serverHalf)
	require.NoError(t, err)

	// This should acquire some bytes and files, but then release
	// them.
	err = tlfJournal.putBlockData(ctx, id, bCtx, data, serverHalf)
	require.NoError(t, err)

	// If the above incorrectly does not release bytes or files,
	// this will hang.
	err = tlfJournal.putBlockData(ctx, id, bCtx, data, serverHalf)
	require.NoError(t, err)
}

// testTLFJournalBlockOpDiskLimitCancel checks that a put waiting on the
// disk limiter fails promptly when its context is cancelled.
func testTLFJournalBlockOpDiskLimitCancel(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	// No free bytes at all, so any put would block on the limiter.
	tlfJournal.diskLimiter.onJournalEnable(
		ctx, math.MaxInt64, 0, 0, tlfJournal.uid.AsUserOrTeam())

	ctx2, cancel2 := context.WithCancel(ctx)
	cancel2()

	data := []byte{1, 2, 3, 4}
	id, bCtx, serverHalf := config.makeBlock(data)
	err := tlfJournal.putBlockData(ctx2, id, bCtx, data, serverHalf)
	require.Equal(t, context.Canceled, errors.Cause(err))
}

// testTLFJournalBlockOpDiskLimitTimeout checks that a put waiting on
// the disk limiter times out with a fully-populated ErrDiskLimitTimeout.
func testTLFJournalBlockOpDiskLimitTimeout(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	// No free bytes and only one free file slot, with a tiny timeout.
	tlfJournal.diskLimiter.onJournalEnable(
		ctx, math.MaxInt64, 0, math.MaxInt64-1, tlfJournal.uid.AsUserOrTeam())
	config.dlTimeout = 3 * time.Microsecond

	data := []byte{1, 2, 3, 4}
	id, bCtx, serverHalf := config.makeBlock(data)
	err := tlfJournal.putBlockData(ctx, id, bCtx, data, serverHalf)
	timeoutErr, ok := errors.Cause(err).(*ErrDiskLimitTimeout)
	require.True(t, ok)
	require.Error(t, timeoutErr.err)
	// Clear the wrapped error so the remaining fields can be compared
	// exactly.
	timeoutErr.err = nil
	require.Equal(t, ErrDiskLimitTimeout{
		3 * time.Microsecond, int64(len(data)),
		filesPerBlockMax, 0, 1, 0, 1, math.MaxInt64, math.MaxInt64, nil, false,
	}, *timeoutErr)
}

// testTLFJournalBlockOpDiskLimitPutFailure checks that a failed put
// releases its limiter reservation (bytes and files).
func testTLFJournalBlockOpDiskLimitPutFailure(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	// Leave room for exactly one put's worth of bytes and files.
	tlfJournal.diskLimiter.onJournalEnable(
		ctx, math.MaxInt64-6, 0, math.MaxInt64-filesPerBlockMax,
		tlfJournal.uid.AsUserOrTeam())
	data := []byte{1, 2, 3, 4}
	// Put data that doesn't match the block ID, forcing a hash
	// mismatch failure.
	id, bCtx, serverHalf := config.makeBlock(data)
	err := tlfJournal.putBlockData(ctx, id, bCtx, []byte{1}, serverHalf)
	require.IsType(t, kbfshash.HashMismatchError{}, errors.Cause(err))

	// If the above incorrectly does not release bytes or files from
	// diskLimiter on error, this will hang.
	err = tlfJournal.putBlockData(ctx, id, bCtx, data, serverHalf)
	require.NoError(t, err)
}

// hangingMDServer wraps an MDServer so that Put signals onPutCh and
// then hangs until the context is cancelled.
type hangingMDServer struct {
	MDServer
	// Closed on put.
	onPutCh chan struct{}
}

// Put signals that a put has started, then blocks until the context is
// done.
func (md hangingMDServer) Put(ctx context.Context, rmds *RootMetadataSigned,
	_ ExtraMetadata, _ *keybase1.LockContext, _ keybase1.MDPriority) error {
	close(md.onPutCh)
	// Hang until the context is cancelled.
	<-ctx.Done()
	return ctx.Err()
}

// waitForPut blocks until Put has been called, or fails the test on
// context expiry.
func (md hangingMDServer) waitForPut(ctx context.Context, t *testing.T) {
	select {
	case <-md.onPutCh:
	case <-ctx.Done():
		require.FailNow(t, ctx.Err().Error())
	}
}

// testTLFJournalMDServerBusyPause checks that the background worker can
// be paused while an MD put is hanging.
func testTLFJournalMDServerBusyPause(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	mdserver := hangingMDServer{config.MDServer(), make(chan struct{})}
	config.mdserver = mdserver

	md := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{})
	_, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
	require.NoError(t, err)

	mdserver.waitForPut(ctx, t)
	delegate.requireNextState(ctx, bwBusy)

	// Should still be able to pause while busy.
	tlfJournal.pauseBackgroundWork()
	delegate.requireNextState(ctx, bwPaused)
}

// testTLFJournalMDServerBusyShutdown checks that the journal can be
// shut down (via the deferred teardown) while an MD put is hanging.
func testTLFJournalMDServerBusyShutdown(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	mdserver := hangingMDServer{config.MDServer(), make(chan struct{})}
	config.mdserver = mdserver

	md := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{})
	_, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
	require.NoError(t, err)

	mdserver.waitForPut(ctx, t)
	delegate.requireNextState(ctx, bwBusy)

	// Should still be able to shutdown while busy.
}

// testTLFJournalBlockOpWhileBusy checks that a block put succeeds even
// while an MD put is hanging.
func testTLFJournalBlockOpWhileBusy(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	mdserver := hangingMDServer{config.MDServer(), make(chan struct{})}
	config.mdserver = mdserver

	md := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{})
	_, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
	require.NoError(t, err)

	mdserver.waitForPut(ctx, t)
	delegate.requireNextState(ctx, bwBusy)

	// Should still be able to put a block while busy.
	putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4})
}

// rmdsWithExtra pairs a signed root MD with its extra metadata, as
// recorded by shimMDServer.Put.
type rmdsWithExtra struct {
	rmds  *RootMetadataSigned
	extra ExtraMetadata
}

// shimMDServer records puts and lets tests inject one-shot responses:
// nextGetRange is returned (once) by GetRange, and nextErr is returned
// (once) by Put.
type shimMDServer struct {
	MDServer
	rmdses          []rmdsWithExtra
	nextGetRange    []*RootMetadataSigned
	nextErr         error
	getForTLFCalled bool
}

// GetRange returns (and clears) the injected nextGetRange response.
func (s *shimMDServer) GetRange(
	ctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus,
	start, stop kbfsmd.Revision, _ *keybase1.LockID) (
	[]*RootMetadataSigned, error) {
	rmdses := s.nextGetRange
	s.nextGetRange = nil
	return rmdses, nil
}

// Put records the given MD, unless an injected error is pending, in
// which case that error is returned (once) instead.
func (s *shimMDServer) Put(ctx context.Context, rmds *RootMetadataSigned,
	extra ExtraMetadata, _ *keybase1.LockContext, _ keybase1.MDPriority) error {
	if s.nextErr != nil {
		err := s.nextErr
		s.nextErr = nil
		return err
	}
	s.rmdses = append(s.rmdses, rmdsWithExtra{rmds, extra})

	// Pretend all cancels happen after the actual put.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}
	return nil
}

// GetForTLF returns the most recently recorded MD, if any, and notes
// that it was called.
func (s *shimMDServer) GetForTLF(
	ctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus,
	_ *keybase1.LockID) (
	*RootMetadataSigned, error) {
	s.getForTLFCalled = true
	if len(s.rmdses) == 0 {
		return nil, nil
	}
	return s.rmdses[len(s.rmdses)-1].rmds, nil
}

func (s *shimMDServer) IsConnected() bool {
	return true
}

func (s *shimMDServer) Shutdown() {
}

// requireJournalEntryCounts asserts the journal's current block and MD
// entry counts.
func requireJournalEntryCounts(t *testing.T, j *tlfJournal,
	expectedBlockEntryCount, expectedMDEntryCount uint64) {
	blockEntryCount, mdEntryCount, err := j.getJournalEntryCounts()
	require.NoError(t, err)
	require.Equal(t, expectedBlockEntryCount, blockEntryCount)
	require.Equal(t, expectedMDEntryCount, mdEntryCount)
}

// The tests below test tlfJournal's MD flushing behavior.
// testTLFJournalFlushMDBasic puts a run of MD revisions and checks that
// flushing delivers them all, in order, to the server.
func testTLFJournalFlushMDBasic(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	firstRevision := kbfsmd.Revision(10)
	firstPrevRoot := kbfsmd.FakeID(1)
	mdCount := 10

	// Chain mdCount revisions together via their MD IDs.
	prevRoot := firstPrevRoot
	for i := 0; i < mdCount; i++ {
		revision := firstRevision + kbfsmd.Revision(i)
		md := config.makeMD(revision, prevRoot)
		irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
		require.NoError(t, err)
		prevRoot = irmd.mdID
	}

	// Flush all entries.
	var mdserver shimMDServer
	config.mdserver = &mdserver

	_, mdEnd, err := tlfJournal.getJournalEnds(ctx)
	require.NoError(t, err)

	for i := 0; i < mdCount; i++ {
		flushed, err := tlfJournal.flushOneMDOp(ctx, mdEnd)
		require.NoError(t, err)
		require.True(t, flushed)
	}
	// One extra flush attempt should find nothing left.
	flushed, err := tlfJournal.flushOneMDOp(ctx, mdEnd)
	require.NoError(t, err)
	require.False(t, flushed)
	requireJournalEntryCounts(t, tlfJournal, uint64(mdCount), 0)
	testMDJournalGCd(t, tlfJournal.mdJournal)

	// Check RMDSes on the server.
	rmdses := mdserver.rmdses
	require.Equal(t, mdCount, len(rmdses))
	config.checkRange(
		rmdses, firstRevision, firstPrevRoot, Merged, NullBranchID)
}

// testTLFJournalFlushMDConflict checks that a server conflict during an
// MD flush moves the journal onto an unmerged branch and stops further
// flushing.
func testTLFJournalFlushMDConflict(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	firstRevision := kbfsmd.Revision(10)
	firstPrevRoot := kbfsmd.FakeID(1)
	mdCount := 10

	prevRoot := firstPrevRoot
	for i := 0; i < mdCount/2; i++ {
		revision := firstRevision + kbfsmd.Revision(i)
		md := config.makeMD(revision, prevRoot)
		irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
		require.NoError(t, err)
		prevRoot = irmd.mdID
	}

	var mdserver shimMDServer
	mdserver.nextErr = kbfsmd.ServerErrorConflictRevision{}
	config.mdserver = &mdserver

	_, mdEnd, err := tlfJournal.getJournalEnds(ctx)
	require.NoError(t, err)

	// Simulate a flush with a conflict error halfway through.
	{
		flushed, err := tlfJournal.flushOneMDOp(ctx, mdEnd)
		require.NoError(t, err)
		require.False(t, flushed)

		// A merged put now fails with a conflict; retry unmerged.
		revision := firstRevision + kbfsmd.Revision(mdCount/2)
		md := config.makeMD(revision, prevRoot)
		_, err = tlfJournal.putMD(ctx, md, tlfJournal.key)
		require.IsType(t, MDJournalConflictError{}, err)

		md.SetUnmerged()
		irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
		require.NoError(t, err)
		prevRoot = irmd.mdID
	}

	// The remaining revisions must also be put unmerged.
	for i := mdCount/2 + 1; i < mdCount; i++ {
		revision := firstRevision + kbfsmd.Revision(i)
		md := config.makeMD(revision, prevRoot)
		md.SetUnmerged()
		irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
		require.NoError(t, err)
		prevRoot = irmd.mdID
	}

	// The journal won't flush anything while on a branch.
	requireJournalEntryCounts(t, tlfJournal, uint64(mdCount), uint64(mdCount))
}

// orderedBlockServer and orderedMDServer append onto their shared
// puts slice when their Put() methods are called.
// orderedBlockServer records every block Put into the shared puts slice
// (guarded by lock) so tests can assert ordering across servers.
type orderedBlockServer struct {
	BlockServer
	lock      *sync.Mutex
	puts      *[]interface{}
	onceOnPut func()
}

// Put appends the block ID to the shared puts slice and runs the
// one-shot onceOnPut hook, if any.
func (s *orderedBlockServer) Put(
	ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
	context kbfsblock.Context, buf []byte,
	serverHalf kbfscrypto.BlockCryptKeyServerHalf) error {
	s.lock.Lock()
	defer s.lock.Unlock()
	*s.puts = append(*s.puts, id)
	if s.onceOnPut != nil {
		s.onceOnPut()
		s.onceOnPut = nil
	}
	return nil
}

func (s *orderedBlockServer) Shutdown(context.Context) {}

// orderedMDServer records every MD revision Put into the shared puts
// slice (guarded by lock) so tests can assert ordering across servers.
type orderedMDServer struct {
	MDServer
	lock      *sync.Mutex
	puts      *[]interface{}
	onceOnPut func() error
}

// Put appends the MD revision number to the shared puts slice and runs
// the one-shot onceOnPut hook, if any; a hook error becomes the put's
// result.
func (s *orderedMDServer) Put(
	ctx context.Context, rmds *RootMetadataSigned, _ ExtraMetadata,
	_ *keybase1.LockContext, _ keybase1.MDPriority) error {
	s.lock.Lock()
	defer s.lock.Unlock()
	*s.puts = append(*s.puts, rmds.MD.RevisionNumber())
	if s.onceOnPut != nil {
		err := s.onceOnPut()
		s.onceOnPut = nil
		if err != nil {
			return err
		}
	}
	return nil
}

func (s *orderedMDServer) Shutdown() {}

// testTLFJournalGCd asserts that the journal has been fully flushed and
// garbage-collected: no on-disk dir, no unflushed state, no entries.
func testTLFJournalGCd(t *testing.T, tlfJournal *tlfJournal) {
	// The root dir shouldn't exist.
	_, err := ioutil.Stat(tlfJournal.dir)
	require.True(t, ioutil.IsNotExist(err))

	func() {
		tlfJournal.journalLock.Lock()
		defer tlfJournal.journalLock.Unlock()
		unflushedPaths := tlfJournal.unflushedPaths.getUnflushedPaths()
		require.Nil(t, unflushedPaths)
		require.Equal(t, uint64(0), tlfJournal.unsquashedBytes)
		require.Equal(t, 0, len(tlfJournal.flushingBlocks))
	}()

	requireJournalEntryCounts(t, tlfJournal, 0, 0)

	// Check child journals.
	testBlockJournalGCd(t, tlfJournal.blockJournal)
	testMDJournalGCd(t, tlfJournal.mdJournal)
}

// testTLFJournalFlushOrdering tests that we respect the relative
// orderings of blocks and MD ops when flushing, i.e. if a block op
// was added to the block journal before an MD op was added to the MD
// journal, then that block op will be flushed before that MD op.
func testTLFJournalFlushOrdering(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	bid1, bCtx1, serverHalf1 := config.makeBlock([]byte{1})
	bid2, bCtx2, serverHalf2 := config.makeBlock([]byte{2})
	bid3, bCtx3, serverHalf3 := config.makeBlock([]byte{3})

	md1 := config.makeMD(kbfsmd.Revision(10), kbfsmd.FakeID(1))

	// Both servers append into the same puts slice so the combined
	// order of block and MD puts can be checked afterwards.
	var lock sync.Mutex
	var puts []interface{}

	bserver := orderedBlockServer{
		lock: &lock,
		puts: &puts,
	}

	tlfJournal.delegateBlockServer.Shutdown(ctx)
	tlfJournal.delegateBlockServer = &bserver

	mdserver := orderedMDServer{
		lock: &lock,
		puts: &puts,
	}

	config.mdserver = &mdserver

	// bid1 is-put-before kbfsmd.Revision(10).
	err := tlfJournal.putBlockData(
		ctx, bid1, bCtx1, []byte{1}, serverHalf1)
	require.NoError(t, err)
	irmd, err := tlfJournal.putMD(ctx, md1, tlfJournal.key)
	require.NoError(t, err)
	prevRoot := irmd.mdID

	// Inject more journal entries from inside the servers' Put hooks,
	// while the flush is in progress.
	bserver.onceOnPut = func() {
		// bid2 is-put-before kbfsmd.Revision(11).
		err := tlfJournal.putBlockData(
			ctx, bid2, bCtx2, []byte{2}, serverHalf2)
		require.NoError(t, err)
		md2 := config.makeMD(kbfsmd.Revision(11), prevRoot)
		irmd, err := tlfJournal.putMD(ctx, md2, tlfJournal.key)
		require.NoError(t, err)
		prevRoot = irmd.mdID
	}

	mdserver.onceOnPut = func() error {
		// bid3 is-put-before kbfsmd.Revision(12).
		err := tlfJournal.putBlockData(
			ctx, bid3, bCtx3, []byte{3}, serverHalf3)
		require.NoError(t, err)
		md3 := config.makeMD(kbfsmd.Revision(12), prevRoot)
		irmd, err := tlfJournal.putMD(ctx, md3, tlfJournal.key)
		require.NoError(t, err)
		prevRoot = irmd.mdID
		return nil
	}

	err = tlfJournal.flush(ctx)
	require.NoError(t, err)
	testTLFJournalGCd(t, tlfJournal)

	// These two orderings depend on the exact flushing process,
	// but there are other possible orderings which respect the
	// above is-put-before constraints and also respect the
	// kbfsmd.Revision ordering.
	expectedPuts1 := []interface{}{
		bid1, kbfsmd.Revision(10), bid2, bid3,
		kbfsmd.Revision(11), kbfsmd.Revision(12),
	}
	// This is possible since block puts are done in parallel.
	expectedPuts2 := []interface{}{
		bid1, kbfsmd.Revision(10), bid3, bid2,
		kbfsmd.Revision(11), kbfsmd.Revision(12),
	}
	require.True(t,
		reflect.DeepEqual(puts, expectedPuts1) ||
			reflect.DeepEqual(puts, expectedPuts2),
		"Expected %v or %v, got %v",
		expectedPuts1, expectedPuts2, puts)
}

// testTLFJournalFlushOrderingAfterSquashAndCR tests that after a
// branch is squashed multiple times, and then hits a conflict, the
// blocks are flushed completely before the conflict-resolving MD.
func testTLFJournalFlushOrderingAfterSquashAndCR(
	t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	// A small squash threshold so the journal converts to a local
	// squash branch quickly.
	tlfJournal.forcedSquashByBytes = 20

	firstRev := kbfsmd.Revision(10)
	firstPrevRoot := kbfsmd.FakeID(1)
	md1 := config.makeMD(firstRev, firstPrevRoot)

	// Both servers append into the same puts slice so the combined
	// order of block and MD puts can be checked afterwards.
	var lock sync.Mutex
	var puts []interface{}

	bserver := orderedBlockServer{
		lock: &lock,
		puts: &puts,
	}

	tlfJournal.delegateBlockServer.Shutdown(ctx)
	tlfJournal.delegateBlockServer = &bserver

	var mdserverShim shimMDServer
	mdserver := orderedMDServer{
		MDServer: &mdserverShim,
		lock:     &lock,
		puts:     &puts,
	}

	config.mdserver = &mdserver

	// Put almost a full batch worth of blocks before revs 10 and 11.
	blockEnd := uint64(maxJournalBlockFlushBatchSize - 1)
	for i := uint64(0); i < blockEnd; i++ {
		data := []byte{byte(i)}
		bid, bCtx, serverHalf := config.makeBlock(data)
		err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
		require.NoError(t, err)
	}

	irmd, err := tlfJournal.putMD(ctx, md1, tlfJournal.key)
	require.NoError(t, err)
	prevRoot := irmd.mdID
	md2 := config.makeMD(firstRev+1, prevRoot)
	require.NoError(t, err)
	irmd, err = tlfJournal.putMD(ctx, md2, tlfJournal.key)
	require.NoError(t, err)
	prevRoot = irmd.mdID

	// Squash revs 10 and 11. No blocks should actually be flushed
	// yet.
	err = tlfJournal.flush(ctx)
	require.NoError(t, err)
	require.Equal(
		t, PendingLocalSquashBranchID, tlfJournal.mdJournal.getBranchID())

	requireJournalEntryCounts(t, tlfJournal, blockEnd+2, 2)

	// Resolve the local squash branch down to a single revision.
	squashMD := config.makeMD(firstRev, firstPrevRoot)
	irmd, err = tlfJournal.resolveBranch(ctx,
		PendingLocalSquashBranchID, []kbfsblock.ID{}, squashMD,
		tlfJournal.key)
	require.NoError(t, err)
	prevRoot = irmd.mdID
	requireJournalEntryCounts(t, tlfJournal, blockEnd+3, 1)

	// Another revision 11, with a squashable number of blocks to
	// complete the initial batch.
	for i := blockEnd; i < blockEnd+20; i++ {
		data := []byte{byte(i)}
		bid, bCtx, serverHalf := config.makeBlock(data)
		err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
		require.NoError(t, err)
	}
	blockEnd += 20
	md2 = config.makeMD(firstRev+1, prevRoot)
	require.NoError(t, err)
	irmd, err = tlfJournal.putMD(ctx, md2, tlfJournal.key)
	require.NoError(t, err)
	prevRoot = irmd.mdID

	// Let it squash (avoiding a branch this time since there's only one MD).
	err = tlfJournal.flush(ctx)
	require.NoError(t, err)
	require.Equal(t, NullBranchID, tlfJournal.mdJournal.getBranchID())
	requireJournalEntryCounts(t, tlfJournal, blockEnd+4, 2)

	// Simulate an MD conflict and try to flush again. This will
	// flush a full batch of blocks before hitting the conflict, as
	// well as the marker for rev 10.
	mdserver.onceOnPut = func() error {
		return kbfsmd.ServerErrorConflictRevision{}
	}
	// Fake a merged revision on the server so the journal converts to
	// a (non-squash) conflict branch.
	mergedBare := config.makeMD(md2.Revision(), firstPrevRoot).bareMd
	mergedBare.SetSerializedPrivateMetadata([]byte{1})
	rmds, err := SignBareRootMetadata(
		ctx, config.Codec(), config.Crypto(), config.Crypto(), mergedBare,
		time.Now())
	require.NoError(t, err)
	mdserverShim.nextGetRange = []*RootMetadataSigned{rmds}
	err = tlfJournal.flush(ctx)
	require.NoError(t, err)
	branchID := tlfJournal.mdJournal.getBranchID()
	require.NotEqual(t, PendingLocalSquashBranchID, branchID)
	require.NotEqual(t, NullBranchID, branchID)
	// Blocks: All the unflushed blocks, plus two unflushed rev markers.
	requireJournalEntryCounts(
		t, tlfJournal, blockEnd-maxJournalBlockFlushBatchSize+2, 2)

	// More blocks that are part of the resolution.
	blockEnd2 := blockEnd + maxJournalBlockFlushBatchSize + 2
	for i := blockEnd; i < blockEnd2; i++ {
		data := []byte{byte(i)}
		bid, bCtx, serverHalf := config.makeBlock(data)
		err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
		require.NoError(t, err)
	}

	// Use revision 11 (as if two revisions had been merged by another
	// device).
	resolveMD := config.makeMD(md2.Revision(), firstPrevRoot)
	_, err = tlfJournal.resolveBranch(
		ctx, branchID, []kbfsblock.ID{}, resolveMD, tlfJournal.key)
	require.NoError(t, err)
	// Blocks: the ones from the last check, plus the new blocks, plus
	// the resolve rev marker.
	requireJournalEntryCounts(
		t, tlfJournal, blockEnd2-maxJournalBlockFlushBatchSize+3, 1)

	// Flush everything remaining. All blocks should be flushed after
	// `resolveMD`.
	err = tlfJournal.flush(ctx)
	require.NoError(t, err)
	testTLFJournalGCd(t, tlfJournal)
	require.Equal(t, resolveMD.Revision(), puts[len(puts)-1])
}

// testTLFJournalFlushInterleaving tests that we interleave block and
// MD ops while respecting the relative orderings of blocks and MD ops
// when flushing.
func testTLFJournalFlushInterleaving(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	// Both servers append into the same puts slice so the combined
	// order of block and MD puts can be checked afterwards.
	var lock sync.Mutex
	var puts []interface{}

	bserver := orderedBlockServer{
		lock: &lock,
		puts: &puts,
	}

	tlfJournal.delegateBlockServer.Shutdown(ctx)
	tlfJournal.delegateBlockServer = &bserver

	var mdserverShim shimMDServer
	mdserver := orderedMDServer{
		MDServer: &mdserverShim,
		lock:     &lock,
		puts:     &puts,
	}

	config.mdserver = &mdserver

	// Revision 1
	var bids []kbfsblock.ID
	rev1BlockEnd := maxJournalBlockFlushBatchSize * 2
	for i := 0; i < rev1BlockEnd; i++ {
		data := []byte{byte(i)}
		bid, bCtx, serverHalf := config.makeBlock(data)
		bids = append(bids, bid)
		err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
		require.NoError(t, err)
	}
	md1 := config.makeMD(kbfsmd.Revision(10), kbfsmd.FakeID(1))
	irmd, err := tlfJournal.putMD(ctx, md1, tlfJournal.key)
	require.NoError(t, err)
	prevRoot := irmd.mdID

	// Revision 2
	rev2BlockEnd := rev1BlockEnd + maxJournalBlockFlushBatchSize*2
	for i := rev1BlockEnd; i < rev2BlockEnd; i++ {
		data := []byte{byte(i)}
		bid, bCtx, serverHalf := config.makeBlock(data)
		bids = append(bids, bid)
		err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
		require.NoError(t, err)
	}
	md2 := config.makeMD(kbfsmd.Revision(11), prevRoot)
	irmd, err = tlfJournal.putMD(ctx, md2, tlfJournal.key)
	require.NoError(t, err)
	prevRoot = irmd.mdID

	err = tlfJournal.flush(ctx)
	require.NoError(t, err)
	testTLFJournalGCd(t, tlfJournal)

	// Make sure the flusher checks in between block flushes for
	// conflicting MDs on the server.
	require.True(t, mdserverShim.getForTLFCalled)

	// Make sure that: before revision 1, all the rev1 blocks were
	// put; rev2 comes last; some blocks are put between the two.
	bidsSeen := make(map[kbfsblock.ID]bool)
	md1Slot := 0
	md2Slot := 0
	for i, put := range puts {
		if bid, ok := put.(kbfsblock.ID); ok {
			t.Logf("Saw bid %s at %d", bid, i)
			bidsSeen[bid] = true
			continue
		}

		mdID, ok := put.(kbfsmd.Revision)
		require.True(t, ok)
		if mdID == md1.Revision() {
			md1Slot = i
			for j := 0; j < rev1BlockEnd; j++ {
				t.Logf("Checking bid %s at %d", bids[j], i)
				require.True(t, bidsSeen[bids[j]])
			}
		} else if mdID == md2.Revision() {
			md2Slot = i
			require.NotZero(t, md1Slot)
			require.True(t, md1Slot+1 < i)
			require.Equal(t, i, len(puts)-1)
		}
	}
	require.NotZero(t, md1Slot)
	require.NotZero(t, md2Slot)
}

// testBranchChangeListener forwards branch-change notifications onto
// its channel.
type testBranchChangeListener struct {
	c chan<- struct{}
}

func (tbcl testBranchChangeListener) onTLFBranchChange(_ tlf.ID, _ BranchID) {
	tbcl.c <- struct{}{}
}

// testTLFJournalPauseBlocksAndConvertBranch starts a flush whose first
// block put hangs (until retUnpauseBlockPutCh is signaled), then puts
// enough MD revisions to force conversion to a local squash branch.
// It returns the first revision/root, the unpause channel, the flush
// error channel, and the expected block/MD entry counts left once the
// flush completes.
func testTLFJournalPauseBlocksAndConvertBranch(t *testing.T,
	ctx context.Context, tlfJournal *tlfJournal, config *testTLFJournalConfig) (
	firstRev kbfsmd.Revision, firstRoot kbfsmd.ID,
	retUnpauseBlockPutCh chan<- struct{}, retErrCh <-chan error,
	blocksLeftAfterFlush uint64, mdsLeftAfterFlush uint64) {
	branchCh := make(chan struct{}, 1)
	tlfJournal.onBranchChange = testBranchChangeListener{branchCh}

	var lock sync.Mutex
	var puts []interface{}

	// The first block put will block until unpauseBlockPutCh is
	// signaled.
	unpauseBlockPutCh := make(chan struct{})
	bserver := orderedBlockServer{
		lock:      &lock,
		puts:      &puts,
		onceOnPut: func() { <-unpauseBlockPutCh },
	}

	tlfJournal.delegateBlockServer.Shutdown(ctx)
	tlfJournal.delegateBlockServer = &bserver

	// Revision 1
	var bids []kbfsblock.ID
	rev1BlockEnd := maxJournalBlockFlushBatchSize * 2
	for i := 0; i < rev1BlockEnd; i++ {
		data := []byte{byte(i)}
		bid, bCtx, serverHalf := config.makeBlock(data)
		bids = append(bids, bid)
		err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
		require.NoError(t, err)
	}
	firstRev = kbfsmd.Revision(10)
	firstRoot = kbfsmd.FakeID(1)
	md1 := config.makeMD(firstRev, firstRoot)
	irmd, err := tlfJournal.putMD(ctx, md1, tlfJournal.key)
	require.NoError(t, err)
	prevRoot := irmd.mdID
	rev := firstRev

	// Now start the blocks flushing. One of the block puts will be
	// stuck. During that time, put a lot more MD revisions, enough
	// to trigger branch conversion. However, no pause should be
	// called.
	errCh := make(chan error, 1)
	go func() {
		errCh <- tlfJournal.flush(ctx)
	}()

	markers := uint64(1)
	for i := 0; i < ForcedBranchSquashRevThreshold+1; i++ {
		rev++
		md := config.makeMD(rev, prevRoot)
		irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
		if isRevisionConflict(err) {
			// Branch conversion is done, we can stop now.
			break
		}
		require.NoError(t, err)
		prevRoot = irmd.mdID
		markers++
	}

	// Wait for the local squash branch to appear.
	select {
	case <-branchCh:
	case <-ctx.Done():
		t.Fatalf("Timeout while waiting for branch change")
	}

	return firstRev, firstRoot, unpauseBlockPutCh, errCh,
		maxJournalBlockFlushBatchSize + markers, markers
}

// testTLFJournalConvertWhileFlushing tests that we can do branch
// conversion while blocks are still flushing.
func testTLFJournalConvertWhileFlushing(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	_, _, unpauseBlockPutCh, errCh, blocksLeftAfterFlush, mdsLeftAfterFlush :=
		testTLFJournalPauseBlocksAndConvertBranch(t, ctx, tlfJournal, config)

	// Now finish the block put, and let the flush finish. We should
	// be on a local squash branch now.
	unpauseBlockPutCh <- struct{}{}
	err := <-errCh
	require.NoError(t, err)

	// Should be a full batch worth of blocks left, plus all the
	// revision markers above. No squash has actually happened yet,
	// so all the revisions should be there now, just on a branch.
	requireJournalEntryCounts(
		t, tlfJournal, blocksLeftAfterFlush, mdsLeftAfterFlush)
	require.Equal(
		t, PendingLocalSquashBranchID, tlfJournal.mdJournal.getBranchID())
}

// testTLFJournalSquashWhileFlushing tests that we can do journal
// coalescing while blocks are still flushing.
func testTLFJournalSquashWhileFlushing(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	// Get the journal onto a local squash branch with one block put
	// stuck mid-flush (unblocked via unpauseBlockPutCh).
	firstRev, firstPrevRoot, unpauseBlockPutCh, errCh,
		blocksLeftAfterFlush, _ :=
		testTLFJournalPauseBlocksAndConvertBranch(t, ctx, tlfJournal, config)

	// While it's paused, resolve the branch.
	resolveMD := config.makeMD(firstRev, firstPrevRoot)
	_, err := tlfJournal.resolveBranch(ctx,
		tlfJournal.mdJournal.getBranchID(), []kbfsblock.ID{}, resolveMD,
		tlfJournal.key)
	require.NoError(t, err)
	requireJournalEntryCounts(
		t, tlfJournal, blocksLeftAfterFlush+maxJournalBlockFlushBatchSize+1, 1)

	// Now finish the block put, and let the flush finish.  We
	// shouldn't be on a branch anymore.
	unpauseBlockPutCh <- struct{}{}
	err = <-errCh
	require.NoError(t, err)

	// Since flush() never saw the branch in conflict, it will finish
	// flushing everything.
	testTLFJournalGCd(t, tlfJournal)
	require.Equal(t, NullBranchID, tlfJournal.mdJournal.getBranchID())
}

// testImmediateBackOff is a backoff strategy for tests: every retry
// waits only 1ns, and Reset() closes resetCh so the test can observe
// that the backoff was reset after a successful flush.
type testImmediateBackOff struct {
	numBackOffs int            // how many times NextBackOff was asked for a delay
	resetCh     chan<- struct{} // closed exactly once, on Reset()
}

func (t *testImmediateBackOff) NextBackOff() time.Duration {
	t.numBackOffs++
	return 1 * time.Nanosecond
}

// Reset closes resetCh; calling it more than once would panic, so the
// test relies on Reset being invoked a single time.
func (t *testImmediateBackOff) Reset() {
	close(t.resetCh)
}

// testTLFJournalFlushRetry checks that a failed MD flush is retried
// (after one backoff) and eventually succeeds, resetting the backoff.
func testTLFJournalFlushRetry(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	// Stop the current background loop; replace with one that retries
	// immediately.
	tlfJournal.needShutdownCh <- struct{}{}
	<-tlfJournal.backgroundShutdownCh
	resetCh := make(chan struct{})
	b := &testImmediateBackOff{resetCh: resetCh}
	tlfJournal.backgroundShutdownCh = make(chan struct{})
	go tlfJournal.doBackgroundWorkLoop(TLFJournalBackgroundWorkPaused, b)
	// Wait for the old loop's shutdown notification before proceeding.
	select {
	case <-delegate.shutdownCh:
	case <-ctx.Done():
		assert.Fail(config.t, ctx.Err().Error())
	}

	firstRevision := kbfsmd.Revision(10)
	firstPrevRoot := kbfsmd.FakeID(1)
	mdCount := 10

	prevRoot := firstPrevRoot
	for i := 0; i < mdCount; i++ {
		revision := firstRevision + kbfsmd.Revision(i)
		md := config.makeMD(revision, prevRoot)
		irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
		require.NoError(t, err)
		prevRoot = irmd.mdID
	}

	// The shim returns this error on the first put only, forcing
	// exactly one retry cycle.
	var mdserver shimMDServer
	mdserver.nextErr = errors.New("Error to force a retry")
	config.mdserver = &mdserver

	delegate.requireNextState(ctx, bwPaused)
	tlfJournal.resumeBackgroundWork()
	// First attempt fails (idle->busy->idle), second succeeds.
	delegate.requireNextState(ctx, bwIdle)
	delegate.requireNextState(ctx, bwBusy)
	delegate.requireNextState(ctx, bwIdle)
	delegate.requireNextState(ctx, bwBusy)
	delegate.requireNextState(ctx, bwIdle)
	<-resetCh

	require.Equal(t, b.numBackOffs, 1)
	testTLFJournalGCd(t, tlfJournal)
}

// testTLFJournalResolveBranch converts the journal to a branch via a
// server conflict, resolves it while ignoring one block, and checks
// the exact shape of the remaining flushable entries.
func testTLFJournalResolveBranch(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	var bids []kbfsblock.ID
	for i := 0; i < 3; i++ {
		data := []byte{byte(i)}
		bid, bCtx, serverHalf := config.makeBlock(data)
		bids = append(bids, bid)
		err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
		require.NoError(t, err)
	}

	firstRevision := kbfsmd.Revision(10)
	firstPrevRoot := kbfsmd.FakeID(1)
	mdCount := 3

	prevRoot := firstPrevRoot
	for i := 0; i < mdCount; i++ {
		revision := firstRevision + kbfsmd.Revision(i)
		md := config.makeMD(revision, prevRoot)
		irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
		require.NoError(t, err)
		prevRoot = irmd.mdID
	}

	// The shim reports a revision conflict on the next put.
	var mdserver shimMDServer
	mdserver.nextErr = kbfsmd.ServerErrorConflictRevision{}
	config.mdserver = &mdserver

	_, mdEnd, err := tlfJournal.getJournalEnds(ctx)
	require.NoError(t, err)

	// This will convert to a branch.
	flushed, err := tlfJournal.flushOneMDOp(ctx, mdEnd)
	require.NoError(t, err)
	require.False(t, flushed)

	// The background worker was already paused, so we won't get a
	// paused signal here.  But resume the background work now so that
	// later when the conflict resolves, it will be able to send a
	// resume signal.
	tlfJournal.resumeBackgroundWork()

	// Resolve the branch, marking bids[1] as ignorable.
	resolveMD := config.makeMD(firstRevision, firstPrevRoot)
	_, err = tlfJournal.resolveBranch(ctx,
		tlfJournal.mdJournal.getBranchID(), []kbfsblock.ID{bids[1]}, resolveMD,
		tlfJournal.key)
	require.NoError(t, err)

	blockEnd, newMDEnd, err := tlfJournal.getJournalEnds(ctx)
	require.NoError(t, err)
	require.Equal(t, firstRevision+1, newMDEnd)

	blocks, maxMD, err := tlfJournal.getNextBlockEntriesToFlush(ctx, blockEnd)
	require.NoError(t, err)
	require.Equal(t, firstRevision, maxMD)
	// 3 blocks, 3 old MD markers, 1 new MD marker
	require.Equal(t, 7, blocks.length())
	require.Len(t, blocks.puts.blockStates, 2)
	require.Len(t, blocks.adds.blockStates, 0)
	// 1 ignored block, 3 ignored MD markers, 1 real MD marker
	require.Len(t, blocks.other, 5)
	require.Equal(t, bids[0], blocks.puts.blockStates[0].blockPtr.ID)
	require.Equal(t, bids[2], blocks.puts.blockStates[1].blockPtr.ID)

	// resolveBranch resumes background work.
	delegate.requireNextState(ctx, bwIdle)
	delegate.requireNextState(ctx, bwBusy)
}

// testTLFJournalSquashByBytes verifies that exceeding the forced
// squash byte threshold converts the journal to a local squash branch.
func testTLFJournalSquashByBytes(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)
	tlfJournal.forcedSquashByBytes = 10

	// One block just over the squash threshold.
	data := make([]byte, tlfJournal.forcedSquashByBytes+1)
	bid, bCtx, serverHalf := config.makeBlock(data)
	err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
	require.NoError(t, err)

	firstRevision := kbfsmd.Revision(10)
	firstPrevRoot := kbfsmd.FakeID(1)
	mdCount := 3

	prevRoot := firstPrevRoot
	for i := 0; i < mdCount; i++ {
		revision := firstRevision + kbfsmd.Revision(i)
		md := config.makeMD(revision, prevRoot)
		irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
		require.NoError(t, err)
		prevRoot = irmd.mdID
	}

	// This should convert it to a branch, based on the number of
	// outstanding bytes.
	err = tlfJournal.flush(ctx)
	require.NoError(t, err)
	require.Equal(
		t, PendingLocalSquashBranchID, tlfJournal.mdJournal.getBranchID())
}

// Test that the first revision of a TLF doesn't get squashed.
func testTLFJournalFirstRevNoSquash(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)
	tlfJournal.forcedSquashByBytes = 10

	// One block just over the squash threshold.
	data := make([]byte, tlfJournal.forcedSquashByBytes+1)
	bid, bCtx, serverHalf := config.makeBlock(data)
	err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
	require.NoError(t, err)

	// Start the MD chain at the initial revision, so the very first
	// revision is a candidate for (non-)squashing.
	firstRevision := kbfsmd.RevisionInitial
	mdCount := 4

	var firstMdID, prevRoot kbfsmd.ID
	for i := 0; i < mdCount; i++ {
		revision := firstRevision + kbfsmd.Revision(i)
		md := config.makeMD(revision, prevRoot)
		irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
		require.NoError(t, err)
		prevRoot = irmd.mdID
		if i == 0 {
			firstMdID = irmd.mdID
		}
	}

	// This should convert it to a branch, based on the number of
	// outstanding bytes.
	err = tlfJournal.flush(ctx)
	require.NoError(t, err)
	require.Equal(
		t, PendingLocalSquashBranchID, tlfJournal.mdJournal.getBranchID())
	requireJournalEntryCounts(t, tlfJournal, 5, 4)

	// The initial revision must remain on the null branch, untouched.
	unsquashedRange, err := tlfJournal.getMDRange(
		ctx, NullBranchID, firstRevision, firstRevision+3)
	require.NoError(t, err)
	require.Len(t, unsquashedRange, 1)
	require.Equal(t, firstRevision, unsquashedRange[0].RevisionNumber())
	require.Equal(t, firstMdID, unsquashedRange[0].mdID)

	// The remaining three revisions moved to the squash branch.
	squashRange, err := tlfJournal.getMDRange(
		ctx, PendingLocalSquashBranchID, firstRevision, firstRevision+3)
	require.NoError(t, err)
	require.Len(t, squashRange, 3)
	require.Equal(t, firstRevision+1, squashRange[0].RevisionNumber())
}

// testTLFJournalSingleOp tests that when the journal is in single op
// mode, it doesn't flush any MDs until `finishSingleOp()` is called,
// and then it only flushes one squashed MD.
func testTLFJournalSingleOp(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalSingleOpBackgroundWorkEnabled)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	var mdserver shimMDServer
	config.mdserver = &mdserver

	tlfJournal.pauseBackgroundWork()
	delegate.requireNextState(ctx, bwPaused)

	putBlock(ctx, t, config, tlfJournal, []byte{1, 2})
	putBlock(ctx, t, config, tlfJournal, []byte{3, 4})
	putBlock(ctx, t, config, tlfJournal, []byte{5, 6})

	md1 := config.makeMD(kbfsmd.Revision(10), kbfsmd.FakeID(1))
	irmd, err := tlfJournal.putMD(ctx, md1, tlfJournal.key)
	require.NoError(t, err)
	prevRoot := irmd.mdID

	putBlock(ctx, t, config, tlfJournal, []byte{7, 8})
	putBlock(ctx, t, config, tlfJournal, []byte{9, 10})

	md2 := config.makeMD(kbfsmd.Revision(11), prevRoot)
	_, err = tlfJournal.putMD(ctx, md2, tlfJournal.key)
	require.NoError(t, err)

	tlfJournal.resumeBackgroundWork()
	delegate.requireNextState(ctx, bwIdle)
	delegate.requireNextState(ctx, bwBusy)
	delegate.requireNextState(ctx, bwIdle)

	// In single-op mode, blocks flush but both MDs are retained.
	requireJournalEntryCounts(t, tlfJournal, 0, 2)

	// The `finishSingleOp` call below blocks, so we have to do it in
	// a background goroutine to avoid deadlock.
	errCh := make(chan error, 1)
	go func() {
		errCh <- tlfJournal.finishSingleOp(ctx)
	}()

	// Background loop awakens after the finish is signaled.  Should
	// now be on a conflict branch.
	delegate.requireNextState(ctx, bwBusy)
	delegate.requireNextState(ctx, bwPaused)

	require.Equal(
		t, PendingLocalSquashBranchID, tlfJournal.mdJournal.getBranchID())
	resolveMD := config.makeMD(kbfsmd.Revision(10), kbfsmd.FakeID(1))
	_, err = tlfJournal.resolveBranch(ctx,
		tlfJournal.mdJournal.getBranchID(), nil, resolveMD, tlfJournal.key)
	require.NoError(t, err)

	// Now the flushing should complete.
	delegate.requireNextState(ctx, bwIdle)
	delegate.requireNextState(ctx, bwBusy)
	delegate.requireNextState(ctx, bwIdle)

	select {
	case err := <-errCh:
		require.NoError(t, err)
	case <-ctx.Done():
		t.Fatal(ctx.Err().Error())
	}
	requireJournalEntryCounts(t, tlfJournal, 0, 0)

	// Exactly one squashed MD should have reached the server.
	require.Len(t, mdserver.rmdses, 1)
}

// TestTLFJournal runs every tlfJournal test against each supported
// metadata version.
func TestTLFJournal(t *testing.T) {
	tests := []func(*testing.T, MetadataVer){
		testTLFJournalBasic,
		testTLFJournalPauseResume,
		testTLFJournalPauseShutdown,
		testTLFJournalBlockOpBasic,
		testTLFJournalBlockOpBusyPause,
		testTLFJournalBlockOpBusyShutdown,
		testTLFJournalSecondBlockOpWhileBusy,
		testTLFJournalMDServerBusyPause,
		testTLFJournalMDServerBusyShutdown,
		testTLFJournalBlockOpWhileBusy,
		testTLFJournalBlockOpDiskByteLimit,
		testTLFJournalBlockOpDiskFileLimit,
		testTLFJournalBlockOpDiskQuotaLimit,
		testTLFJournalBlockOpDiskQuotaLimitResolve,
		testTLFJournalBlockOpDiskLimitDuplicate,
		testTLFJournalBlockOpDiskLimitCancel,
		testTLFJournalBlockOpDiskLimitTimeout,
		testTLFJournalBlockOpDiskLimitPutFailure,
		testTLFJournalFlushMDBasic,
		testTLFJournalFlushMDConflict,
		testTLFJournalFlushOrdering,
		testTLFJournalFlushOrderingAfterSquashAndCR,
		testTLFJournalFlushInterleaving,
		testTLFJournalConvertWhileFlushing,
		testTLFJournalSquashWhileFlushing,
		testTLFJournalFlushRetry,
		testTLFJournalResolveBranch,
		testTLFJournalSquashByBytes,
		testTLFJournalFirstRevNoSquash,
		testTLFJournalSingleOp,
	}
	runTestsOverMetadataVers(t, "testTLFJournal", tests)
}

// NOTE(review): the lines below look like commit-message text
// accidentally embedded in the file — confirm against version control
// and remove if so.
tlf_journal_test: sidestep signal race in singleOp test When the background worker converts the MD journal to a branch, it sends out a pause signal before it finishes and sends a `nil` error back to the background loop. Most of the time the background loop processes the pause first, but sometimes it apparently sees the `nil` first and goes into idle mode before going into pause mode. I didn't see a way to force the loop to process one or the other first, so I just made it possible to expect a set of signals rather than just a single one.
Issue: KBFS-2440
// NOTE(review): the line above appears to be commit-message residue,
// not Go source — confirm and remove.

// Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.

package libkbfs

import (
	"math"
	"os"
	"reflect"
	"sync"
	"testing"
	"time"

	"github.com/keybase/client/go/protocol/keybase1"
	"github.com/keybase/kbfs/ioutil"
	"github.com/keybase/kbfs/kbfsblock"
	"github.com/keybase/kbfs/kbfscodec"
	"github.com/keybase/kbfs/kbfscrypto"
	"github.com/keybase/kbfs/kbfshash"
	"github.com/keybase/kbfs/kbfsmd"
	"github.com/keybase/kbfs/tlf"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/net/context"
)

// testBWDelegate is a delegate we pass to tlfJournal to get info
// about its state transitions.
type testBWDelegate struct {
	t *testing.T
	// Store a context so that the tlfJournal's background context
	// will also obey the test timeout.
	testCtx    context.Context
	stateCh    chan bwState
	shutdownCh chan struct{}
}

// GetBackgroundContext returns the test context so background work is
// bounded by the test timeout.
func (d testBWDelegate) GetBackgroundContext() context.Context {
	return d.testCtx
}

// OnNewState forwards each state transition to stateCh, failing the
// test if the context expires before the state is consumed.
func (d testBWDelegate) OnNewState(ctx context.Context, bws bwState) {
	select {
	case d.stateCh <- bws:
	case <-ctx.Done():
		assert.Fail(d.t, ctx.Err().Error())
	}
}

// OnShutdown signals shutdownCh, failing the test if the context
// expires before the signal is consumed.
func (d testBWDelegate) OnShutdown(ctx context.Context) {
	select {
	case d.shutdownCh <- struct{}{}:
	case <-ctx.Done():
		assert.Fail(d.t, ctx.Err().Error())
	}
}

// requireNextState reads the next state transition and requires it to
// be one of expectedState (a set, to tolerate benign signal-ordering
// races — see KBFS-2440).  Returns the state actually observed, or
// bwIdle if the context expired first.
func (d testBWDelegate) requireNextState(
	ctx context.Context, expectedState ...bwState) bwState {
	select {
	case bws := <-d.stateCh:
		require.Contains(d.t, expectedState, bws)
		return bws
	case <-ctx.Done():
		assert.Fail(d.t, ctx.Err().Error())
		return bwIdle
	}
}

// testTLFJournalConfig is the config we pass to the tlfJournal, and
// also contains some helper functions for testing.
// testTLFJournalConfig supplies the tlfJournal's dependencies for
// tests, plus helpers for making blocks/MDs and checking results.
type testTLFJournalConfig struct {
	codecGetter
	logMaker
	t            *testing.T
	tlfID        tlf.ID
	splitter     BlockSplitter
	crypto       CryptoLocal
	bcache       BlockCache
	bops         BlockOps
	mdcache      MDCache
	ver          MetadataVer
	reporter     Reporter
	uid          keybase1.UID
	verifyingKey kbfscrypto.VerifyingKey
	ekg          singleEncryptionKeyGetter
	nug          normalizedUsernameGetter
	mdserver     MDServer
	dlTimeout    time.Duration
}

func (c testTLFJournalConfig) BlockSplitter() BlockSplitter {
	return c.splitter
}

func (c testTLFJournalConfig) Clock() Clock {
	return wallClock{}
}

func (c testTLFJournalConfig) Crypto() Crypto {
	return c.crypto
}

func (c testTLFJournalConfig) BlockCache() BlockCache {
	return c.bcache
}

func (c testTLFJournalConfig) BlockOps() BlockOps {
	return c.bops
}

func (c testTLFJournalConfig) MDCache() MDCache {
	return c.mdcache
}

func (c testTLFJournalConfig) MetadataVersion() MetadataVer {
	return c.ver
}

func (c testTLFJournalConfig) Reporter() Reporter {
	return c.reporter
}

func (c testTLFJournalConfig) cryptoPure() cryptoPure {
	return c.crypto
}

func (c testTLFJournalConfig) encryptionKeyGetter() encryptionKeyGetter {
	return c.ekg
}

func (c testTLFJournalConfig) mdDecryptionKeyGetter() mdDecryptionKeyGetter {
	return c.ekg
}

func (c testTLFJournalConfig) usernameGetter() normalizedUsernameGetter {
	return c.nug
}

func (c testTLFJournalConfig) MDServer() MDServer {
	return c.mdserver
}

func (c testTLFJournalConfig) teamMembershipChecker() TeamMembershipChecker {
	// TODO: support team TLF tests.
	return nil
}

func (c testTLFJournalConfig) diskLimitTimeout() time.Duration {
	return c.dlTimeout
}

func (c testTLFJournalConfig) BGFlushDirOpBatchSize() int {
	return 1
}

// makeBlock builds a permanent block ID, a first-put context, and a
// random server half for the given data.
func (c testTLFJournalConfig) makeBlock(data []byte) (
	kbfsblock.ID, kbfsblock.Context, kbfscrypto.BlockCryptKeyServerHalf) {
	id, err := kbfsblock.MakePermanentID(data)
	require.NoError(c.t, err)
	bCtx := kbfsblock.MakeFirstContext(
		c.uid.AsUserOrTeam(), keybase1.BlockType_DATA)
	serverHalf, err := kbfscrypto.MakeRandomBlockCryptKeyServerHalf()
	require.NoError(c.t, err)
	return id, bCtx, serverHalf
}

// makeMD builds a test RootMetadata at the given revision chained to
// prevRoot.
func (c testTLFJournalConfig) makeMD(
	revision kbfsmd.Revision, prevRoot kbfsmd.ID) *RootMetadata {
	return makeMDForTest(c.t, c.ver, c.tlfID, revision, c.uid, c.crypto, prevRoot)
}

// checkMD verifies that a signed MD has the expected revision, prev
// root, merge status, and branch, and that it is validly signed and
// last-modified by this test's user.
func (c testTLFJournalConfig) checkMD(rmds *RootMetadataSigned,
	extra ExtraMetadata, expectedRevision kbfsmd.Revision,
	expectedPrevRoot kbfsmd.ID, expectedMergeStatus MergeStatus,
	expectedBranchID BranchID) {
	verifyingKey := c.crypto.SigningKeySigner.Key.GetVerifyingKey()
	checkBRMD(c.t, c.uid, verifyingKey, c.Codec(), c.Crypto(),
		rmds.MD, extra, expectedRevision, expectedPrevRoot,
		expectedMergeStatus, expectedBranchID)
	err := rmds.IsValidAndSigned(
		context.Background(), c.Codec(), c.Crypto(), nil, extra)
	require.NoError(c.t, err)
	err = rmds.IsLastModifiedBy(c.uid, verifyingKey)
	require.NoError(c.t, err)
}

// checkRange verifies a consecutive run of signed MDs: each entry has
// the expected revision/branch and is a valid successor of the one
// before it.
func (c testTLFJournalConfig) checkRange(rmdses []rmdsWithExtra,
	firstRevision kbfsmd.Revision, firstPrevRoot kbfsmd.ID,
	mStatus MergeStatus, bid BranchID) {
	c.checkMD(rmdses[0].rmds, rmdses[0].extra, firstRevision,
		firstPrevRoot, mStatus, bid)
	for i := 1; i < len(rmdses); i++ {
		prevID, err := kbfsmd.MakeID(c.Codec(), rmdses[i-1].rmds.MD)
		require.NoError(c.t, err)
		c.checkMD(rmdses[i].rmds, rmdses[i].extra,
			firstRevision+kbfsmd.Revision(i), prevID, mStatus, bid)
		err = rmdses[i-1].rmds.MD.CheckValidSuccessor(
			prevID, rmdses[i].rmds.MD)
		require.NoError(c.t, err)
	}
}

// setupTLFJournalTest builds a fully wired tlfJournal (in-memory MD
// server, in-memory block server, unlimited disk limiter) rooted in a
// fresh tempdir, and consumes the initial background-work state
// signals appropriate for bwStatus.  Callers must pass the returned
// values to teardownTLFJournalTest.
func setupTLFJournalTest(
	t *testing.T, ver MetadataVer, bwStatus TLFJournalBackgroundWorkStatus) (
	tempdir string, config *testTLFJournalConfig, ctx context.Context,
	cancel context.CancelFunc, tlfJournal *tlfJournal,
	delegate testBWDelegate) {
	// Set up config and dependencies.
	bsplitter := &BlockSplitterSimple{
		64 * 1024, int(64 * 1024 / bpSize), 8 * 1024}
	codec := kbfscodec.NewMsgpack()
	signingKey := kbfscrypto.MakeFakeSigningKeyOrBust("client sign")
	cryptPrivateKey := kbfscrypto.MakeFakeCryptPrivateKeyOrBust("client crypt private")
	crypto := NewCryptoLocal(codec, signingKey, cryptPrivateKey)
	uid := keybase1.MakeTestUID(1)
	verifyingKey := signingKey.GetVerifyingKey()
	ekg := singleEncryptionKeyGetter{kbfscrypto.MakeTLFCryptKey([32]byte{0x1})}

	cig := singleCurrentSessionGetter{
		SessionInfo{
			Name:         "fake_user",
			UID:          uid,
			VerifyingKey: verifyingKey,
		},
	}
	mdserver, err := NewMDServerMemory(newTestMDServerLocalConfig(t, cig))
	require.NoError(t, err)

	config = &testTLFJournalConfig{
		newTestCodecGetter(), newTestLogMaker(t), t,
		tlf.FakeID(1, tlf.Private), bsplitter, crypto, nil, nil,
		NewMDCacheStandard(10), ver,
		NewReporterSimple(newTestClockNow(), 10), uid, verifyingKey, ekg,
		nil, mdserver, defaultDiskLimitMaxDelay + time.Second,
	}

	ctx, cancel = context.WithTimeout(
		context.Background(), individualTestTimeout)

	// Clean up the context if the rest of the setup fails.
	setupSucceeded := false
	defer func() {
		if !setupSucceeded {
			cancel()
		}
	}()

	delegate = testBWDelegate{
		t:          t,
		testCtx:    ctx,
		stateCh:    make(chan bwState),
		shutdownCh: make(chan struct{}),
	}

	tempdir, err = ioutil.TempDir(os.TempDir(), "tlf_journal")
	require.NoError(t, err)

	// Clean up the tempdir if anything in the rest of the setup
	// fails.
	defer func() {
		if !setupSucceeded {
			err := ioutil.RemoveAll(tempdir)
			assert.NoError(t, err)
		}
	}()

	delegateBlockServer := NewBlockServerMemory(config.MakeLogger(""))

	diskLimitSemaphore := newSemaphoreDiskLimiter(
		math.MaxInt64, math.MaxInt64, math.MaxInt64)
	tlfJournal, err = makeTLFJournal(ctx, uid, verifyingKey, tempdir,
		config.tlfID, uid.AsUserOrTeam(), config, delegateBlockServer,
		bwStatus, delegate, nil, nil, diskLimitSemaphore)
	require.NoError(t, err)

	switch bwStatus {
	case TLFJournalBackgroundWorkEnabled:
		// Same as the single op case.
		fallthrough
	case TLFJournalSingleOpBackgroundWorkEnabled:
		// Read the state changes triggered by the initial
		// work signal.
		delegate.requireNextState(ctx, bwIdle)
		delegate.requireNextState(ctx, bwBusy)
		delegate.requireNextState(ctx, bwIdle)

	case TLFJournalBackgroundWorkPaused:
		delegate.requireNextState(ctx, bwPaused)

	default:
		require.FailNow(t, "Unknown bwStatus %s", bwStatus)
	}

	setupSucceeded = true
	return tempdir, config, ctx, cancel, tlfJournal, delegate
}

// teardownTLFJournalTest shuts the journal down, verifies no stray
// state signals remain, and removes all on-disk state.
func teardownTLFJournalTest(
	tempdir string, config *testTLFJournalConfig, ctx context.Context,
	cancel context.CancelFunc, tlfJournal *tlfJournal,
	delegate testBWDelegate) {
	// Shutdown first so we don't get the Done() signal (from the
	// cancel() call) spuriously.
	tlfJournal.shutdown(ctx)
	select {
	case <-delegate.shutdownCh:
	case <-ctx.Done():
		assert.Fail(config.t, ctx.Err().Error())
	}

	cancel()

	// Any leftover state transition at this point is a test bug.
	select {
	case bws := <-delegate.stateCh:
		assert.Fail(config.t, "Unexpected state %s", bws)
	default:
	}

	config.mdserver.Shutdown()
	tlfJournal.delegateBlockServer.Shutdown(ctx)

	err := ioutil.RemoveAll(tempdir)
	assert.NoError(config.t, err)
}

// putOneMD writes a single initial-revision MD into the journal.
func putOneMD(ctx context.Context, config *testTLFJournalConfig,
	tlfJournal *tlfJournal) {
	md := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{})
	_, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
	require.NoError(config.t, err)
}

// The tests below primarily test the background work thread's
// behavior.
func testTLFJournalBasic(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) putOneMD(ctx, config, tlfJournal) // Wait for it to be processed. delegate.requireNextState(ctx, bwBusy) delegate.requireNextState(ctx, bwIdle) } func testTLFJournalPauseResume(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) tlfJournal.pauseBackgroundWork() delegate.requireNextState(ctx, bwPaused) putOneMD(ctx, config, tlfJournal) // Unpause and wait for it to be processed. tlfJournal.resumeBackgroundWork() delegate.requireNextState(ctx, bwIdle) delegate.requireNextState(ctx, bwBusy) delegate.requireNextState(ctx, bwIdle) } func testTLFJournalPauseShutdown(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) tlfJournal.pauseBackgroundWork() delegate.requireNextState(ctx, bwPaused) putOneMD(ctx, config, tlfJournal) // Should still be able to shut down while paused. } type hangingBlockServer struct { BlockServer // Closed on put. onPutCh chan struct{} } func (bs hangingBlockServer) Put( ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context, buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) error { close(bs.onPutCh) // Hang until the context is cancelled. 
<-ctx.Done() return ctx.Err() } func (bs hangingBlockServer) waitForPut(ctx context.Context, t *testing.T) { select { case <-bs.onPutCh: case <-ctx.Done(): require.FailNow(t, ctx.Err().Error()) } } func putBlock(ctx context.Context, t *testing.T, config *testTLFJournalConfig, tlfJournal *tlfJournal, data []byte) { id, bCtx, serverHalf := config.makeBlock(data) err := tlfJournal.putBlockData(ctx, id, bCtx, data, serverHalf) require.NoError(t, err) } func testTLFJournalBlockOpBasic(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4}) numFlushed, rev, converted, err := tlfJournal.flushBlockEntries(ctx, firstValidJournalOrdinal+1) require.NoError(t, err) require.Equal(t, 1, numFlushed) require.Equal(t, rev, kbfsmd.RevisionUninitialized) require.False(t, converted) } func testTLFJournalBlockOpBusyPause(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) bs := hangingBlockServer{tlfJournal.delegateBlockServer, make(chan struct{})} tlfJournal.delegateBlockServer = bs putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4}) bs.waitForPut(ctx, t) delegate.requireNextState(ctx, bwBusy) // Should still be able to pause while busy. 
tlfJournal.pauseBackgroundWork() delegate.requireNextState(ctx, bwPaused) } func testTLFJournalBlockOpBusyShutdown(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) bs := hangingBlockServer{tlfJournal.delegateBlockServer, make(chan struct{})} tlfJournal.delegateBlockServer = bs putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4}) bs.waitForPut(ctx, t) delegate.requireNextState(ctx, bwBusy) // Should still be able to shut down while busy. } func testTLFJournalSecondBlockOpWhileBusy(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) bs := hangingBlockServer{tlfJournal.delegateBlockServer, make(chan struct{})} tlfJournal.delegateBlockServer = bs putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4}) bs.waitForPut(ctx, t) delegate.requireNextState(ctx, bwBusy) // Should still be able to put a second block while busy. 
putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4, 5}) } func testTLFJournalBlockOpDiskByteLimit(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) tlfJournal.diskLimiter.onJournalEnable( ctx, math.MaxInt64-6, 0, 0, tlfJournal.uid.AsUserOrTeam()) putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4}) errCh := make(chan error, 1) go func() { data2 := []byte{5, 6, 7} id, bCtx, serverHalf := config.makeBlock(data2) errCh <- tlfJournal.putBlockData( ctx, id, bCtx, data2, serverHalf) }() numFlushed, rev, converted, err := tlfJournal.flushBlockEntries(ctx, firstValidJournalOrdinal+1) require.NoError(t, err) require.Equal(t, 1, numFlushed) require.Equal(t, rev, kbfsmd.RevisionUninitialized) require.False(t, converted) // Fake an MD flush. md := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{}) err = tlfJournal.doOnMDFlushAndRemoveFlushedMDEntry( ctx, kbfsmd.ID{}, &RootMetadataSigned{MD: md.bareMd}) select { case err := <-errCh: require.NoError(t, err) case <-ctx.Done(): t.Fatal(ctx.Err()) } } func testTLFJournalBlockOpDiskFileLimit(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) tlfJournal.diskLimiter.onJournalEnable( ctx, 0, 0, math.MaxInt64-2*filesPerBlockMax+1, tlfJournal.uid.AsUserOrTeam()) putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4}) errCh := make(chan error, 1) go func() { data2 := []byte{5, 6, 7} id, bCtx, serverHalf := config.makeBlock(data2) errCh <- tlfJournal.putBlockData( ctx, id, bCtx, data2, serverHalf) }() numFlushed, rev, converted, err := tlfJournal.flushBlockEntries(ctx, firstValidJournalOrdinal+1) require.NoError(t, err) require.Equal(t, 1, numFlushed) require.Equal(t, rev, 
kbfsmd.RevisionUninitialized) require.False(t, converted) // Fake an MD flush. md := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{}) err = tlfJournal.doOnMDFlushAndRemoveFlushedMDEntry( ctx, kbfsmd.ID{}, &RootMetadataSigned{MD: md.bareMd}) select { case err := <-errCh: require.NoError(t, err) case <-ctx.Done(): t.Fatal(ctx.Err()) } } func testTLFJournalBlockOpDiskQuotaLimit(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) tlfJournal.diskLimiter.onJournalEnable( ctx, 0, math.MaxInt64-6, 0, tlfJournal.uid.AsUserOrTeam()) data1 := []byte{1, 2, 3, 4} putBlock(ctx, t, config, tlfJournal, data1) usedQuotaBytes, quotaBytes := tlfJournal.diskLimiter.getQuotaInfo(tlfJournal.uid.AsUserOrTeam()) require.Equal(t, int64(math.MaxInt64-6)+int64(len(data1)), usedQuotaBytes) require.Equal(t, int64(math.MaxInt64), quotaBytes) data2 := []byte{5, 6, 7} errCh := make(chan error, 1) go func() { id, bCtx, serverHalf := config.makeBlock(data2) errCh <- tlfJournal.putBlockData( ctx, id, bCtx, data2, serverHalf) }() numFlushed, rev, converted, err := tlfJournal.flushBlockEntries(ctx, firstValidJournalOrdinal+1) require.NoError(t, err) require.Equal(t, 1, numFlushed) require.Equal(t, rev, kbfsmd.RevisionUninitialized) require.False(t, converted) select { case err := <-errCh: require.NoError(t, err) case <-ctx.Done(): t.Fatal(ctx.Err()) } usedQuotaBytes, quotaBytes = tlfJournal.diskLimiter.getQuotaInfo(tlfJournal.uid.AsUserOrTeam()) require.Equal(t, int64(math.MaxInt64-6)+int64(len(data2)), usedQuotaBytes) require.Equal(t, int64(math.MaxInt64), quotaBytes) } func testTLFJournalBlockOpDiskQuotaLimitResolve(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, 
tlfJournal, delegate) tlfJournal.diskLimiter.onJournalEnable( ctx, 0, math.MaxInt64-6, 0, tlfJournal.uid.AsUserOrTeam()) data1 := []byte{1, 2, 3, 4} id1, bCtx1, serverHalf1 := config.makeBlock(data1) err := tlfJournal.putBlockData(ctx, id1, bCtx1, data1, serverHalf1) require.NoError(t, err) usedQuotaBytes, quotaBytes := tlfJournal.diskLimiter.getQuotaInfo(tlfJournal.uid.AsUserOrTeam()) require.Equal(t, int64(math.MaxInt64-6)+int64(len(data1)), usedQuotaBytes) require.Equal(t, int64(math.MaxInt64), quotaBytes) data2 := []byte{5, 6, 7} errCh := make(chan error, 1) go func() { id2, bCtx2, serverHalf2 := config.makeBlock(data2) errCh <- tlfJournal.putBlockData( ctx, id2, bCtx2, data2, serverHalf2) }() md1 := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{}) irmd, err := tlfJournal.putMD(ctx, md1, tlfJournal.key) require.NoError(t, err) mdID1 := irmd.mdID err = tlfJournal.convertMDsToBranch(ctx) require.NoError(t, err) bid, err := tlfJournal.getBranchID() require.NoError(t, err) // Ignore the block instead of flushing it. 
md2 := config.makeMD(kbfsmd.RevisionInitial+1, mdID1) _, retry, err := tlfJournal.doResolveBranch( ctx, bid, []kbfsblock.ID{id1}, md2, unflushedPathMDInfo{}, unflushedPathsPerRevMap{}, tlfJournal.key) require.NoError(t, err) require.False(t, retry) select { case err := <-errCh: require.NoError(t, err) case <-ctx.Done(): t.Fatal(ctx.Err()) } usedQuotaBytes, quotaBytes = tlfJournal.diskLimiter.getQuotaInfo(tlfJournal.uid.AsUserOrTeam()) require.Equal(t, int64(math.MaxInt64-6)+int64(len(data2)), usedQuotaBytes) require.Equal(t, int64(math.MaxInt64), quotaBytes) } func testTLFJournalBlockOpDiskLimitDuplicate(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) tlfJournal.diskLimiter.onJournalEnable( ctx, math.MaxInt64-8, 0, math.MaxInt64-2*filesPerBlockMax, tlfJournal.uid.AsUserOrTeam()) data := []byte{1, 2, 3, 4} id, bCtx, serverHalf := config.makeBlock(data) err := tlfJournal.putBlockData(ctx, id, bCtx, data, serverHalf) require.NoError(t, err) // This should acquire some bytes and files, but then release // them. err = tlfJournal.putBlockData(ctx, id, bCtx, data, serverHalf) require.NoError(t, err) // If the above incorrectly does not release bytes or files, // this will hang. 
// Tail of a test whose opening lines are above this chunk: the same
// block is put again and must succeed.
err = tlfJournal.putBlockData(ctx, id, bCtx, data, serverHalf)
require.NoError(t, err)
}

// testTLFJournalBlockOpDiskLimitCancel checks that a block put made
// with an already-cancelled context fails immediately with
// context.Canceled instead of blocking on the disk limiter.
func testTLFJournalBlockOpDiskLimitCancel(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	tlfJournal.diskLimiter.onJournalEnable(
		ctx, math.MaxInt64, 0, 0, tlfJournal.uid.AsUserOrTeam())

	// Cancel the child context before the put is even attempted.
	ctx2, cancel2 := context.WithCancel(ctx)
	cancel2()

	data := []byte{1, 2, 3, 4}
	id, bCtx, serverHalf := config.makeBlock(data)
	err := tlfJournal.putBlockData(ctx2, id, bCtx, data, serverHalf)
	require.Equal(t, context.Canceled, errors.Cause(err))
}

// testTLFJournalBlockOpDiskLimitTimeout checks that a block put which
// cannot acquire disk-limiter resources fails with ErrDiskLimitTimeout
// carrying the expected accounting fields, once the (tiny) configured
// timeout elapses.
func testTLFJournalBlockOpDiskLimitTimeout(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	// Leave (almost) no file headroom so the put must wait.
	tlfJournal.diskLimiter.onJournalEnable(
		ctx, math.MaxInt64, 0, math.MaxInt64-1,
		tlfJournal.uid.AsUserOrTeam())

	config.dlTimeout = 3 * time.Microsecond

	data := []byte{1, 2, 3, 4}
	id, bCtx, serverHalf := config.makeBlock(data)
	err := tlfJournal.putBlockData(ctx, id, bCtx, data, serverHalf)
	timeoutErr, ok := errors.Cause(err).(*ErrDiskLimitTimeout)
	require.True(t, ok)
	// The wrapped error is checked separately, then cleared so the
	// struct can be compared field-for-field below.
	require.Error(t, timeoutErr.err)
	timeoutErr.err = nil
	require.Equal(t, ErrDiskLimitTimeout{
		3 * time.Microsecond, int64(len(data)),
		filesPerBlockMax, 0, 1, 0, 1, math.MaxInt64, math.MaxInt64,
		nil, false,
	}, *timeoutErr)
}

// testTLFJournalBlockOpDiskLimitPutFailure checks that a failed block
// put (hash mismatch) releases its disk-limiter reservation, so a
// subsequent valid put of the same block does not hang.
func testTLFJournalBlockOpDiskLimitPutFailure(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	// Leave exactly enough room for one 4-byte block and one block's
	// worth of files.
	tlfJournal.diskLimiter.onJournalEnable(
		ctx, math.MaxInt64-6, 0, math.MaxInt64-filesPerBlockMax,
		tlfJournal.uid.AsUserOrTeam())

	data := []byte{1, 2, 3, 4}
	id, bCtx, serverHalf := config.makeBlock(data)
	// Put data that doesn't match the block ID, which must fail.
	err := tlfJournal.putBlockData(ctx, id, bCtx, []byte{1}, serverHalf)
	require.IsType(t, kbfshash.HashMismatchError{}, errors.Cause(err))

	// If the above incorrectly does not release bytes or files from
	// diskLimiter on error, this will hang.
	err = tlfJournal.putBlockData(ctx, id, bCtx, data, serverHalf)
	require.NoError(t, err)
}

// hangingMDServer is an MDServer shim whose Put signals onPutCh and
// then blocks until the caller's context is cancelled, simulating a
// busy/slow server.
type hangingMDServer struct {
	MDServer
	// Closed on put.
	onPutCh chan struct{}
}

// Put announces the put attempt by closing onPutCh, then hangs until
// ctx is done and returns its error.
func (md hangingMDServer) Put(ctx context.Context, rmds *RootMetadataSigned,
	_ ExtraMetadata, _ *keybase1.LockContext, _ keybase1.MDPriority) error {
	close(md.onPutCh)
	// Hang until the context is cancelled.
	<-ctx.Done()
	return ctx.Err()
}

// waitForPut blocks until Put has been called, failing the test if ctx
// expires first.
func (md hangingMDServer) waitForPut(ctx context.Context, t *testing.T) {
	select {
	case <-md.onPutCh:
	case <-ctx.Done():
		require.FailNow(t, ctx.Err().Error())
	}
}

// testTLFJournalMDServerBusyPause checks that the background worker can
// be paused while it is stuck flushing an MD to a hung server.
func testTLFJournalMDServerBusyPause(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	mdserver := hangingMDServer{config.MDServer(), make(chan struct{})}
	config.mdserver = mdserver

	md := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{})
	_, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
	require.NoError(t, err)

	mdserver.waitForPut(ctx, t)
	delegate.requireNextState(ctx, bwBusy)

	// Should still be able to pause while busy.

	tlfJournal.pauseBackgroundWork()
	delegate.requireNextState(ctx, bwPaused)
}

// testTLFJournalMDServerBusyShutdown checks that the journal can shut
// down (via the deferred teardown) while the background worker is stuck
// flushing an MD to a hung server.
func testTLFJournalMDServerBusyShutdown(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	mdserver := hangingMDServer{config.MDServer(), make(chan struct{})}
	config.mdserver = mdserver

	md := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{})
	_, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
	require.NoError(t, err)

	mdserver.waitForPut(ctx, t)
	delegate.requireNextState(ctx, bwBusy)

	// Should still be able to shutdown while busy.
	// (The shutdown itself happens in the deferred teardown.)
}

// testTLFJournalBlockOpWhileBusy checks that a block put still succeeds
// while the background worker is stuck flushing an MD to a hung server.
func testTLFJournalBlockOpWhileBusy(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkEnabled)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	mdserver := hangingMDServer{config.MDServer(), make(chan struct{})}
	config.mdserver = mdserver

	md := config.makeMD(kbfsmd.RevisionInitial, kbfsmd.ID{})
	_, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
	require.NoError(t, err)

	mdserver.waitForPut(ctx, t)
	delegate.requireNextState(ctx, bwBusy)

	// Should still be able to put a block while busy.

	putBlock(ctx, t, config, tlfJournal, []byte{1, 2, 3, 4})
}

// rmdsWithExtra pairs a signed root metadata with its extra metadata,
// as recorded by shimMDServer.Put.
type rmdsWithExtra struct {
	rmds  *RootMetadataSigned
	extra ExtraMetadata
}

// shimMDServer is a scriptable MDServer shim: it records successful
// puts, can return one queued error on the next Put, and serves a
// canned response for the next GetRange.
type shimMDServer struct {
	MDServer
	rmdses          []rmdsWithExtra
	nextGetRange    []*RootMetadataSigned
	nextErr         error
	getForTLFCalled bool
}

// GetRange returns (and consumes) the canned nextGetRange response.
func (s *shimMDServer) GetRange(
	ctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus,
	start, stop kbfsmd.Revision, _ *keybase1.LockID) (
	[]*RootMetadataSigned, error) {
	rmdses := s.nextGetRange
	s.nextGetRange = nil
	return rmdses, nil
}

// Put returns (and consumes) nextErr if set; otherwise it records the
// put and honors cancellation only after the put has been recorded.
func (s *shimMDServer) Put(ctx context.Context, rmds *RootMetadataSigned,
	extra ExtraMetadata, _ *keybase1.LockContext,
	_ keybase1.MDPriority) error {
	if s.nextErr != nil {
		err := s.nextErr
		s.nextErr = nil
		return err
	}
	s.rmdses = append(s.rmdses, rmdsWithExtra{rmds, extra})

	// Pretend all cancels happen after the actual put.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}
	return nil
}

// GetForTLF records that it was called and returns the most recently
// recorded put, or nil if none.
func (s *shimMDServer) GetForTLF(
	ctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus,
	_ *keybase1.LockID) (
	*RootMetadataSigned, error) {
	s.getForTLFCalled = true
	if len(s.rmdses) == 0 {
		return nil, nil
	}
	return s.rmdses[len(s.rmdses)-1].rmds, nil
}

// IsConnected always reports a live connection.
func (s *shimMDServer) IsConnected() bool {
	return true
}

func (s *shimMDServer) Shutdown() {
}

// requireJournalEntryCounts asserts the journal's current block and MD
// entry counts.
func requireJournalEntryCounts(t *testing.T, j *tlfJournal,
	expectedBlockEntryCount, expectedMDEntryCount uint64) {
	blockEntryCount, mdEntryCount, err := j.getJournalEntryCounts()
	require.NoError(t, err)
	require.Equal(t, expectedBlockEntryCount, blockEntryCount)
	require.Equal(t, expectedMDEntryCount, mdEntryCount)
}

// The tests below test tlfJournal's MD flushing behavior.
// testTLFJournalFlushMDBasic puts a run of consecutive MD revisions and
// flushes them one at a time, verifying each flush reports progress,
// that a further flush is a no-op, and that the server received the
// full range.
func testTLFJournalFlushMDBasic(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	firstRevision := kbfsmd.Revision(10)
	firstPrevRoot := kbfsmd.FakeID(1)
	mdCount := 10

	prevRoot := firstPrevRoot
	for i := 0; i < mdCount; i++ {
		revision := firstRevision + kbfsmd.Revision(i)
		md := config.makeMD(revision, prevRoot)
		irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
		require.NoError(t, err)
		prevRoot = irmd.mdID
	}

	// Flush all entries.
	var mdserver shimMDServer
	config.mdserver = &mdserver

	_, mdEnd, err := tlfJournal.getJournalEnds(ctx)
	require.NoError(t, err)

	for i := 0; i < mdCount; i++ {
		flushed, err := tlfJournal.flushOneMDOp(ctx, mdEnd)
		require.NoError(t, err)
		require.True(t, flushed)
	}
	// One more flush should report nothing left to do.
	flushed, err := tlfJournal.flushOneMDOp(ctx, mdEnd)
	require.NoError(t, err)
	require.False(t, flushed)
	// The block journal still holds one revision marker per MD; the
	// MD journal itself is empty and garbage-collected.
	requireJournalEntryCounts(t, tlfJournal, uint64(mdCount), 0)
	testMDJournalGCd(t, tlfJournal.mdJournal)

	// Check RMDSes on the server.

	rmdses := mdserver.rmdses
	require.Equal(t, mdCount, len(rmdses))
	config.checkRange(
		rmdses, firstRevision, firstPrevRoot, Merged, NullBranchID)
}

// testTLFJournalFlushMDConflict simulates a revision conflict halfway
// through a run of MD puts: the journal converts to an unmerged branch,
// further puts must be marked unmerged, and nothing flushes while on
// the branch.
func testTLFJournalFlushMDConflict(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	firstRevision := kbfsmd.Revision(10)
	firstPrevRoot := kbfsmd.FakeID(1)
	mdCount := 10

	prevRoot := firstPrevRoot
	for i := 0; i < mdCount/2; i++ {
		revision := firstRevision + kbfsmd.Revision(i)
		md := config.makeMD(revision, prevRoot)
		irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
		require.NoError(t, err)
		prevRoot = irmd.mdID
	}

	var mdserver shimMDServer
	mdserver.nextErr = kbfsmd.ServerErrorConflictRevision{}
	config.mdserver = &mdserver

	_, mdEnd, err := tlfJournal.getJournalEnds(ctx)
	require.NoError(t, err)

	// Simulate a flush with a conflict error halfway through.
	{
		flushed, err := tlfJournal.flushOneMDOp(ctx, mdEnd)
		require.NoError(t, err)
		require.False(t, flushed)

		// A merged put now fails with a conflict error; it must be
		// retried as unmerged.
		revision := firstRevision + kbfsmd.Revision(mdCount/2)
		md := config.makeMD(revision, prevRoot)
		_, err = tlfJournal.putMD(ctx, md, tlfJournal.key)
		require.IsType(t, MDJournalConflictError{}, err)

		md.SetUnmerged()
		irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
		require.NoError(t, err)
		prevRoot = irmd.mdID
	}

	for i := mdCount/2 + 1; i < mdCount; i++ {
		revision := firstRevision + kbfsmd.Revision(i)
		md := config.makeMD(revision, prevRoot)
		md.SetUnmerged()
		irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
		require.NoError(t, err)
		prevRoot = irmd.mdID
	}

	// The journal won't flush anything while on a branch.
	requireJournalEntryCounts(t, tlfJournal, uint64(mdCount), uint64(mdCount))
}

// orderedBlockServer and orderedMDServer append onto their shared
// puts slice when their Put() methods are called.
type orderedBlockServer struct { BlockServer lock *sync.Mutex puts *[]interface{} onceOnPut func() } func (s *orderedBlockServer) Put( ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context, buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) error { s.lock.Lock() defer s.lock.Unlock() *s.puts = append(*s.puts, id) if s.onceOnPut != nil { s.onceOnPut() s.onceOnPut = nil } return nil } func (s *orderedBlockServer) Shutdown(context.Context) {} type orderedMDServer struct { MDServer lock *sync.Mutex puts *[]interface{} onceOnPut func() error } func (s *orderedMDServer) Put( ctx context.Context, rmds *RootMetadataSigned, _ ExtraMetadata, _ *keybase1.LockContext, _ keybase1.MDPriority) error { s.lock.Lock() defer s.lock.Unlock() *s.puts = append(*s.puts, rmds.MD.RevisionNumber()) if s.onceOnPut != nil { err := s.onceOnPut() s.onceOnPut = nil if err != nil { return err } } return nil } func (s *orderedMDServer) Shutdown() {} func testTLFJournalGCd(t *testing.T, tlfJournal *tlfJournal) { // The root dir shouldn't exist. _, err := ioutil.Stat(tlfJournal.dir) require.True(t, ioutil.IsNotExist(err)) func() { tlfJournal.journalLock.Lock() defer tlfJournal.journalLock.Unlock() unflushedPaths := tlfJournal.unflushedPaths.getUnflushedPaths() require.Nil(t, unflushedPaths) require.Equal(t, uint64(0), tlfJournal.unsquashedBytes) require.Equal(t, 0, len(tlfJournal.flushingBlocks)) }() requireJournalEntryCounts(t, tlfJournal, 0, 0) // Check child journals. testBlockJournalGCd(t, tlfJournal.blockJournal) testMDJournalGCd(t, tlfJournal.mdJournal) } // testTLFJournalFlushOrdering tests that we respect the relative // orderings of blocks and MD ops when flushing, i.e. if a block op // was added to the block journal before an MD op was added to the MD // journal, then that block op will be flushed before that MD op. 
func testTLFJournalFlushOrdering(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	bid1, bCtx1, serverHalf1 := config.makeBlock([]byte{1})
	bid2, bCtx2, serverHalf2 := config.makeBlock([]byte{2})
	bid3, bCtx3, serverHalf3 := config.makeBlock([]byte{3})

	md1 := config.makeMD(kbfsmd.Revision(10), kbfsmd.FakeID(1))

	// Both servers record their puts into the same slice so the
	// interleaving can be checked at the end.
	var lock sync.Mutex
	var puts []interface{}

	bserver := orderedBlockServer{
		lock: &lock,
		puts: &puts,
	}

	tlfJournal.delegateBlockServer.Shutdown(ctx)
	tlfJournal.delegateBlockServer = &bserver

	mdserver := orderedMDServer{
		lock: &lock,
		puts: &puts,
	}

	config.mdserver = &mdserver

	// bid1 is-put-before kbfsmd.Revision(10).
	err := tlfJournal.putBlockData(
		ctx, bid1, bCtx1, []byte{1}, serverHalf1)
	require.NoError(t, err)
	irmd, err := tlfJournal.putMD(ctx, md1, tlfJournal.key)
	require.NoError(t, err)
	prevRoot := irmd.mdID

	// New journal entries are injected from within the server hooks,
	// while earlier entries are mid-flush.
	bserver.onceOnPut = func() {
		// bid2 is-put-before kbfsmd.Revision(11).
		err := tlfJournal.putBlockData(
			ctx, bid2, bCtx2, []byte{2}, serverHalf2)
		require.NoError(t, err)
		md2 := config.makeMD(kbfsmd.Revision(11), prevRoot)
		irmd, err := tlfJournal.putMD(ctx, md2, tlfJournal.key)
		require.NoError(t, err)
		prevRoot = irmd.mdID
	}

	mdserver.onceOnPut = func() error {
		// bid3 is-put-before kbfsmd.Revision(12).
		err := tlfJournal.putBlockData(
			ctx, bid3, bCtx3, []byte{3}, serverHalf3)
		require.NoError(t, err)
		md3 := config.makeMD(kbfsmd.Revision(12), prevRoot)
		irmd, err := tlfJournal.putMD(ctx, md3, tlfJournal.key)
		require.NoError(t, err)
		prevRoot = irmd.mdID
		return nil
	}

	err = tlfJournal.flush(ctx)
	require.NoError(t, err)
	testTLFJournalGCd(t, tlfJournal)

	// These two orderings depend on the exact flushing process,
	// but there are other possible orderings which respect the
	// above is-put-before constraints and also respect the
	// kbfsmd.Revision ordering.

	expectedPuts1 := []interface{}{
		bid1, kbfsmd.Revision(10), bid2, bid3,
		kbfsmd.Revision(11), kbfsmd.Revision(12),
	}
	// This is possible since block puts are done in parallel.
	expectedPuts2 := []interface{}{
		bid1, kbfsmd.Revision(10), bid3, bid2,
		kbfsmd.Revision(11), kbfsmd.Revision(12),
	}
	require.True(t, reflect.DeepEqual(puts, expectedPuts1) ||
		reflect.DeepEqual(puts, expectedPuts2),
		"Expected %v or %v, got %v", expectedPuts1,
		expectedPuts2, puts)
}

// testTLFJournalFlushOrderingAfterSquashAndCR tests that after a
// branch is squashed multiple times, and then hits a conflict, the
// blocks are flushed completely before the conflict-resolving MD.
func testTLFJournalFlushOrderingAfterSquashAndCR(
	t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)
	tlfJournal.forcedSquashByBytes = 20

	firstRev := kbfsmd.Revision(10)
	firstPrevRoot := kbfsmd.FakeID(1)
	md1 := config.makeMD(firstRev, firstPrevRoot)

	var lock sync.Mutex
	var puts []interface{}

	bserver := orderedBlockServer{
		lock: &lock,
		puts: &puts,
	}

	tlfJournal.delegateBlockServer.Shutdown(ctx)
	tlfJournal.delegateBlockServer = &bserver

	var mdserverShim shimMDServer
	mdserver := orderedMDServer{
		MDServer: &mdserverShim,
		lock:     &lock,
		puts:     &puts,
	}

	config.mdserver = &mdserver

	// Put almost a full batch worth of block before revs 10 and 11.

	blockEnd := uint64(maxJournalBlockFlushBatchSize - 1)
	for i := uint64(0); i < blockEnd; i++ {
		data := []byte{byte(i)}
		bid, bCtx, serverHalf := config.makeBlock(data)
		err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
		require.NoError(t, err)
	}

	irmd, err := tlfJournal.putMD(ctx, md1, tlfJournal.key)
	require.NoError(t, err)
	prevRoot := irmd.mdID
	md2 := config.makeMD(firstRev+1, prevRoot)
	// NOTE(review): this require re-checks the previous err; makeMD
	// returns no error. Looks like a stray assertion.
	require.NoError(t, err)
	irmd, err = tlfJournal.putMD(ctx, md2, tlfJournal.key)
	require.NoError(t, err)
	prevRoot = irmd.mdID

	// Squash revs 10 and 11.  No blocks should actually be flushed
	// yet.
	err = tlfJournal.flush(ctx)
	require.NoError(t, err)
	require.Equal(
		t, PendingLocalSquashBranchID, tlfJournal.mdJournal.getBranchID())

	requireJournalEntryCounts(t, tlfJournal, blockEnd+2, 2)

	squashMD := config.makeMD(firstRev, firstPrevRoot)
	irmd, err = tlfJournal.resolveBranch(ctx,
		PendingLocalSquashBranchID, []kbfsblock.ID{}, squashMD,
		tlfJournal.key)
	require.NoError(t, err)
	prevRoot = irmd.mdID

	requireJournalEntryCounts(t, tlfJournal, blockEnd+3, 1)

	// Another revision 11, with a squashable number of blocks to
	// complete the initial batch.
	for i := blockEnd; i < blockEnd+20; i++ {
		data := []byte{byte(i)}
		bid, bCtx, serverHalf := config.makeBlock(data)
		err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
		require.NoError(t, err)
	}
	blockEnd += 20
	md2 = config.makeMD(firstRev+1, prevRoot)
	// NOTE(review): stray assertion (see above).
	require.NoError(t, err)
	irmd, err = tlfJournal.putMD(ctx, md2, tlfJournal.key)
	require.NoError(t, err)
	prevRoot = irmd.mdID

	// Let it squash (avoiding a branch this time since there's only one MD).
	err = tlfJournal.flush(ctx)
	require.NoError(t, err)
	require.Equal(t, NullBranchID, tlfJournal.mdJournal.getBranchID())
	requireJournalEntryCounts(t, tlfJournal, blockEnd+4, 2)

	// Simulate an MD conflict and try to flush again.  This will
	// flush a full batch of blocks before hitting the conflict, as
	// well as the marker for rev 10.

	mdserver.onceOnPut = func() error {
		return kbfsmd.ServerErrorConflictRevision{}
	}
	mergedBare := config.makeMD(md2.Revision(), firstPrevRoot).bareMd
	mergedBare.SetSerializedPrivateMetadata([]byte{1})
	rmds, err := SignBareRootMetadata(
		ctx, config.Codec(), config.Crypto(), config.Crypto(),
		mergedBare, time.Now())
	require.NoError(t, err)
	mdserverShim.nextGetRange = []*RootMetadataSigned{rmds}
	err = tlfJournal.flush(ctx)
	require.NoError(t, err)
	branchID := tlfJournal.mdJournal.getBranchID()
	require.NotEqual(t, PendingLocalSquashBranchID, branchID)
	require.NotEqual(t, NullBranchID, branchID)
	// Blocks: All the unflushed blocks, plus two unflushed rev markers.
	requireJournalEntryCounts(
		t, tlfJournal, blockEnd-maxJournalBlockFlushBatchSize+2, 2)

	// More blocks that are part of the resolution.
	blockEnd2 := blockEnd + maxJournalBlockFlushBatchSize + 2
	for i := blockEnd; i < blockEnd2; i++ {
		data := []byte{byte(i)}
		bid, bCtx, serverHalf := config.makeBlock(data)
		err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
		require.NoError(t, err)
	}

	// Use revision 11 (as if two revisions had been merged by another
	// device).
	resolveMD := config.makeMD(md2.Revision(), firstPrevRoot)
	_, err = tlfJournal.resolveBranch(
		ctx, branchID, []kbfsblock.ID{}, resolveMD, tlfJournal.key)
	require.NoError(t, err)

	// Blocks: the ones from the last check, plus the new blocks, plus
	// the resolve rev marker.
	requireJournalEntryCounts(
		t, tlfJournal, blockEnd2-maxJournalBlockFlushBatchSize+3, 1)

	// Flush everything remaining.  All blocks should be flushed after
	// `resolveMD`.
	err = tlfJournal.flush(ctx)
	require.NoError(t, err)
	testTLFJournalGCd(t, tlfJournal)

	require.Equal(t, resolveMD.Revision(), puts[len(puts)-1])
}

// testTLFJournalFlushInterleaving tests that we interleave block and
// MD ops while respecting the relative orderings of blocks and MD ops
// when flushing.
func testTLFJournalFlushInterleaving(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	// Both servers record their puts into the same slice so the
	// interleaving can be checked at the end.
	var lock sync.Mutex
	var puts []interface{}

	bserver := orderedBlockServer{
		lock: &lock,
		puts: &puts,
	}

	tlfJournal.delegateBlockServer.Shutdown(ctx)
	tlfJournal.delegateBlockServer = &bserver

	var mdserverShim shimMDServer
	mdserver := orderedMDServer{
		MDServer: &mdserverShim,
		lock:     &lock,
		puts:     &puts,
	}

	config.mdserver = &mdserver

	// Revision 1
	var bids []kbfsblock.ID
	rev1BlockEnd := maxJournalBlockFlushBatchSize * 2
	for i := 0; i < rev1BlockEnd; i++ {
		data := []byte{byte(i)}
		bid, bCtx, serverHalf := config.makeBlock(data)
		bids = append(bids, bid)
		err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
		require.NoError(t, err)
	}
	md1 := config.makeMD(kbfsmd.Revision(10), kbfsmd.FakeID(1))
	irmd, err := tlfJournal.putMD(ctx, md1, tlfJournal.key)
	require.NoError(t, err)
	prevRoot := irmd.mdID

	// Revision 2
	rev2BlockEnd := rev1BlockEnd + maxJournalBlockFlushBatchSize*2
	for i := rev1BlockEnd; i < rev2BlockEnd; i++ {
		data := []byte{byte(i)}
		bid, bCtx, serverHalf := config.makeBlock(data)
		bids = append(bids, bid)
		err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
		require.NoError(t, err)
	}
	md2 := config.makeMD(kbfsmd.Revision(11), prevRoot)
	irmd, err = tlfJournal.putMD(ctx, md2, tlfJournal.key)
	require.NoError(t, err)
	prevRoot = irmd.mdID

	err = tlfJournal.flush(ctx)
	require.NoError(t, err)
	testTLFJournalGCd(t, tlfJournal)

	// Make sure the flusher checks in between block flushes for
	// conflicting MDs on the server.
	require.True(t, mdserverShim.getForTLFCalled)

	// Make sure that: before revision 1, all the rev1 blocks were
	// put; rev2 comes last; some blocks are put between the two.

	bidsSeen := make(map[kbfsblock.ID]bool)
	md1Slot := 0
	md2Slot := 0
	for i, put := range puts {
		if bid, ok := put.(kbfsblock.ID); ok {
			t.Logf("Saw bid %s at %d", bid, i)
			bidsSeen[bid] = true
			continue
		}

		mdID, ok := put.(kbfsmd.Revision)
		require.True(t, ok)
		if mdID == md1.Revision() {
			md1Slot = i
			for j := 0; j < rev1BlockEnd; j++ {
				t.Logf("Checking bid %s at %d", bids[j], i)
				require.True(t, bidsSeen[bids[j]])
			}
		} else if mdID == md2.Revision() {
			md2Slot = i
			require.NotZero(t, md1Slot)
			require.True(t, md1Slot+1 < i)
			require.Equal(t, i, len(puts)-1)
		}
	}
	require.NotZero(t, md1Slot)
	require.NotZero(t, md2Slot)
}

// testBranchChangeListener forwards branch-change notifications onto a
// channel so tests can wait for them.
type testBranchChangeListener struct {
	c chan<- struct{}
}

func (tbcl testBranchChangeListener) onTLFBranchChange(_ tlf.ID, _ BranchID) {
	tbcl.c <- struct{}{}
}

// testTLFJournalPauseBlocksAndConvertBranch starts a flush whose first
// block put hangs (until retUnpauseBlockPutCh is signaled), then puts
// enough MD revisions to force branch conversion, and waits for the
// local squash branch to appear.  It returns the first revision/root,
// the unpause channel, the flush-error channel, and the expected
// journal entry counts remaining once the flush completes.
func testTLFJournalPauseBlocksAndConvertBranch(t *testing.T,
	ctx context.Context, tlfJournal *tlfJournal,
	config *testTLFJournalConfig) (
	firstRev kbfsmd.Revision, firstRoot kbfsmd.ID,
	retUnpauseBlockPutCh chan<- struct{}, retErrCh <-chan error,
	blocksLeftAfterFlush uint64, mdsLeftAfterFlush uint64) {
	branchCh := make(chan struct{}, 1)
	tlfJournal.onBranchChange = testBranchChangeListener{branchCh}

	var lock sync.Mutex
	var puts []interface{}

	unpauseBlockPutCh := make(chan struct{})
	bserver := orderedBlockServer{
		lock: &lock,
		puts: &puts,
		// The first block put blocks until the test signals it.
		onceOnPut: func() { <-unpauseBlockPutCh },
	}

	tlfJournal.delegateBlockServer.Shutdown(ctx)
	tlfJournal.delegateBlockServer = &bserver

	// Revision 1
	var bids []kbfsblock.ID
	rev1BlockEnd := maxJournalBlockFlushBatchSize * 2
	for i := 0; i < rev1BlockEnd; i++ {
		data := []byte{byte(i)}
		bid, bCtx, serverHalf := config.makeBlock(data)
		bids = append(bids, bid)
		err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
		require.NoError(t, err)
	}
	firstRev = kbfsmd.Revision(10)
	firstRoot = kbfsmd.FakeID(1)
	md1 := config.makeMD(firstRev, firstRoot)
	irmd, err := tlfJournal.putMD(ctx, md1, tlfJournal.key)
	require.NoError(t, err)
	prevRoot := irmd.mdID
	rev := firstRev

	// Now start the blocks flushing.  One of the block puts will be
	// stuck.  During that time, put a lot more MD revisions, enough
	// to trigger branch conversion.  However, no pause should be
	// called.

	errCh := make(chan error, 1)
	go func() {
		errCh <- tlfJournal.flush(ctx)
	}()

	markers := uint64(1)
	for i := 0; i < ForcedBranchSquashRevThreshold+1; i++ {
		rev++
		md := config.makeMD(rev, prevRoot)
		irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
		if isRevisionConflict(err) {
			// Branch conversion is done, we can stop now.
			break
		}
		require.NoError(t, err)
		prevRoot = irmd.mdID
		markers++
	}

	// Wait for the local squash branch to appear.
	select {
	case <-branchCh:
	case <-ctx.Done():
		t.Fatalf("Timeout while waiting for branch change")
	}

	return firstRev, firstRoot, unpauseBlockPutCh, errCh,
		maxJournalBlockFlushBatchSize + markers, markers
}

// testTLFJournalConvertWhileFlushing tests that we can do branch
// conversion while blocks are still flushing.
func testTLFJournalConvertWhileFlushing(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	_, _, unpauseBlockPutCh, errCh, blocksLeftAfterFlush, mdsLeftAfterFlush :=
		testTLFJournalPauseBlocksAndConvertBranch(t, ctx, tlfJournal, config)

	// Now finish the block put, and let the flush finish.  We should
	// be on a local squash branch now.
	unpauseBlockPutCh <- struct{}{}
	err := <-errCh
	require.NoError(t, err)

	// Should be a full batch worth of blocks left, plus all the
	// revision markers above.  No squash has actually happened yet,
	// so all the revisions should be there now, just on a branch.
	requireJournalEntryCounts(
		t, tlfJournal, blocksLeftAfterFlush, mdsLeftAfterFlush)
	require.Equal(
		t, PendingLocalSquashBranchID, tlfJournal.mdJournal.getBranchID())
}

// testTLFJournalSquashWhileFlushing tests that we can do journal
// coalescing while blocks are still flushing.
func testTLFJournalSquashWhileFlushing(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	firstRev, firstPrevRoot, unpauseBlockPutCh, errCh,
		blocksLeftAfterFlush, _ :=
		testTLFJournalPauseBlocksAndConvertBranch(t, ctx, tlfJournal, config)

	// While it's paused, resolve the branch.
	resolveMD := config.makeMD(firstRev, firstPrevRoot)
	_, err := tlfJournal.resolveBranch(ctx,
		tlfJournal.mdJournal.getBranchID(), []kbfsblock.ID{}, resolveMD,
		tlfJournal.key)
	require.NoError(t, err)
	requireJournalEntryCounts(
		t, tlfJournal, blocksLeftAfterFlush+maxJournalBlockFlushBatchSize+1, 1)

	// Now finish the block put, and let the flush finish.  We
	// shouldn't be on a branch anymore.
	unpauseBlockPutCh <- struct{}{}
	err = <-errCh
	require.NoError(t, err)

	// Since flush() never saw the branch in conflict, it will finish
	// flushing everything.
	testTLFJournalGCd(t, tlfJournal)
	require.Equal(t, NullBranchID, tlfJournal.mdJournal.getBranchID())
}

// testImmediateBackOff is a backoff strategy that retries after a
// single nanosecond, counting the backoffs and closing resetCh when
// Reset is called.
type testImmediateBackOff struct {
	numBackOffs int
	resetCh     chan<- struct{}
}

func (t *testImmediateBackOff) NextBackOff() time.Duration {
	t.numBackOffs++
	return 1 * time.Nanosecond
}

func (t *testImmediateBackOff) Reset() {
	close(t.resetCh)
}

// testTLFJournalFlushRetry replaces the background loop with one that
// retries immediately, forces one flush error, and verifies the journal
// retries, flushes, and resets the backoff exactly once.
func testTLFJournalFlushRetry(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	// Stop the current background loop; replace with one that retries
	// immediately.

	tlfJournal.needShutdownCh <- struct{}{}
	<-tlfJournal.backgroundShutdownCh
	resetCh := make(chan struct{})
	b := &testImmediateBackOff{resetCh: resetCh}
	tlfJournal.backgroundShutdownCh = make(chan struct{})
	go tlfJournal.doBackgroundWorkLoop(TLFJournalBackgroundWorkPaused, b)
	select {
	case <-delegate.shutdownCh:
	case <-ctx.Done():
		assert.Fail(config.t, ctx.Err().Error())
	}

	firstRevision := kbfsmd.Revision(10)
	firstPrevRoot := kbfsmd.FakeID(1)
	mdCount := 10

	prevRoot := firstPrevRoot
	for i := 0; i < mdCount; i++ {
		revision := firstRevision + kbfsmd.Revision(i)
		md := config.makeMD(revision, prevRoot)
		irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
		require.NoError(t, err)
		prevRoot = irmd.mdID
	}

	var mdserver shimMDServer
	mdserver.nextErr = errors.New("Error to force a retry")
	config.mdserver = &mdserver

	delegate.requireNextState(ctx, bwPaused)
	tlfJournal.resumeBackgroundWork()
	delegate.requireNextState(ctx, bwIdle)
	delegate.requireNextState(ctx, bwBusy)
	delegate.requireNextState(ctx, bwIdle)
	delegate.requireNextState(ctx, bwBusy)
	delegate.requireNextState(ctx, bwIdle)
	// Reset is only called after a successful flush.
	<-resetCh

	require.Equal(t, b.numBackOffs, 1)
	testTLFJournalGCd(t, tlfJournal)
}

// testTLFJournalResolveBranch forces a conflict to convert the journal
// to a branch, resolves it while dropping one block, and checks that
// the next batch of flushable entries ignores the dropped block and the
// superseded MD markers.
func testTLFJournalResolveBranch(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	var bids []kbfsblock.ID
	for i := 0; i < 3; i++ {
		data := []byte{byte(i)}
		bid, bCtx, serverHalf := config.makeBlock(data)
		bids = append(bids, bid)
		err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
		require.NoError(t, err)
	}

	firstRevision := kbfsmd.Revision(10)
	firstPrevRoot := kbfsmd.FakeID(1)
	mdCount := 3

	prevRoot := firstPrevRoot
	for i := 0; i < mdCount; i++ {
		revision := firstRevision + kbfsmd.Revision(i)
		md := config.makeMD(revision, prevRoot)
		irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
		require.NoError(t, err)
		prevRoot = irmd.mdID
	}

	var mdserver shimMDServer
	mdserver.nextErr = kbfsmd.ServerErrorConflictRevision{}
	config.mdserver = &mdserver

	_, mdEnd, err := tlfJournal.getJournalEnds(ctx)
	require.NoError(t, err)

	// This will convert to a branch.
	flushed, err := tlfJournal.flushOneMDOp(ctx, mdEnd)
	require.NoError(t, err)
	require.False(t, flushed)

	// The background worker was already paused, so we won't get a
	// paused signal here.  But resume the background work now so that
	// later when the conflict resolves, it will be able to send a
	// resume signal.
	tlfJournal.resumeBackgroundWork()

	// Resolve the branch.
	resolveMD := config.makeMD(firstRevision, firstPrevRoot)
	_, err = tlfJournal.resolveBranch(ctx,
		tlfJournal.mdJournal.getBranchID(), []kbfsblock.ID{bids[1]},
		resolveMD, tlfJournal.key)
	require.NoError(t, err)

	blockEnd, newMDEnd, err := tlfJournal.getJournalEnds(ctx)
	require.NoError(t, err)
	require.Equal(t, firstRevision+1, newMDEnd)

	blocks, maxMD, err := tlfJournal.getNextBlockEntriesToFlush(ctx, blockEnd)
	require.NoError(t, err)
	require.Equal(t, firstRevision, maxMD)
	// 3 blocks, 3 old MD markers, 1 new MD marker
	require.Equal(t, 7, blocks.length())
	require.Len(t, blocks.puts.blockStates, 2)
	require.Len(t, blocks.adds.blockStates, 0)
	// 1 ignored block, 3 ignored MD markers, 1 real MD marker
	require.Len(t, blocks.other, 5)
	require.Equal(t, bids[0], blocks.puts.blockStates[0].blockPtr.ID)
	require.Equal(t, bids[2], blocks.puts.blockStates[1].blockPtr.ID)

	// resolveBranch resumes background work.

	delegate.requireNextState(ctx, bwIdle)
	delegate.requireNextState(ctx, bwBusy)
}

// testTLFJournalSquashByBytes checks that exceeding the forced-squash
// byte threshold converts the journal to a pending local squash branch
// on flush.
func testTLFJournalSquashByBytes(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)
	tlfJournal.forcedSquashByBytes = 10

	data := make([]byte, tlfJournal.forcedSquashByBytes+1)
	bid, bCtx, serverHalf := config.makeBlock(data)
	err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf)
	require.NoError(t, err)

	firstRevision := kbfsmd.Revision(10)
	firstPrevRoot := kbfsmd.FakeID(1)
	mdCount := 3

	prevRoot := firstPrevRoot
	for i := 0; i < mdCount; i++ {
		revision := firstRevision + kbfsmd.Revision(i)
		md := config.makeMD(revision, prevRoot)
		irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key)
		require.NoError(t, err)
		prevRoot = irmd.mdID
	}

	// This should convert it to a branch, based on the number of
	// outstanding bytes.
	err = tlfJournal.flush(ctx)
	require.NoError(t, err)
	require.Equal(
		t, PendingLocalSquashBranchID, tlfJournal.mdJournal.getBranchID())
}

// Test that the first revision of a TLF doesn't get squashed.
func testTLFJournalFirstRevNoSquash(t *testing.T, ver MetadataVer) { tempdir, config, ctx, cancel, tlfJournal, delegate := setupTLFJournalTest(t, ver, TLFJournalBackgroundWorkPaused) defer teardownTLFJournalTest( tempdir, config, ctx, cancel, tlfJournal, delegate) tlfJournal.forcedSquashByBytes = 10 data := make([]byte, tlfJournal.forcedSquashByBytes+1) bid, bCtx, serverHalf := config.makeBlock(data) err := tlfJournal.putBlockData(ctx, bid, bCtx, data, serverHalf) require.NoError(t, err) firstRevision := kbfsmd.RevisionInitial mdCount := 4 var firstMdID, prevRoot kbfsmd.ID for i := 0; i < mdCount; i++ { revision := firstRevision + kbfsmd.Revision(i) md := config.makeMD(revision, prevRoot) irmd, err := tlfJournal.putMD(ctx, md, tlfJournal.key) require.NoError(t, err) prevRoot = irmd.mdID if i == 0 { firstMdID = irmd.mdID } } // This should convert it to a branch, based on the number of // outstanding bytes. err = tlfJournal.flush(ctx) require.NoError(t, err) require.Equal( t, PendingLocalSquashBranchID, tlfJournal.mdJournal.getBranchID()) requireJournalEntryCounts(t, tlfJournal, 5, 4) unsquashedRange, err := tlfJournal.getMDRange( ctx, NullBranchID, firstRevision, firstRevision+3) require.NoError(t, err) require.Len(t, unsquashedRange, 1) require.Equal(t, firstRevision, unsquashedRange[0].RevisionNumber()) require.Equal(t, firstMdID, unsquashedRange[0].mdID) squashRange, err := tlfJournal.getMDRange( ctx, PendingLocalSquashBranchID, firstRevision, firstRevision+3) require.NoError(t, err) require.Len(t, squashRange, 3) require.Equal(t, firstRevision+1, squashRange[0].RevisionNumber()) } // testTLFJournalSingleOp tests that when the journal is in single op // mode, it doesn't flush any MDs until `finishSingleOp()` is called, // and then it only flushes one squashed MD. 
func testTLFJournalSingleOp(t *testing.T, ver MetadataVer) {
	tempdir, config, ctx, cancel, tlfJournal, delegate :=
		setupTLFJournalTest(t, ver, TLFJournalSingleOpBackgroundWorkEnabled)
	defer teardownTLFJournalTest(
		tempdir, config, ctx, cancel, tlfJournal, delegate)

	var mdserver shimMDServer
	config.mdserver = &mdserver

	tlfJournal.pauseBackgroundWork()
	delegate.requireNextState(ctx, bwPaused)

	// Queue up a mix of blocks and two MD revisions while paused.
	putBlock(ctx, t, config, tlfJournal, []byte{1, 2})
	putBlock(ctx, t, config, tlfJournal, []byte{3, 4})
	putBlock(ctx, t, config, tlfJournal, []byte{5, 6})

	md1 := config.makeMD(kbfsmd.Revision(10), kbfsmd.FakeID(1))
	irmd, err := tlfJournal.putMD(ctx, md1, tlfJournal.key)
	require.NoError(t, err)
	prevRoot := irmd.mdID

	putBlock(ctx, t, config, tlfJournal, []byte{7, 8})
	putBlock(ctx, t, config, tlfJournal, []byte{9, 10})

	md2 := config.makeMD(kbfsmd.Revision(11), prevRoot)
	_, err = tlfJournal.putMD(ctx, md2, tlfJournal.key)
	require.NoError(t, err)

	// In single-op mode, resuming flushes the blocks but leaves both
	// MDs in the journal.
	tlfJournal.resumeBackgroundWork()
	delegate.requireNextState(ctx, bwIdle)
	delegate.requireNextState(ctx, bwBusy)
	delegate.requireNextState(ctx, bwIdle)

	requireJournalEntryCounts(t, tlfJournal, 0, 2)

	// The `finishSingleOp` call below blocks, so we have to do it in
	// a background goroutine to avoid deadlock.
	errCh := make(chan error, 1)
	go func() {
		errCh <- tlfJournal.finishSingleOp(ctx)
	}()

	// Background loop awakens after the finish is signaled. Should
	// now be on a conflict branch. The pause signal sent by the
	// branch-converter races with the background work finishing
	// (KBFS-2440), and so the second state could be either idle or
	// paused, depending on what gets processed first.
	delegate.requireNextState(ctx, bwBusy)
	nextState := delegate.requireNextState(ctx, bwPaused, bwIdle)
	if nextState == bwIdle {
		delegate.requireNextState(ctx, bwPaused)
	}

	require.Equal(
		t, PendingLocalSquashBranchID, tlfJournal.mdJournal.getBranchID())

	// Resolve the local-squash branch with a single squashed MD.
	resolveMD := config.makeMD(kbfsmd.Revision(10), kbfsmd.FakeID(1))
	_, err = tlfJournal.resolveBranch(ctx,
		tlfJournal.mdJournal.getBranchID(), nil, resolveMD, tlfJournal.key)
	require.NoError(t, err)

	// Now the flushing should complete.
	delegate.requireNextState(ctx, bwIdle)
	delegate.requireNextState(ctx, bwBusy)
	delegate.requireNextState(ctx, bwIdle)

	select {
	case err := <-errCh:
		require.NoError(t, err)
	case <-ctx.Done():
		t.Fatal(ctx.Err().Error())
	}
	requireJournalEntryCounts(t, tlfJournal, 0, 0)

	// Exactly one (squashed) MD should have reached the server.
	require.Len(t, mdserver.rmdses, 1)
}

// TestTLFJournal runs every TLF-journal test over all supported
// metadata versions.
func TestTLFJournal(t *testing.T) {
	tests := []func(*testing.T, MetadataVer){
		testTLFJournalBasic,
		testTLFJournalPauseResume,
		testTLFJournalPauseShutdown,
		testTLFJournalBlockOpBasic,
		testTLFJournalBlockOpBusyPause,
		testTLFJournalBlockOpBusyShutdown,
		testTLFJournalSecondBlockOpWhileBusy,
		testTLFJournalMDServerBusyPause,
		testTLFJournalMDServerBusyShutdown,
		testTLFJournalBlockOpWhileBusy,
		testTLFJournalBlockOpDiskByteLimit,
		testTLFJournalBlockOpDiskFileLimit,
		testTLFJournalBlockOpDiskQuotaLimit,
		testTLFJournalBlockOpDiskQuotaLimitResolve,
		testTLFJournalBlockOpDiskLimitDuplicate,
		testTLFJournalBlockOpDiskLimitCancel,
		testTLFJournalBlockOpDiskLimitTimeout,
		testTLFJournalBlockOpDiskLimitPutFailure,
		testTLFJournalFlushMDBasic,
		testTLFJournalFlushMDConflict,
		testTLFJournalFlushOrdering,
		testTLFJournalFlushOrderingAfterSquashAndCR,
		testTLFJournalFlushInterleaving,
		testTLFJournalConvertWhileFlushing,
		testTLFJournalSquashWhileFlushing,
		testTLFJournalFlushRetry,
		testTLFJournalResolveBranch,
		testTLFJournalSquashByBytes,
		testTLFJournalFirstRevNoSquash,
		testTLFJournalSingleOp,
	}
	runTestsOverMetadataVers(t, "testTLFJournal", tests)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package serviceentry

import (
	"fmt"
	"reflect"
	"strconv"
	"sync"

	"go.uber.org/atomic"

	networking "istio.io/api/networking/v1alpha3"
	"istio.io/istio/pilot/pkg/features"
	"istio.io/istio/pilot/pkg/model"
	"istio.io/istio/pilot/pkg/model/status"
	"istio.io/istio/pilot/pkg/serviceregistry"
	"istio.io/istio/pilot/pkg/util/informermetric"
	"istio.io/istio/pkg/config"
	"istio.io/istio/pkg/config/constants"
	"istio.io/istio/pkg/config/host"
	"istio.io/istio/pkg/config/labels"
	"istio.io/istio/pkg/config/schema/gvk"
	"istio.io/pkg/log"
)

var _ serviceregistry.Instance = &ServiceEntryStore{}

// instancesKey acts as a key to identify all instances for a given hostname/namespace pair
// This is mostly used as an index
type instancesKey struct {
	hostname  host.Name
	namespace string
}

// makeInstanceKey builds the hostname/namespace index key for a service instance.
func makeInstanceKey(i *model.ServiceInstance) instancesKey {
	return instancesKey{i.Service.Hostname, i.Service.Attributes.Namespace}
}

type externalConfigType int

const (
	serviceEntryConfigType externalConfigType = iota
	workloadEntryConfigType
	workloadInstanceConfigType
)

// configKey unique identifies a config object managed by this registry (ServiceEntry and WorkloadEntry)
type configKey struct {
	kind      externalConfigType
	name      string
	namespace string
}

// ServiceEntryStore communicates with ServiceEntry CRDs and monitors for changes
type ServiceEntryStore struct { // nolint:golint
	XdsUpdater model.XDSUpdater
	store      model.IstioConfigStore
	clusterID  string

	// storeMutex guards all of the index maps below.
	storeMutex sync.RWMutex

	ip2instance map[string][]*model.ServiceInstance
	// Endpoints table
	instances map[instancesKey]map[configKey][]*model.ServiceInstance
	// workload instances from kubernetes pods - map of ip -> workload instance
	workloadInstancesByIP map[string]*model.WorkloadInstance
	// Stores a map of workload instance name/namespace to address
	workloadInstancesIPsByName map[string]string
	// seWithSelectorByNamespace keeps track of ServiceEntries with selectors, keyed by namespaces
	seWithSelectorByNamespace map[string][]servicesWithEntry
	// services keeps track of all services - mainly used to return from Services() to avoid reconversion.
	services         []*model.Service
	refreshIndexes   *atomic.Bool
	workloadHandlers []func(*model.WorkloadInstance, model.Event)

	processServiceEntry bool
}

// ServiceDiscoveryOption configures a ServiceEntryStore at construction time.
type ServiceDiscoveryOption func(*ServiceEntryStore)

// DisableServiceEntryProcessing turns off ServiceEntry handling (WorkloadEntry
// handling remains active).
func DisableServiceEntryProcessing() ServiceDiscoveryOption {
	return func(o *ServiceEntryStore) {
		o.processServiceEntry = false
	}
}

// WithClusterID sets the cluster ID used for informer error metrics.
func WithClusterID(clusterID string) ServiceDiscoveryOption {
	return func(o *ServiceEntryStore) {
		o.clusterID = clusterID
	}
}

// NewServiceDiscovery creates a new ServiceEntry discovery service
func NewServiceDiscovery(
	configController model.ConfigStoreCache,
	store model.IstioConfigStore,
	xdsUpdater model.XDSUpdater,
	options ...ServiceDiscoveryOption,
) *ServiceEntryStore {
	s := &ServiceEntryStore{
		XdsUpdater:                 xdsUpdater,
		store:                      store,
		ip2instance:                map[string][]*model.ServiceInstance{},
		instances:                  map[instancesKey]map[configKey][]*model.ServiceInstance{},
		workloadInstancesByIP:      map[string]*model.WorkloadInstance{},
		workloadInstancesIPsByName: map[string]string{},
		refreshIndexes:             atomic.NewBool(true),
		processServiceEntry:        true,
	}
	for _, o := range options {
		o(s)
	}
	if configController != nil {
		if s.processServiceEntry {
			configController.RegisterEventHandler(gvk.ServiceEntry, s.serviceEntryHandler)
		}
		configController.RegisterEventHandler(gvk.WorkloadEntry, s.workloadEntryHandler)
		// Error is intentionally ignored; the handler only reports metrics.
		_ = configController.SetWatchErrorHandler(informermetric.ErrorHandlerForCluster(s.clusterID))
	}
	return s
}

// workloadEntryHandler defines the handler for workload entries
// kube registry controller also calls this function indirectly via the Share interface
// When invoked via the kube registry controller, the old object is nil as the registry
// controller does its own deduping and has no notion of object versions
func (s *ServiceEntryStore) workloadEntryHandler(old, curr config.Config, event model.Event) {
	var oldWle *networking.WorkloadEntry
	if old.Spec != nil {
		oldWle = old.Spec.(*networking.WorkloadEntry)
	}
	wle := curr.Spec.(*networking.WorkloadEntry)
	key := configKey{
		kind:      workloadEntryConfigType,
		name:      curr.Name,
		namespace: curr.Namespace,
	}

	// If an entry is unhealthy, we will mark this as a delete instead
	// This ensures we do not track unhealthy endpoints
	if features.WorkloadEntryHealthChecks && !isHealthy(curr) {
		event = model.EventDelete
	}

	// fire off the k8s handlers
	if len(s.workloadHandlers) > 0 {
		si := convertWorkloadEntryToWorkloadInstance(curr)
		if si != nil {
			for _, h := range s.workloadHandlers {
				h(si, event)
			}
		}
	}

	s.storeMutex.RLock()
	// We will only select entries in the same namespace
	entries := s.seWithSelectorByNamespace[curr.Namespace]
	s.storeMutex.RUnlock()

	// if there are no service entries, return now to avoid taking unnecessary locks
	if len(entries) == 0 {
		return
	}
	log.Debugf("Handle event %s for workload entry %s in namespace %s", event, curr.Name, curr.Namespace)
	instancesUpdated := []*model.ServiceInstance{}
	instancesDeleted := []*model.ServiceInstance{}
	workloadLabels := labels.Collection{wle.Labels}
	fullPush := false
	configsUpdated := map[model.ConfigKey]struct{}{}
	for _, se := range entries {
		selected := false
		if !workloadLabels.IsSupersetOf(se.entry.WorkloadSelector.Labels) {
			// The new labels no longer match; if the old ones did, the
			// old instances must be deleted.
			if oldWle != nil {
				oldWorkloadLabels := labels.Collection{oldWle.Labels}
				if oldWorkloadLabels.IsSupersetOf(se.entry.WorkloadSelector.Labels) {
					selected = true
					instance := convertWorkloadEntryToServiceInstances(oldWle, se.services, se.entry, &key)
					instancesDeleted = append(instancesDeleted, instance...)
				}
			}
		} else {
			selected = true
			instance := convertWorkloadEntryToServiceInstances(wle, se.services, se.entry, &key)
			instancesUpdated = append(instancesUpdated, instance...)
		}

		if selected {
			// If serviceentry's resolution is DNS, make a full push
			// TODO: maybe cds?
			if se.entry.Resolution == networking.ServiceEntry_DNS {
				fullPush = true
				for key, value := range getUpdatedConfigs(se.services) {
					configsUpdated[key] = value
				}
			}
		}
	}

	if len(instancesDeleted) > 0 {
		s.deleteExistingInstances(key, instancesDeleted)
	}

	if event != model.EventDelete {
		s.updateExistingInstances(key, instancesUpdated)
	} else {
		s.deleteExistingInstances(key, instancesUpdated)
	}

	if !fullPush {
		s.edsUpdate(append(instancesUpdated, instancesDeleted...), true)
		// trigger full xds push to the related sidecar proxy
		if event == model.EventAdd {
			s.XdsUpdater.ProxyUpdate(s.Cluster(), wle.Address)
		}
		return
	}

	// update eds cache only
	s.edsUpdate(append(instancesUpdated, instancesDeleted...), false)

	pushReq := &model.PushRequest{
		Full:           true,
		ConfigsUpdated: configsUpdated,
		Reason:         []model.TriggerReason{model.EndpointUpdate},
	}
	// trigger a full push
	s.XdsUpdater.ConfigUpdate(pushReq)
}

// getUpdatedConfigs returns related service entries when full push
func getUpdatedConfigs(services []*model.Service) map[model.ConfigKey]struct{} {
	configsUpdated := map[model.ConfigKey]struct{}{}
	for _, svc := range services {
		configsUpdated[model.ConfigKey{
			Kind:      gvk.ServiceEntry,
			Name:      string(svc.Hostname),
			Namespace: svc.Attributes.Namespace,
		}] = struct{}{}
	}
	return configsUpdated
}

// serviceEntryHandler defines the handler for service entries
func (s *ServiceEntryStore) serviceEntryHandler(old, curr config.Config, event model.Event) {
	cs := convertServices(curr)
	configsUpdated := map[model.ConfigKey]struct{}{}

	// If it is add/delete event we should always do a full push.
	// If it is update event, we should do full push,
	// only when services have changed - otherwise, just push endpoint updates.
	var addedSvcs, deletedSvcs, updatedSvcs, unchangedSvcs []*model.Service

	switch event {
	case model.EventUpdate:
		os := convertServices(old)
		if selectorChanged(old, curr) {
			// Consider all services are updated.
			mark := make(map[host.Name]*model.Service, len(cs))
			for _, svc := range cs {
				mark[svc.Hostname] = svc
				updatedSvcs = append(updatedSvcs, svc)
			}
			for _, svc := range os {
				if _, f := mark[svc.Hostname]; !f {
					updatedSvcs = append(updatedSvcs, svc)
				}
			}
		} else {
			addedSvcs, deletedSvcs, updatedSvcs, unchangedSvcs = servicesDiff(os, cs)
		}
	case model.EventDelete:
		deletedSvcs = cs
	case model.EventAdd:
		addedSvcs = cs
	default:
		// this should not happen
		unchangedSvcs = cs
	}

	for _, svc := range addedSvcs {
		s.XdsUpdater.SvcUpdate(s.Cluster(), string(svc.Hostname), svc.Attributes.Namespace, model.EventAdd)
		configsUpdated[makeConfigKey(svc)] = struct{}{}
	}

	for _, svc := range updatedSvcs {
		s.XdsUpdater.SvcUpdate(s.Cluster(), string(svc.Hostname), svc.Attributes.Namespace, model.EventUpdate)
		configsUpdated[makeConfigKey(svc)] = struct{}{}
	}
	// If service entry is deleted, cleanup endpoint shards for services.
	for _, svc := range deletedSvcs {
		s.XdsUpdater.SvcUpdate(s.Cluster(), string(svc.Hostname), svc.Attributes.Namespace, model.EventDelete)
		configsUpdated[makeConfigKey(svc)] = struct{}{}
	}

	if len(unchangedSvcs) > 0 {
		currentServiceEntry := curr.Spec.(*networking.ServiceEntry)
		oldServiceEntry := old.Spec.(*networking.ServiceEntry)
		// If this service entry had endpoints with IPs (i.e. resolution STATIC), then we do EDS update.
		// If the service entry had endpoints with FQDNs (i.e. resolution DNS), then we need to do
		// full push (as fqdn endpoints go via strict_dns clusters in cds).
		// Non DNS service entries are sent via EDS. So we should compare and update if such endpoints change.
		if currentServiceEntry.Resolution == networking.ServiceEntry_DNS {
			if !reflect.DeepEqual(currentServiceEntry.Endpoints, oldServiceEntry.Endpoints) {
				// fqdn endpoints have changed. Need full push
				for _, svc := range unchangedSvcs {
					configsUpdated[makeConfigKey(svc)] = struct{}{}
				}
			}
		}
	}

	fullPush := len(configsUpdated) > 0
	// if not full push needed, at least one service unchanged
	if !fullPush {
		// IP endpoints in a STATIC service entry has changed. We need EDS update
		// If will do full-push, leave the edsUpdate to that.
		// XXX We should do edsUpdate for all unchangedSvcs since we begin to calculate service
		// data according to this "configsUpdated" and thus remove the "!willFullPush" condition.
		instances := convertServiceEntryToInstances(curr, unchangedSvcs)
		key := configKey{
			kind:      serviceEntryConfigType,
			name:      curr.Name,
			namespace: curr.Namespace,
		}
		// If only instances have changed, just update the indexes for the changed instances.
		s.updateExistingInstances(key, instances)
		s.edsUpdate(instances, true)
		return
	}

	// Recomputing the index here is too expensive - lazy build when it is needed.
	// Only recompute indexes if services have changed.
	s.storeMutex.Lock()
	s.refreshIndexes.Store(true)
	s.storeMutex.Unlock()

	// When doing a full push, the non DNS added, updated, unchanged services trigger an eds update
	// so that endpoint shards are updated.
	allServices := make([]*model.Service, 0, len(addedSvcs)+len(updatedSvcs)+len(unchangedSvcs))
	nonDNSServices := make([]*model.Service, 0, len(addedSvcs)+len(updatedSvcs)+len(unchangedSvcs))
	allServices = append(allServices, addedSvcs...)
	allServices = append(allServices, updatedSvcs...)
	allServices = append(allServices, unchangedSvcs...)
	for _, svc := range allServices {
		if svc.Resolution != model.DNSLB {
			nonDNSServices = append(nonDNSServices, svc)
		}
	}
	// non dns service instances
	keys := map[instancesKey]struct{}{}
	for _, svc := range nonDNSServices {
		keys[instancesKey{hostname: svc.Hostname, namespace: curr.Namespace}] = struct{}{}
	}
	// update eds endpoint shards
	s.edsUpdateByKeys(keys, false)

	pushReq := &model.PushRequest{
		Full:           true,
		ConfigsUpdated: configsUpdated,
		Reason:         []model.TriggerReason{model.ServiceUpdate},
	}
	s.XdsUpdater.ConfigUpdate(pushReq)
}

// WorkloadInstanceHandler defines the handler for service instances generated by other registries
func (s *ServiceEntryStore) WorkloadInstanceHandler(wi *model.WorkloadInstance, event model.Event) {
	key := configKey{
		kind:      workloadInstanceConfigType,
		name:      wi.Name,
		namespace: wi.Namespace,
	}
	// Used to indicate if this event was fired for a pod->workloadentry conversion
	// and that the event can be ignored due to no relevant change in the workloadentry
	redundantEventForPod := false

	var addressToDelete string
	s.storeMutex.Lock()
	// this is from a pod. Store it in separate map so that
	// the refreshIndexes function can use these as well as the store ones.
	k := wi.Namespace + "/" + wi.Name
	switch event {
	case model.EventDelete:
		if _, exists := s.workloadInstancesByIP[wi.Endpoint.Address]; !exists {
			// multiple delete events for the same pod (succeeded/failed/unknown status repeating).
			redundantEventForPod = true
		} else {
			delete(s.workloadInstancesByIP, wi.Endpoint.Address)
			delete(s.workloadInstancesIPsByName, k)
		}
	default: // add or update
		// Check to see if the workload entry changed.
		// If it did, clear the old entry
		existing := s.workloadInstancesIPsByName[k]
		if existing != "" && existing != wi.Endpoint.Address {
			delete(s.workloadInstancesByIP, existing)
			addressToDelete = existing
		}
		if old, exists := s.workloadInstancesByIP[wi.Endpoint.Address]; exists {
			// If multiple k8s services select the same pod or a service has multiple ports,
			// we may be getting multiple events ignore them as we only care about the Endpoint IP itself.
			if model.WorkloadInstancesEqual(old, wi) {
				// ignore the update as nothing has changed
				redundantEventForPod = true
			}
		}
		s.workloadInstancesByIP[wi.Endpoint.Address] = wi
		s.workloadInstancesIPsByName[k] = wi.Endpoint.Address
	}
	// We will only select entries in the same namespace
	entries := s.seWithSelectorByNamespace[wi.Namespace]
	s.storeMutex.Unlock()

	// nothing useful to do.
	if len(entries) == 0 || redundantEventForPod {
		return
	}

	log.Debugf("Handle event %s for service instance (from %s) in namespace %s", event,
		wi.Endpoint.Address, wi.Namespace)
	instances := []*model.ServiceInstance{}
	instancesDeleted := []*model.ServiceInstance{}
	for _, se := range entries {
		workloadLabels := labels.Collection{wi.Endpoint.Labels}
		if !workloadLabels.IsSupersetOf(se.entry.WorkloadSelector.Labels) {
			// Not a match, skip this one
			continue
		}
		instance := convertWorkloadInstanceToServiceInstance(wi.Endpoint, se.services, se.entry)
		instances = append(instances, instance...)
		if addressToDelete != "" {
			// The pod moved to a new IP; also emit deletions for the
			// instances at the old address.
			for _, i := range instance {
				di := i.DeepCopy()
				di.Endpoint.Address = addressToDelete
				instancesDeleted = append(instancesDeleted, di)
			}
		}
	}

	if len(instancesDeleted) > 0 {
		s.deleteExistingInstances(key, instancesDeleted)
	}

	if event != model.EventDelete {
		s.updateExistingInstances(key, instances)
	} else {
		s.deleteExistingInstances(key, instances)
	}

	s.edsUpdate(instances, true)
}

// Provider reports this registry as the External (ServiceEntry) provider.
func (s *ServiceEntryStore) Provider() serviceregistry.ProviderID {
	return serviceregistry.External
}

// Cluster returns the cluster this registry belongs to.
func (s *ServiceEntryStore) Cluster() string {
	// DO NOT ASSIGN CLUSTER ID to non-k8s registries. This will prevent service entries with multiple
	// VIPs or CIDR ranges in the address field
	return ""
}

// AppendServiceHandler adds service resource event handler. Service Entries does not use these handlers.
func (s *ServiceEntryStore) AppendServiceHandler(_ func(*model.Service, model.Event)) {}

// AppendWorkloadHandler adds instance event handler. Service Entries does not use these handlers.
func (s *ServiceEntryStore) AppendWorkloadHandler(h func(*model.WorkloadInstance, model.Event)) {
	s.workloadHandlers = append(s.workloadHandlers, h)
}

// Run is used by some controllers to execute background jobs after init is done.
func (s *ServiceEntryStore) Run(_ <-chan struct{}) {}

// HasSynced always returns true for SE
func (s *ServiceEntryStore) HasSynced() bool {
	return true
}

// Services list declarations of all services in the system
func (s *ServiceEntryStore) Services() ([]*model.Service, error) {
	if !s.processServiceEntry {
		return nil, nil
	}
	s.maybeRefreshIndexes()
	s.storeMutex.RLock()
	defer s.storeMutex.RUnlock()
	return autoAllocateIPs(s.services), nil
}

// GetService retrieves a service by host name if it exists.
// NOTE: This does not auto allocate IPs. The service entry implementation is used only for tests.
func (s *ServiceEntryStore) GetService(hostname host.Name) (*model.Service, error) {
	if !s.processServiceEntry {
		return nil, nil
	}
	services, _ := s.Services()
	for _, service := range services {
		if service.Hostname == hostname {
			return service, nil
		}
	}

	return nil, nil
}

// InstancesByPort retrieves instances for a service on the given ports with labels that
// match any of the supplied labels. All instances match an empty tag list.
func (s *ServiceEntryStore) InstancesByPort(svc *model.Service, port int, labels labels.Collection) []*model.ServiceInstance {
	s.maybeRefreshIndexes()

	out := make([]*model.ServiceInstance, 0)
	s.storeMutex.RLock()
	defer s.storeMutex.RUnlock()

	instanceLists := s.instances[instancesKey{svc.Hostname, svc.Attributes.Namespace}]
	for _, instances := range instanceLists {
		for _, instance := range instances {
			if instance.Service.Hostname == svc.Hostname &&
				labels.HasSubsetOf(instance.Endpoint.Labels) &&
				portMatchSingle(instance, port) {
				out = append(out, instance)
			}
		}
	}

	return out
}

// servicesWithEntry contains a ServiceEntry and associated model.Services
type servicesWithEntry struct {
	entry    *networking.ServiceEntry
	services []*model.Service
}

// ResyncEDS will do a full EDS update. This is needed for some tests where we have many configs loaded without calling
// the config handlers.
// This should probably not be used in production code.
func (s *ServiceEntryStore) ResyncEDS() {
	s.maybeRefreshIndexes()
	allInstances := []*model.ServiceInstance{}
	s.storeMutex.RLock()
	for _, imap := range s.instances {
		for _, i := range imap {
			allInstances = append(allInstances, i...)
		}
	}
	s.storeMutex.RUnlock()
	s.edsUpdate(allInstances, true)
}

// edsUpdate triggers an EDS cache update for the given instances.
// And triggers a push if `push` is true.
func (s *ServiceEntryStore) edsUpdate(instances []*model.ServiceInstance, push bool) {
	// must call it here to refresh s.instances if necessary
	// otherwise may get no instances or miss some newly added instances
	s.maybeRefreshIndexes()

	// Find all keys we need to lookup
	keys := map[instancesKey]struct{}{}
	for _, i := range instances {
		keys[makeInstanceKey(i)] = struct{}{}
	}
	s.edsUpdateByKeys(keys, push)
}

// edsUpdateByKeys pushes (or cache-updates) endpoints for every
// hostname/namespace key in the set.
func (s *ServiceEntryStore) edsUpdateByKeys(keys map[instancesKey]struct{}, push bool) {
	// must call it here to refresh s.instances if necessary
	// otherwise may get no instances or miss some newly added instances
	s.maybeRefreshIndexes()
	allInstances := []*model.ServiceInstance{}
	s.storeMutex.RLock()
	for key := range keys {
		for _, i := range s.instances[key] {
			allInstances = append(allInstances, i...)
		}
	}
	s.storeMutex.RUnlock()

	// This was a delete
	if len(allInstances) == 0 {
		if push {
			for k := range keys {
				s.XdsUpdater.EDSUpdate(s.Cluster(), string(k.hostname), k.namespace, nil)
			}
		} else {
			for k := range keys {
				s.XdsUpdater.EDSCacheUpdate(s.Cluster(), string(k.hostname), k.namespace, nil)
			}
		}
		return
	}

	endpoints := make(map[instancesKey][]*model.IstioEndpoint)
	for _, instance := range allInstances {
		port := instance.ServicePort
		key := makeInstanceKey(instance)
		endpoints[key] = append(endpoints[key],
			&model.IstioEndpoint{
				Address:         instance.Endpoint.Address,
				EndpointPort:    instance.Endpoint.EndpointPort,
				ServicePortName: port.Name,
				Labels:          instance.Endpoint.Labels,
				ServiceAccount:  instance.Endpoint.ServiceAccount,
				Network:         instance.Endpoint.Network,
				Locality:        instance.Endpoint.Locality,
				LbWeight:        instance.Endpoint.LbWeight,
				TLSMode:         instance.Endpoint.TLSMode,
				WorkloadName:    instance.Endpoint.WorkloadName,
				Namespace:       instance.Endpoint.Namespace,
			})
	}

	if push {
		for k, eps := range endpoints {
			s.XdsUpdater.EDSUpdate(s.Cluster(), string(k.hostname), k.namespace, eps)
		}
	} else {
		for k, eps := range endpoints {
			s.XdsUpdater.EDSCacheUpdate(s.Cluster(), string(k.hostname), k.namespace, eps)
		}
	}
}

// maybeRefreshIndexes will iterate all ServiceEntries, convert to ServiceInstance (expensive),
// and populate the 'by host' and 'by ip' maps, if needed.
func (s *ServiceEntryStore) maybeRefreshIndexes() {
	// We need to take a full lock here, rather than just a read lock and then later updating s.instances
	// otherwise, what may happen is both the refresh thread and workload entry/pod handler both generate their own
	// view of s.instances and then write them, leading to inconsistent state. This lock ensures that both threads do
	// a full R+W before the other can start, rather than R,R,W,W.
	s.storeMutex.Lock()
	defer s.storeMutex.Unlock()

	// Without this pilot becomes very unstable even with few 100 ServiceEntry objects
	// - the N_clusters * N_update generates too much garbage ( yaml to proto)
	// This is reset on any change in ServiceEntries that needs index recomputation.
	if !s.refreshIndexes.Load() {
		return
	}
	defer s.refreshIndexes.Store(false)

	instanceMap := map[instancesKey]map[configKey][]*model.ServiceInstance{}
	ip2instances := map[string][]*model.ServiceInstance{}

	// First refresh service entry
	seWithSelectorByNamespace := map[string][]servicesWithEntry{}
	allServices := []*model.Service{}
	if s.processServiceEntry {
		for _, cfg := range s.store.ServiceEntries() {
			key := configKey{
				kind:      serviceEntryConfigType,
				name:      cfg.Name,
				namespace: cfg.Namespace,
			}
			updateInstances(key, convertServiceEntryToInstances(cfg, nil), instanceMap, ip2instances)
			services := convertServices(cfg)

			se := cfg.Spec.(*networking.ServiceEntry)
			// If we have a workload selector, we will add all instances from WorkloadEntries. Otherwise, we continue
			if se.WorkloadSelector != nil {
				seWithSelectorByNamespace[cfg.Namespace] = append(seWithSelectorByNamespace[cfg.Namespace], servicesWithEntry{se, services})
			}
			allServices = append(allServices, services...)
		}
	}

	// Second, refresh workload instances(pods)
	for _, workloadInstance := range s.workloadInstancesByIP {
		key := configKey{
			kind:      workloadInstanceConfigType,
			name:      workloadInstance.Name,
			namespace: workloadInstance.Namespace,
		}

		instances := []*model.ServiceInstance{}
		// We will only select entries in the same namespace
		entries := seWithSelectorByNamespace[workloadInstance.Namespace]
		for _, se := range entries {
			workloadLabels := labels.Collection{workloadInstance.Endpoint.Labels}
			if !workloadLabels.IsSupersetOf(se.entry.WorkloadSelector.Labels) {
				// Not a match, skip this one
				continue
			}
			instance := convertWorkloadInstanceToServiceInstance(workloadInstance.Endpoint, se.services, se.entry)
			instances = append(instances, instance...)
		}
		updateInstances(key, instances, instanceMap, ip2instances)
	}

	// Third, refresh workload entry
	wles, err := s.store.List(gvk.WorkloadEntry, model.NamespaceAll)
	if err != nil {
		log.Errorf("Error listing workload entries: %v", err)
	}

	for _, wcfg := range wles {
		wle := wcfg.Spec.(*networking.WorkloadEntry)
		key := configKey{
			kind:      workloadEntryConfigType,
			name:      wcfg.Name,
			namespace: wcfg.Namespace,
		}
		// We will only select entries in the same namespace
		entries := seWithSelectorByNamespace[wcfg.Namespace]
		for _, se := range entries {
			workloadLabels := labels.Collection{wle.Labels}
			if !workloadLabels.IsSupersetOf(se.entry.WorkloadSelector.Labels) {
				// Not a match, skip this one
				continue
			}
			updateInstances(key, convertWorkloadEntryToServiceInstances(wle, se.services, se.entry, &key), instanceMap, ip2instances)
		}
	}

	// Atomically (under the lock) swap in the freshly built indexes.
	s.seWithSelectorByNamespace = seWithSelectorByNamespace
	s.services = allServices
	s.instances = instanceMap
	s.ip2instance = ip2instances
}

// deleteExistingInstances removes the given instances from the indexes under the store lock.
func (s *ServiceEntryStore) deleteExistingInstances(ckey configKey, instances []*model.ServiceInstance) {
	s.storeMutex.Lock()
	defer s.storeMutex.Unlock()

	deleteInstances(ckey, instances, s.instances, s.ip2instance)
}

// This method is not concurrent safe.
func deleteInstances(key configKey, instances []*model.ServiceInstance, instanceMap map[instancesKey]map[configKey][]*model.ServiceInstance, ip2instance map[string][]*model.ServiceInstance) { for _, i := range instances { delete(instanceMap[makeInstanceKey(i)], key) delete(ip2instance, i.Endpoint.Address) } } // updateExistingInstances updates the indexes (by host, byip maps) for the passed in instances. func (s *ServiceEntryStore) updateExistingInstances(ckey configKey, instances []*model.ServiceInstance) { s.storeMutex.Lock() defer s.storeMutex.Unlock() // First, delete the existing instances to avoid leaking memory. deleteInstances(ckey, instances, s.instances, s.ip2instance) // Update the indexes with new instances. updateInstances(ckey, instances, s.instances, s.ip2instance) } // updateInstances updates the instance data to the passed in maps. // This is not concurrent safe. func updateInstances(key configKey, instances []*model.ServiceInstance, instanceMap map[instancesKey]map[configKey][]*model.ServiceInstance, ip2instance map[string][]*model.ServiceInstance) { for _, instance := range instances { ikey := makeInstanceKey(instance) if _, f := instanceMap[ikey]; !f { instanceMap[ikey] = map[configKey][]*model.ServiceInstance{} } instanceMap[ikey][key] = append(instanceMap[ikey][key], instance) ip2instance[instance.Endpoint.Address] = append(ip2instance[instance.Endpoint.Address], instance) } } // returns true if an instance's port matches with any in the provided list func portMatchSingle(instance *model.ServiceInstance, port int) bool { return port == 0 || port == instance.ServicePort.Port } // GetProxyServiceInstances lists service instances co-located with a given proxy // NOTE: The service objects in these instances do not have the auto allocated IP set. 
func (s *ServiceEntryStore) GetProxyServiceInstances(node *model.Proxy) []*model.ServiceInstance { s.maybeRefreshIndexes() s.storeMutex.RLock() defer s.storeMutex.RUnlock() out := make([]*model.ServiceInstance, 0) for _, ip := range node.IPAddresses { instances, found := s.ip2instance[ip] if found { out = append(out, instances...) } } return out } func (s *ServiceEntryStore) GetProxyWorkloadLabels(proxy *model.Proxy) labels.Collection { s.maybeRefreshIndexes() s.storeMutex.RLock() defer s.storeMutex.RUnlock() out := make(labels.Collection, 0) for _, ip := range proxy.IPAddresses { instances, found := s.ip2instance[ip] if found { for _, instance := range instances { out = append(out, instance.Endpoint.Labels) } } } return out } // GetIstioServiceAccounts implements model.ServiceAccounts operation // For service entries using workload entries or mix of workload entries and pods, // this function returns the appropriate service accounts used by these. func (s *ServiceEntryStore) GetIstioServiceAccounts(svc *model.Service, ports []int) []string { // service entries with built in endpoints have SANs as a dedicated field. // Those with selector labels will have service accounts embedded inside workloadEntries and pods as well. 
return model.GetServiceAccounts(svc, ports, s) } func (s *ServiceEntryStore) NetworkGateways() map[string][]*model.Gateway { // TODO implement mesh networks loading logic from kube controller if needed return nil } func servicesDiff(os []*model.Service, ns []*model.Service) ([]*model.Service, []*model.Service, []*model.Service, []*model.Service) { var added, deleted, updated, unchanged []*model.Service oldServiceHosts := make(map[string]*model.Service, len(os)) newServiceHosts := make(map[string]*model.Service, len(ns)) for _, s := range os { oldServiceHosts[string(s.Hostname)] = s } for _, s := range ns { newServiceHosts[string(s.Hostname)] = s } for name, oldSvc := range oldServiceHosts { newSvc, f := newServiceHosts[name] if !f { deleted = append(deleted, oldSvc) } else if !reflect.DeepEqual(oldSvc, newSvc) { updated = append(updated, newSvc) } else { unchanged = append(unchanged, newSvc) } } for name, newSvc := range newServiceHosts { if _, f := oldServiceHosts[name]; !f { added = append(added, newSvc) } } return added, deleted, updated, unchanged } // This method compares if the selector on a service entry has changed, meaning that it needs full push. func selectorChanged(old, curr config.Config) bool { o := old.Spec.(*networking.ServiceEntry) n := curr.Spec.(*networking.ServiceEntry) return !reflect.DeepEqual(o.WorkloadSelector, n.WorkloadSelector) } // Automatically allocates IPs for service entry services WITHOUT an // address field if the hostname is not a wildcard, or when resolution // is not NONE. The IPs are allocated from the reserved Class E subnet // (240.240.0.0/16) that is not reachable outside the pod. When DNS // capture is enabled, Envoy will resolve the DNS to these IPs. The // listeners for TCP services will also be set up on these IPs. The // IPs allocated to a service entry may differ from istiod to istiod // but it does not matter because these IPs only affect the listener // IPs on a given proxy managed by a given istiod. 
//
// NOTE: If DNS capture is not enabled by the proxy, the automatically
// allocated IP addresses do not take effect.
//
// The current algorithm to allocate IPs is deterministic across all istiods.
// At stable state, given two istiods with exact same set of services, there should
// be no change in XDS as the algorithm is just a dumb iterative one that allocates sequentially.
//
// TODO: Rather than sequentially allocate IPs, switch to a hash based allocation mechanism so that
// deletion of the oldest service entry does not cause change of IPs for all other service entries.
// Currently, the sequential allocation will result in unnecessary XDS reloads (lds/rds) when a
// service entry with auto allocated IP is deleted. We are trading off a perf problem (xds reload)
// for a usability problem (e.g., multiple cloud SQL or AWS RDS tcp services with no VIPs end up having
// the same port, causing traffic to go to the wrong place). Once we move to a deterministic hash-based
// allocation with deterministic collision resolution, the perf problem will go away. If the collision guarantee
// cannot be made within the IP address space we have (which is about 64K services), then we may need to
// have the sequential allocation algorithm as a fallback when too many collisions take place.
func autoAllocateIPs(services []*model.Service) []*model.Service {
	// i is everything from 240.240.0.(j) to 240.240.255.(j)
	// j is everything from 240.240.(i).1 to 240.240.(i).254
	// we can capture this in one integer variable.
	// given X, we can compute i by X/255, and j is X%255
	// To avoid allocating 240.240.(i).255, if X % 255 is 0, increment X.
	// For example, when X=510, the resulting IP would be 240.240.2.0 (invalid)
	// So we bump X to 511, so that the resulting IP is 240.240.2.1
	maxIPs := 255 * 255 // are we going to exceed this limit by processing 64K services?
	x := 0
	for _, svc := range services {
		// we can allocate IPs only if
		// 1. the service has resolution set to static/dns. We cannot allocate
		//    for NONE because we will not know the original DST IP that the application requested.
		// 2. the address is not set (0.0.0.0)
		// 3. the hostname is not a wildcard
		if svc.Address == constants.UnspecifiedIP && !svc.Hostname.IsWildCarded() &&
			svc.Resolution != model.Passthrough {
			x++
			// skip any value whose fourth octet would be 0 (see note above)
			if x%255 == 0 {
				x++
			}
			if x >= maxIPs {
				log.Errorf("out of IPs to allocate for service entries")
				return services
			}
			thirdOctet := x / 255
			fourthOctet := x % 255
			svc.AutoAllocatedAddress = fmt.Sprintf("240.240.%d.%d", thirdOctet, fourthOctet)
		}
	}
	return services
}

// makeConfigKey builds the ServiceEntry ConfigKey corresponding to the given service.
func makeConfigKey(svc *model.Service) model.ConfigKey {
	return model.ConfigKey{
		Kind:      gvk.ServiceEntry,
		Name:      string(svc.Hostname),
		Namespace: svc.Attributes.Namespace,
	}
}

// isHealthy checks that the provided WorkloadEntry is healthy. If health checks are not enabled,
// it is assumed to always be healthy
func isHealthy(cfg config.Config) bool {
	if parseHealthAnnotation(cfg.Annotations[status.WorkloadEntryHealthCheckAnnotation]) {
		// We default to false if the condition is not set. This ensures newly created WorkloadEntries
		// are treated as unhealthy until we prove they are healthy by probe success.
		return status.GetBoolConditionFromSpec(cfg, status.ConditionHealthy, false)
	}
	// If health check is not enabled, assume it's healthy
	return true
}

// parseHealthAnnotation interprets the health-check annotation value as a boolean;
// an empty or unparseable value is treated as false (health checking disabled).
func parseHealthAnnotation(s string) bool {
	if s == "" {
		return false
	}
	p, err := strconv.ParseBool(s)
	if err != nil {
		return false
	}
	return p
}

Fix Comments (#32851)

// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package serviceentry

import (
	"fmt"
	"reflect"
	"strconv"
	"sync"

	"go.uber.org/atomic"

	networking "istio.io/api/networking/v1alpha3"
	"istio.io/istio/pilot/pkg/features"
	"istio.io/istio/pilot/pkg/model"
	"istio.io/istio/pilot/pkg/model/status"
	"istio.io/istio/pilot/pkg/serviceregistry"
	"istio.io/istio/pilot/pkg/util/informermetric"
	"istio.io/istio/pkg/config"
	"istio.io/istio/pkg/config/constants"
	"istio.io/istio/pkg/config/host"
	"istio.io/istio/pkg/config/labels"
	"istio.io/istio/pkg/config/schema/gvk"
	"istio.io/pkg/log"
)

var _ serviceregistry.Instance = &ServiceEntryStore{}

// instancesKey acts as a key to identify all instances for a given hostname/namespace pair
// This is mostly used as an index
type instancesKey struct {
	hostname  host.Name
	namespace string
}

// makeInstanceKey derives the hostname/namespace index key for a service instance.
func makeInstanceKey(i *model.ServiceInstance) instancesKey {
	return instancesKey{i.Service.Hostname, i.Service.Attributes.Namespace}
}

// externalConfigType distinguishes which kind of config object produced a set of instances.
type externalConfigType int

const (
	serviceEntryConfigType externalConfigType = iota
	workloadEntryConfigType
	workloadInstanceConfigType
)

// configKey uniquely identifies a config object managed by this registry (ServiceEntry and WorkloadEntry)
type configKey struct {
	kind      externalConfigType
	name      string
	namespace string
}

// ServiceEntryStore communicates with ServiceEntry CRDs and monitors for changes
type ServiceEntryStore struct { // nolint:golint
	XdsUpdater model.XDSUpdater
	store      model.IstioConfigStore
	clusterID  string

	// storeMutex guards all of the index maps below.
	storeMutex sync.RWMutex

	// ip2instance maps endpoint IP -> all service instances with that address.
	ip2instance map[string][]*model.ServiceInstance
	// Endpoints table
	instances map[instancesKey]map[configKey][]*model.ServiceInstance
	// workload instances from kubernetes pods - map of ip -> workload instance
	workloadInstancesByIP map[string]*model.WorkloadInstance
	// Stores a map of workload instance name/namespace to address
	workloadInstancesIPsByName map[string]string
	// seWithSelectorByNamespace keeps track of ServiceEntries with selectors, keyed by namespaces
	seWithSelectorByNamespace map[string][]servicesWithEntry
	// services keeps track of all services - mainly used to return from Services() to avoid reconversion.
	services []*model.Service
	// refreshIndexes marks the indexes stale; they are rebuilt lazily by maybeRefreshIndexes.
	refreshIndexes *atomic.Bool

	workloadHandlers []func(*model.WorkloadInstance, model.Event)

	processServiceEntry bool
}

// ServiceDiscoveryOption configures a ServiceEntryStore at construction time.
type ServiceDiscoveryOption func(*ServiceEntryStore)

// DisableServiceEntryProcessing turns off ServiceEntry handling, leaving only
// WorkloadEntry processing enabled.
func DisableServiceEntryProcessing() ServiceDiscoveryOption {
	return func(o *ServiceEntryStore) {
		o.processServiceEntry = false
	}
}

// WithClusterID sets the cluster ID used when reporting informer errors.
func WithClusterID(clusterID string) ServiceDiscoveryOption {
	return func(o *ServiceEntryStore) {
		o.clusterID = clusterID
	}
}

// NewServiceDiscovery creates a new ServiceEntry discovery service
func NewServiceDiscovery(
	configController model.ConfigStoreCache,
	store model.IstioConfigStore,
	xdsUpdater model.XDSUpdater,
	options ...ServiceDiscoveryOption,
) *ServiceEntryStore {
	s := &ServiceEntryStore{
		XdsUpdater:                 xdsUpdater,
		store:                      store,
		ip2instance:                map[string][]*model.ServiceInstance{},
		instances:                  map[instancesKey]map[configKey][]*model.ServiceInstance{},
		workloadInstancesByIP:      map[string]*model.WorkloadInstance{},
		workloadInstancesIPsByName: map[string]string{},
		refreshIndexes:             atomic.NewBool(true),
		processServiceEntry:        true,
	}
	for _, o := range options {
		o(s)
	}
	if configController != nil {
		if s.processServiceEntry {
			configController.RegisterEventHandler(gvk.ServiceEntry, s.serviceEntryHandler)
		}
		configController.RegisterEventHandler(gvk.WorkloadEntry, s.workloadEntryHandler)
		_ = configController.SetWatchErrorHandler(informermetric.ErrorHandlerForCluster(s.clusterID))
	}
	return s
}

//
// workloadEntryHandler defines the handler for workload entries
// kube registry controller also calls this function indirectly via the Share interface
// When invoked via the kube registry controller, the old object is nil as the registry
// controller does its own deduping and has no notion of object versions
func (s *ServiceEntryStore) workloadEntryHandler(old, curr config.Config, event model.Event) {
	var oldWle *networking.WorkloadEntry
	if old.Spec != nil {
		oldWle = old.Spec.(*networking.WorkloadEntry)
	}
	wle := curr.Spec.(*networking.WorkloadEntry)
	key := configKey{
		kind:      workloadEntryConfigType,
		name:      curr.Name,
		namespace: curr.Namespace,
	}

	// If an entry is unhealthy, we will mark this as a delete instead
	// This ensures we do not track unhealthy endpoints
	if features.WorkloadEntryHealthChecks && !isHealthy(curr) {
		event = model.EventDelete
	}

	// fire off the k8s handlers
	if len(s.workloadHandlers) > 0 {
		si := convertWorkloadEntryToWorkloadInstance(curr)
		if si != nil {
			for _, h := range s.workloadHandlers {
				h(si, event)
			}
		}
	}

	s.storeMutex.RLock()
	// We will only select entries in the same namespace
	entries := s.seWithSelectorByNamespace[curr.Namespace]
	s.storeMutex.RUnlock()

	// if there are no service entries, return now to avoid taking unnecessary locks
	if len(entries) == 0 {
		return
	}
	log.Debugf("Handle event %s for workload entry %s in namespace %s", event, curr.Name, curr.Namespace)
	instancesUpdated := []*model.ServiceInstance{}
	instancesDeleted := []*model.ServiceInstance{}
	workloadLabels := labels.Collection{wle.Labels}
	fullPush := false
	configsUpdated := map[model.ConfigKey]struct{}{}
	for _, se := range entries {
		selected := false
		if !workloadLabels.IsSupersetOf(se.entry.WorkloadSelector.Labels) {
			// The new labels no longer match this entry's selector. If the old labels
			// did match, the instances derived from the old entry must be deleted.
			if oldWle != nil {
				oldWorkloadLabels := labels.Collection{oldWle.Labels}
				if oldWorkloadLabels.IsSupersetOf(se.entry.WorkloadSelector.Labels) {
					selected = true
					instance := convertWorkloadEntryToServiceInstances(oldWle, se.services, se.entry, &key)
					instancesDeleted = append(instancesDeleted, instance...)
				}
			}
		} else {
			selected = true
			instance := convertWorkloadEntryToServiceInstances(wle, se.services, se.entry, &key)
			instancesUpdated = append(instancesUpdated, instance...)
		}

		if selected {
			// If serviceentry's resolution is DNS, make a full push
			// TODO: maybe cds?
			if se.entry.Resolution == networking.ServiceEntry_DNS {
				fullPush = true
				for key, value := range getUpdatedConfigs(se.services) {
					configsUpdated[key] = value
				}
			}
		}
	}

	if len(instancesDeleted) > 0 {
		s.deleteExistingInstances(key, instancesDeleted)
	}

	if event != model.EventDelete {
		s.updateExistingInstances(key, instancesUpdated)
	} else {
		s.deleteExistingInstances(key, instancesUpdated)
	}

	if !fullPush {
		s.edsUpdate(append(instancesUpdated, instancesDeleted...), true)
		// trigger full xds push to the related sidecar proxy
		if event == model.EventAdd {
			s.XdsUpdater.ProxyUpdate(s.Cluster(), wle.Address)
		}
		return
	}

	// update eds cache only
	s.edsUpdate(append(instancesUpdated, instancesDeleted...), false)

	pushReq := &model.PushRequest{
		Full:           true,
		ConfigsUpdated: configsUpdated,
		Reason:         []model.TriggerReason{model.EndpointUpdate},
	}
	// trigger a full push
	s.XdsUpdater.ConfigUpdate(pushReq)
}

// getUpdatedConfigs returns related service entries when full push
func getUpdatedConfigs(services []*model.Service) map[model.ConfigKey]struct{} {
	configsUpdated := map[model.ConfigKey]struct{}{}
	for _, svc := range services {
		configsUpdated[model.ConfigKey{
			Kind:      gvk.ServiceEntry,
			Name:      string(svc.Hostname),
			Namespace: svc.Attributes.Namespace,
		}] = struct{}{}
	}
	return configsUpdated
}

// serviceEntryHandler defines the handler for service entries
func (s *ServiceEntryStore) serviceEntryHandler(old, curr config.Config, event model.Event) {
	cs := convertServices(curr)
	configsUpdated := map[model.ConfigKey]struct{}{}

	// If it is add/delete event we should always do a full push.
	// If it is update event, we should do full push,
	// only when services have changed - otherwise, just push endpoint updates.
	var addedSvcs, deletedSvcs, updatedSvcs, unchangedSvcs []*model.Service

	switch event {
	case model.EventUpdate:
		os := convertServices(old)
		if selectorChanged(old, curr) {
			// Consider all services are updated.
			mark := make(map[host.Name]*model.Service, len(cs))
			for _, svc := range cs {
				mark[svc.Hostname] = svc
				updatedSvcs = append(updatedSvcs, svc)
			}
			for _, svc := range os {
				if _, f := mark[svc.Hostname]; !f {
					updatedSvcs = append(updatedSvcs, svc)
				}
			}
		} else {
			addedSvcs, deletedSvcs, updatedSvcs, unchangedSvcs = servicesDiff(os, cs)
		}
	case model.EventDelete:
		deletedSvcs = cs
	case model.EventAdd:
		addedSvcs = cs
	default:
		// this should not happen
		unchangedSvcs = cs
	}

	for _, svc := range addedSvcs {
		s.XdsUpdater.SvcUpdate(s.Cluster(), string(svc.Hostname), svc.Attributes.Namespace, model.EventAdd)
		configsUpdated[makeConfigKey(svc)] = struct{}{}
	}

	for _, svc := range updatedSvcs {
		s.XdsUpdater.SvcUpdate(s.Cluster(), string(svc.Hostname), svc.Attributes.Namespace, model.EventUpdate)
		configsUpdated[makeConfigKey(svc)] = struct{}{}
	}

	// If service entry is deleted, cleanup endpoint shards for services.
	for _, svc := range deletedSvcs {
		s.XdsUpdater.SvcUpdate(s.Cluster(), string(svc.Hostname), svc.Attributes.Namespace, model.EventDelete)
		configsUpdated[makeConfigKey(svc)] = struct{}{}
	}

	if len(unchangedSvcs) > 0 {
		currentServiceEntry := curr.Spec.(*networking.ServiceEntry)
		oldServiceEntry := old.Spec.(*networking.ServiceEntry)
		// If this service entry had endpoints with IPs (i.e. resolution STATIC), then we do EDS update.
		// If the service entry had endpoints with FQDNs (i.e. resolution DNS), then we need to do
		// full push (as fqdn endpoints go via strict_dns clusters in cds).
		// Non DNS service entries are sent via EDS. So we should compare and update if such endpoints change.
		if currentServiceEntry.Resolution == networking.ServiceEntry_DNS {
			if !reflect.DeepEqual(currentServiceEntry.Endpoints, oldServiceEntry.Endpoints) {
				// fqdn endpoints have changed. Need full push
				for _, svc := range unchangedSvcs {
					configsUpdated[makeConfigKey(svc)] = struct{}{}
				}
			}
		}
	}

	fullPush := len(configsUpdated) > 0
	// if not full push needed, at least one service unchanged
	if !fullPush {
		// IP endpoints in a STATIC service entry has changed. We need EDS update
		// If will do full-push, leave the edsUpdate to that.
		// XXX We should do edsUpdate for all unchangedSvcs since we begin to calculate service
		// data according to this "configsUpdated" and thus remove the "!willFullPush" condition.
		instances := convertServiceEntryToInstances(curr, unchangedSvcs)
		key := configKey{
			kind:      serviceEntryConfigType,
			name:      curr.Name,
			namespace: curr.Namespace,
		}
		// If only instances have changed, just update the indexes for the changed instances.
		s.updateExistingInstances(key, instances)
		s.edsUpdate(instances, true)
		return
	}

	// Recomputing the index here is too expensive - lazy build when it is needed.
	// Only recompute indexes if services have changed.
	s.storeMutex.Lock()
	s.refreshIndexes.Store(true)
	s.storeMutex.Unlock()

	// When doing a full push, the non DNS added, updated, unchanged services trigger an eds update
	// so that endpoint shards are updated.
	allServices := make([]*model.Service, 0, len(addedSvcs)+len(updatedSvcs)+len(unchangedSvcs))
	nonDNSServices := make([]*model.Service, 0, len(addedSvcs)+len(updatedSvcs)+len(unchangedSvcs))
	allServices = append(allServices, addedSvcs...)
	allServices = append(allServices, updatedSvcs...)
	allServices = append(allServices, unchangedSvcs...)
	for _, svc := range allServices {
		if svc.Resolution != model.DNSLB {
			nonDNSServices = append(nonDNSServices, svc)
		}
	}
	// non dns service instances
	keys := map[instancesKey]struct{}{}
	for _, svc := range nonDNSServices {
		keys[instancesKey{hostname: svc.Hostname, namespace: curr.Namespace}] = struct{}{}
	}
	// update eds endpoint shards
	s.edsUpdateByKeys(keys, false)

	pushReq := &model.PushRequest{
		Full:           true,
		ConfigsUpdated: configsUpdated,
		Reason:         []model.TriggerReason{model.ServiceUpdate},
	}
	s.XdsUpdater.ConfigUpdate(pushReq)
}

// WorkloadInstanceHandler defines the handler for service instances generated by other registries
func (s *ServiceEntryStore) WorkloadInstanceHandler(wi *model.WorkloadInstance, event model.Event) {
	key := configKey{
		kind:      workloadInstanceConfigType,
		name:      wi.Name,
		namespace: wi.Namespace,
	}
	// Used to indicate if this event was fired for a pod->workloadentry conversion
	// and that the event can be ignored due to no relevant change in the workloadentry
	redundantEventForPod := false

	var addressToDelete string
	s.storeMutex.Lock()
	// this is from a pod. Store it in separate map so that
	// the refreshIndexes function can use these as well as the store ones.
	k := wi.Namespace + "/" + wi.Name
	switch event {
	case model.EventDelete:
		if _, exists := s.workloadInstancesByIP[wi.Endpoint.Address]; !exists {
			// multiple delete events for the same pod (succeeded/failed/unknown status repeating).
			redundantEventForPod = true
		} else {
			delete(s.workloadInstancesByIP, wi.Endpoint.Address)
			delete(s.workloadInstancesIPsByName, k)
		}
	default: // add or update
		// Check to see if the workload entry changed.
		// If it did, clear the old entry
		existing := s.workloadInstancesIPsByName[k]
		if existing != "" && existing != wi.Endpoint.Address {
			// The pod moved to a new IP; drop the stale index entry and remember
			// the old address so the derived instances can be deleted below.
			delete(s.workloadInstancesByIP, existing)
			addressToDelete = existing
		}
		if old, exists := s.workloadInstancesByIP[wi.Endpoint.Address]; exists {
			// If multiple k8s services select the same pod or a service has multiple ports,
			// we may be getting multiple events ignore them as we only care about the Endpoint IP itself.
			if model.WorkloadInstancesEqual(old, wi) {
				// ignore the update as nothing has changed
				redundantEventForPod = true
			}
		}
		s.workloadInstancesByIP[wi.Endpoint.Address] = wi
		s.workloadInstancesIPsByName[k] = wi.Endpoint.Address
	}
	// We will only select entries in the same namespace
	entries := s.seWithSelectorByNamespace[wi.Namespace]
	s.storeMutex.Unlock()

	// nothing useful to do.
	if len(entries) == 0 || redundantEventForPod {
		return
	}
	log.Debugf("Handle event %s for service instance (from %s) in namespace %s", event,
		wi.Endpoint.Address, wi.Namespace)
	instances := []*model.ServiceInstance{}
	instancesDeleted := []*model.ServiceInstance{}
	for _, se := range entries {
		workloadLabels := labels.Collection{wi.Endpoint.Labels}
		if !workloadLabels.IsSupersetOf(se.entry.WorkloadSelector.Labels) {
			// Not a match, skip this one
			continue
		}
		instance := convertWorkloadInstanceToServiceInstance(wi.Endpoint, se.services, se.entry)
		instances = append(instances, instance...)
		if addressToDelete != "" {
			// Emit delete records for the instances derived from the pod's previous address.
			for _, i := range instance {
				di := i.DeepCopy()
				di.Endpoint.Address = addressToDelete
				instancesDeleted = append(instancesDeleted, di)
			}
		}
	}

	if len(instancesDeleted) > 0 {
		s.deleteExistingInstances(key, instancesDeleted)
	}

	if event != model.EventDelete {
		s.updateExistingInstances(key, instances)
	} else {
		s.deleteExistingInstances(key, instances)
	}
	s.edsUpdate(instances, true)
}

// Provider identifies this registry as the External (ServiceEntry) provider.
func (s *ServiceEntryStore) Provider() serviceregistry.ProviderID {
	return serviceregistry.External
}

// Cluster returns the cluster ID for this registry; always empty for ServiceEntry.
func (s *ServiceEntryStore) Cluster() string {
	// DO NOT ASSIGN CLUSTER ID to non-k8s registries. This will prevent service entries with multiple
	// VIPs or CIDR ranges in the address field
	return ""
}

// AppendServiceHandler adds service resource event handler. Service Entries does not use these handlers.
func (s *ServiceEntryStore) AppendServiceHandler(_ func(*model.Service, model.Event)) {}

// AppendWorkloadHandler adds instance event handler. Service Entries does not use these handlers.
func (s *ServiceEntryStore) AppendWorkloadHandler(h func(*model.WorkloadInstance, model.Event)) {
	s.workloadHandlers = append(s.workloadHandlers, h)
}

// Run is used by some controllers to execute background jobs after init is done.
func (s *ServiceEntryStore) Run(_ <-chan struct{}) {}

// HasSynced always returns true for SE
func (s *ServiceEntryStore) HasSynced() bool {
	return true
}

// Services list declarations of all services in the system
func (s *ServiceEntryStore) Services() ([]*model.Service, error) {
	if !s.processServiceEntry {
		return nil, nil
	}
	s.maybeRefreshIndexes()
	s.storeMutex.RLock()
	defer s.storeMutex.RUnlock()
	return autoAllocateIPs(s.services), nil
}

// GetService retrieves a service by host name if it exists.
// NOTE: The service entry implementation is used only for tests.
func (s *ServiceEntryStore) GetService(hostname host.Name) (*model.Service, error) {
	if !s.processServiceEntry {
		return nil, nil
	}
	// TODO(@hzxuzhonghu): only get the specific service instead of converting all the serviceEntries
	services, _ := s.Services()
	for _, service := range services {
		if service.Hostname == hostname {
			return service, nil
		}
	}

	return nil, nil
}

// InstancesByPort retrieves instances for a service on the given ports with labels that
// match any of the supplied labels. All instances match an empty tag list.
func (s *ServiceEntryStore) InstancesByPort(svc *model.Service, port int, labels labels.Collection) []*model.ServiceInstance {
	s.maybeRefreshIndexes()

	out := make([]*model.ServiceInstance, 0)

	s.storeMutex.RLock()
	defer s.storeMutex.RUnlock()

	instanceLists := s.instances[instancesKey{svc.Hostname, svc.Attributes.Namespace}]
	for _, instances := range instanceLists {
		for _, instance := range instances {
			if instance.Service.Hostname == svc.Hostname &&
				labels.HasSubsetOf(instance.Endpoint.Labels) &&
				portMatchSingle(instance, port) {
				out = append(out, instance)
			}
		}
	}

	return out
}

// servicesWithEntry contains a ServiceEntry and associated model.Services
type servicesWithEntry struct {
	entry    *networking.ServiceEntry
	services []*model.Service
}

// ResyncEDS will do a full EDS update. This is needed for some tests where we have many configs loaded without calling
// the config handlers.
// This should probably not be used in production code.
func (s *ServiceEntryStore) ResyncEDS() {
	s.maybeRefreshIndexes()
	allInstances := []*model.ServiceInstance{}
	s.storeMutex.RLock()
	for _, imap := range s.instances {
		for _, i := range imap {
			allInstances = append(allInstances, i...)
		}
	}
	s.storeMutex.RUnlock()
	s.edsUpdate(allInstances, true)
}

// edsUpdate triggers an EDS cache update for the given instances.
// And triggers a push if `push` is true.
func (s *ServiceEntryStore) edsUpdate(instances []*model.ServiceInstance, push bool) {
	// must call it here to refresh s.instances if necessary
	// otherwise may get no instances or miss some newly added instances
	s.maybeRefreshIndexes()

	// Find all keys we need to lookup
	keys := map[instancesKey]struct{}{}
	for _, i := range instances {
		keys[makeInstanceKey(i)] = struct{}{}
	}
	s.edsUpdateByKeys(keys, push)
}

// edsUpdateByKeys performs the EDS (cache) update for all instances indexed
// under the given hostname/namespace keys.
func (s *ServiceEntryStore) edsUpdateByKeys(keys map[instancesKey]struct{}, push bool) {
	// must call it here to refresh s.instances if necessary
	// otherwise may get no instances or miss some newly added instances
	s.maybeRefreshIndexes()
	allInstances := []*model.ServiceInstance{}
	s.storeMutex.RLock()
	for key := range keys {
		for _, i := range s.instances[key] {
			allInstances = append(allInstances, i...)
		}
	}
	s.storeMutex.RUnlock()

	// This was a delete
	if len(allInstances) == 0 {
		if push {
			for k := range keys {
				s.XdsUpdater.EDSUpdate(s.Cluster(), string(k.hostname), k.namespace, nil)
			}
		} else {
			for k := range keys {
				s.XdsUpdater.EDSCacheUpdate(s.Cluster(), string(k.hostname), k.namespace, nil)
			}
		}
		return
	}

	endpoints := make(map[instancesKey][]*model.IstioEndpoint)
	for _, instance := range allInstances {
		port := instance.ServicePort
		key := makeInstanceKey(instance)
		endpoints[key] = append(endpoints[key],
			&model.IstioEndpoint{
				Address:         instance.Endpoint.Address,
				EndpointPort:    instance.Endpoint.EndpointPort,
				ServicePortName: port.Name,
				Labels:          instance.Endpoint.Labels,
				ServiceAccount:  instance.Endpoint.ServiceAccount,
				Network:         instance.Endpoint.Network,
				Locality:        instance.Endpoint.Locality,
				LbWeight:        instance.Endpoint.LbWeight,
				TLSMode:         instance.Endpoint.TLSMode,
				WorkloadName:    instance.Endpoint.WorkloadName,
				Namespace:       instance.Endpoint.Namespace,
			})
	}

	if push {
		for k, eps := range endpoints {
			s.XdsUpdater.EDSUpdate(s.Cluster(), string(k.hostname), k.namespace, eps)
		}
	} else {
		for k, eps := range endpoints {
			s.XdsUpdater.EDSCacheUpdate(s.Cluster(), string(k.hostname),
				k.namespace, eps)
		}
	}
}

// maybeRefreshIndexes will iterate all ServiceEntries, convert to ServiceInstance (expensive),
// and populate the 'by host' and 'by ip' maps, if needed.
func (s *ServiceEntryStore) maybeRefreshIndexes() {
	// We need to take a full lock here, rather than just a read lock and then later updating s.instances
	// otherwise, what may happen is both the refresh thread and workload entry/pod handler both generate their own
	// view of s.instances and then write them, leading to inconsistent state. This lock ensures that both threads do
	// a full R+W before the other can start, rather than R,R,W,W.
	s.storeMutex.Lock()
	defer s.storeMutex.Unlock()

	// Without this pilot becomes very unstable even with few 100 ServiceEntry objects
	// - the N_clusters * N_update generates too much garbage ( yaml to proto)
	// This is reset on any change in ServiceEntries that needs index recomputation.
	if !s.refreshIndexes.Load() {
		return
	}
	defer s.refreshIndexes.Store(false)

	instanceMap := map[instancesKey]map[configKey][]*model.ServiceInstance{}
	ip2instances := map[string][]*model.ServiceInstance{}

	// First refresh service entry
	seWithSelectorByNamespace := map[string][]servicesWithEntry{}
	allServices := []*model.Service{}
	if s.processServiceEntry {
		for _, cfg := range s.store.ServiceEntries() {
			key := configKey{
				kind:      serviceEntryConfigType,
				name:      cfg.Name,
				namespace: cfg.Namespace,
			}
			updateInstances(key, convertServiceEntryToInstances(cfg, nil), instanceMap, ip2instances)
			services := convertServices(cfg)

			se := cfg.Spec.(*networking.ServiceEntry)
			// If we have a workload selector, we will add all instances from WorkloadEntries. Otherwise, we continue
			if se.WorkloadSelector != nil {
				seWithSelectorByNamespace[cfg.Namespace] = append(seWithSelectorByNamespace[cfg.Namespace], servicesWithEntry{se, services})
			}
			allServices = append(allServices, services...)
		}
	}

	// Second, refresh workload instances(pods)
	for _, workloadInstance := range s.workloadInstancesByIP {
		key := configKey{
			kind:      workloadInstanceConfigType,
			name:      workloadInstance.Name,
			namespace: workloadInstance.Namespace,
		}

		instances := []*model.ServiceInstance{}
		// We will only select entries in the same namespace
		entries := seWithSelectorByNamespace[workloadInstance.Namespace]
		for _, se := range entries {
			workloadLabels := labels.Collection{workloadInstance.Endpoint.Labels}
			if !workloadLabels.IsSupersetOf(se.entry.WorkloadSelector.Labels) {
				// Not a match, skip this one
				continue
			}
			instance := convertWorkloadInstanceToServiceInstance(workloadInstance.Endpoint, se.services, se.entry)
			instances = append(instances, instance...)
		}
		updateInstances(key, instances, instanceMap, ip2instances)
	}

	// Third, refresh workload entry
	wles, err := s.store.List(gvk.WorkloadEntry, model.NamespaceAll)
	if err != nil {
		log.Errorf("Error listing workload entries: %v", err)
	}

	for _, wcfg := range wles {
		wle := wcfg.Spec.(*networking.WorkloadEntry)
		key := configKey{
			kind:      workloadEntryConfigType,
			name:      wcfg.Name,
			namespace: wcfg.Namespace,
		}
		// We will only select entries in the same namespace
		entries := seWithSelectorByNamespace[wcfg.Namespace]
		for _, se := range entries {
			workloadLabels := labels.Collection{wle.Labels}
			if !workloadLabels.IsSupersetOf(se.entry.WorkloadSelector.Labels) {
				// Not a match, skip this one
				continue
			}
			updateInstances(key, convertWorkloadEntryToServiceInstances(wle, se.services, se.entry, &key), instanceMap, ip2instances)
		}
	}

	// Swap in the freshly built indexes (still under the full lock).
	s.seWithSelectorByNamespace = seWithSelectorByNamespace
	s.services = allServices
	s.instances = instanceMap
	s.ip2instance = ip2instances
}

// deleteExistingInstances removes the given instances from both indexes under the store lock.
func (s *ServiceEntryStore) deleteExistingInstances(ckey configKey, instances []*model.ServiceInstance) {
	s.storeMutex.Lock()
	defer s.storeMutex.Unlock()

	deleteInstances(ckey, instances, s.instances, s.ip2instance)
}

// This method is not concurrent safe.
func deleteInstances(key configKey, instances []*model.ServiceInstance, instanceMap map[instancesKey]map[configKey][]*model.ServiceInstance,
	ip2instance map[string][]*model.ServiceInstance) {
	for _, i := range instances {
		delete(instanceMap[makeInstanceKey(i)], key)
		delete(ip2instance, i.Endpoint.Address)
	}
}

// updateExistingInstances updates the indexes (by host, byip maps) for the passed in instances.
func (s *ServiceEntryStore) updateExistingInstances(ckey configKey, instances []*model.ServiceInstance) {
	s.storeMutex.Lock()
	defer s.storeMutex.Unlock()
	// First, delete the existing instances to avoid leaking memory.
	deleteInstances(ckey, instances, s.instances, s.ip2instance)
	// Update the indexes with new instances.
	updateInstances(ckey, instances, s.instances, s.ip2instance)
}

// updateInstances updates the instance data to the passed in maps.
// This is not concurrent safe.
func updateInstances(key configKey, instances []*model.ServiceInstance, instanceMap map[instancesKey]map[configKey][]*model.ServiceInstance,
	ip2instance map[string][]*model.ServiceInstance) {
	for _, instance := range instances {
		ikey := makeInstanceKey(instance)
		if _, f := instanceMap[ikey]; !f {
			instanceMap[ikey] = map[configKey][]*model.ServiceInstance{}
		}
		instanceMap[ikey][key] = append(instanceMap[ikey][key], instance)
		ip2instance[instance.Endpoint.Address] = append(ip2instance[instance.Endpoint.Address], instance)
	}
}

// returns true if an instance's port matches with any in the provided list
func portMatchSingle(instance *model.ServiceInstance, port int) bool {
	return port == 0 || port == instance.ServicePort.Port
}

// GetProxyServiceInstances lists service instances co-located with a given proxy
// NOTE: The service objects in these instances do not have the auto allocated IP set.
// GetProxyServiceInstances returns the service instances whose endpoint IPs
// match one of the proxy's IP addresses, using the ip2instance index.
func (s *ServiceEntryStore) GetProxyServiceInstances(node *model.Proxy) []*model.ServiceInstance {
	// Rebuild the indexes if needed before reading them under the read lock.
	s.maybeRefreshIndexes()
	s.storeMutex.RLock()
	defer s.storeMutex.RUnlock()
	out := make([]*model.ServiceInstance, 0)

	for _, ip := range node.IPAddresses {
		instances, found := s.ip2instance[ip]
		if found {
			out = append(out, instances...)
		}
	}
	return out
}

// GetProxyWorkloadLabels collects the endpoint labels of every instance
// indexed under any of the proxy's IP addresses.
func (s *ServiceEntryStore) GetProxyWorkloadLabels(proxy *model.Proxy) labels.Collection {
	s.maybeRefreshIndexes()
	s.storeMutex.RLock()
	defer s.storeMutex.RUnlock()
	out := make(labels.Collection, 0)

	for _, ip := range proxy.IPAddresses {
		instances, found := s.ip2instance[ip]
		if found {
			for _, instance := range instances {
				out = append(out, instance.Endpoint.Labels)
			}
		}
	}
	return out
}

// GetIstioServiceAccounts implements model.ServiceAccounts operation
// For service entries using workload entries or mix of workload entries and pods,
// this function returns the appropriate service accounts used by these.
func (s *ServiceEntryStore) GetIstioServiceAccounts(svc *model.Service, ports []int) []string {
	// service entries with built in endpoints have SANs as a dedicated field.
	// Those with selector labels will have service accounts embedded inside workloadEntries and pods as well.
	return model.GetServiceAccounts(svc, ports, s)
}

// NetworkGateways returns nil: network gateway discovery is not implemented
// for the service entry registry.
func (s *ServiceEntryStore) NetworkGateways() map[string][]*model.Gateway {
	// TODO implement mesh networks loading logic from kube controller if needed
	return nil
}

// servicesDiff partitions the old (os) and new (ns) service lists, keyed by
// hostname, into (added, deleted, updated, unchanged) sets. Services present
// in both lists are compared with reflect.DeepEqual to decide updated vs
// unchanged.
func servicesDiff(os []*model.Service, ns []*model.Service) ([]*model.Service, []*model.Service, []*model.Service, []*model.Service) {
	var added, deleted, updated, unchanged []*model.Service

	oldServiceHosts := make(map[string]*model.Service, len(os))
	newServiceHosts := make(map[string]*model.Service, len(ns))
	for _, s := range os {
		oldServiceHosts[string(s.Hostname)] = s
	}
	for _, s := range ns {
		newServiceHosts[string(s.Hostname)] = s
	}

	for name, oldSvc := range oldServiceHosts {
		newSvc, f := newServiceHosts[name]
		if !f {
			deleted = append(deleted, oldSvc)
		} else if !reflect.DeepEqual(oldSvc, newSvc) {
			updated = append(updated, newSvc)
		} else {
			unchanged = append(unchanged, newSvc)
		}
	}
	for name, newSvc := range newServiceHosts {
		if _, f := oldServiceHosts[name]; !f {
			added = append(added, newSvc)
		}
	}

	return added, deleted, updated, unchanged
}

// This method compares if the selector on a service entry has changed, meaning that it needs full push.
func selectorChanged(old, curr config.Config) bool {
	o := old.Spec.(*networking.ServiceEntry)
	n := curr.Spec.(*networking.ServiceEntry)
	return !reflect.DeepEqual(o.WorkloadSelector, n.WorkloadSelector)
}

// Automatically allocates IPs for service entry services WITHOUT an
// address field if the hostname is not a wildcard, or when resolution
// is not NONE. The IPs are allocated from the reserved Class E subnet
// (240.240.0.0/16) that is not reachable outside the pod. When DNS
// capture is enabled, Envoy will resolve the DNS to these IPs. The
// listeners for TCP services will also be set up on these IPs. The
// IPs allocated to a service entry may differ from istiod to istiod
// but it does not matter because these IPs only affect the listener
// IPs on a given proxy managed by a given istiod.
//
// NOTE: If DNS capture is not enabled by the proxy, the automatically
// allocated IP addresses do not take effect.
//
// The current algorithm to allocate IPs is deterministic across all istiods.
// At stable state, given two istiods with exact same set of services, there should
// be no change in XDS as the algorithm is just a dumb iterative one that allocates sequentially.
//
// TODO: Rather than sequentially allocate IPs, switch to a hash based allocation mechanism so that
// deletion of the oldest service entry does not cause change of IPs for all other service entries.
// Currently, the sequential allocation will result in unnecessary XDS reloads (lds/rds) when a
// service entry with auto allocated IP is deleted. We are trading off a perf problem (xds reload)
// for a usability problem (e.g., multiple cloud SQL or AWS RDS tcp services with no VIPs end up having
// the same port, causing traffic to go to the wrong place). Once we move to a deterministic hash-based
// allocation with deterministic collision resolution, the perf problem will go away. If the collision guarantee
// cannot be made within the IP address space we have (which is about 64K services), then we may need to
// have the sequential allocation algorithm as a fallback when too many collisions take place.
func autoAllocateIPs(services []*model.Service) []*model.Service {
	// i is everything from 240.240.0.(j) to 240.240.255.(j)
	// j is everything from 240.240.(i).1 to 240.240.(i).254
	// we can capture this in one integer variable.
	// given X, we can compute i by X/255, and j is X%255
	// To avoid allocating 240.240.(i).255, if X % 255 is 0, increment X.
	// For example, when X=510, the resulting IP would be 240.240.2.0 (invalid)
	// So we bump X to 511, so that the resulting IP is 240.240.2.1
	maxIPs := 255 * 255 // are we going to exceed this limit by processing 64K services?
	x := 0
	for _, svc := range services {
		// we can allocate IPs only if
		// 1. the service has resolution set to static/dns. We cannot allocate
		//    for NONE because we will not know the original DST IP that the application requested.
		// 2. the address is not set (0.0.0.0)
		// 3. the hostname is not a wildcard
		if svc.Address == constants.UnspecifiedIP && !svc.Hostname.IsWildCarded() &&
			svc.Resolution != model.Passthrough {
			x++
			// Skip the .255 fourth octet (broadcast-style address) by bumping
			// the counter when it would land there.
			if x%255 == 0 {
				x++
			}
			if x >= maxIPs {
				log.Errorf("out of IPs to allocate for service entries")
				return services
			}
			thirdOctet := x / 255
			fourthOctet := x % 255
			svc.AutoAllocatedAddress = fmt.Sprintf("240.240.%d.%d", thirdOctet, fourthOctet)
		}
	}
	return services
}

// makeConfigKey builds the ConfigKey identifying the ServiceEntry config that
// corresponds to the given service (kind + hostname + namespace).
func makeConfigKey(svc *model.Service) model.ConfigKey {
	return model.ConfigKey{
		Kind:      gvk.ServiceEntry,
		Name:      string(svc.Hostname),
		Namespace: svc.Attributes.Namespace,
	}
}

// isHealthy checks that the provided WorkloadEntry is healthy. If health checks are not enabled,
// it is assumed to always be healthy
func isHealthy(cfg config.Config) bool {
	if parseHealthAnnotation(cfg.Annotations[status.WorkloadEntryHealthCheckAnnotation]) {
		// We default to false if the condition is not set. This ensures newly created WorkloadEntries
		// are treated as unhealthy until we prove they are healthy by probe success.
		return status.GetBoolConditionFromSpec(cfg, status.ConditionHealthy, false)
	}
	// If health check is not enabled, assume its healthy
	return true
}

// parseHealthAnnotation interprets the health-check annotation value as a
// boolean; empty or unparsable values count as false (health checks disabled).
func parseHealthAnnotation(s string) bool {
	if s == "" {
		return false
	}
	p, err := strconv.ParseBool(s)
	if err != nil {
		return false
	}
	return p
}
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" //runtime "k8s.io/apimachinery/pkg/runtime" pb "github.com/kubeflow/katib/pkg/api" ) // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. // StudyJobSpec defines the desired state of StudyJob type StudyJobSpec struct { StudyName string `json:"studyName,omitempty"` Owner string `json:"owner,omitempty"` OptimizationType OptimizationType `json:"optimizationtype,omitempty"` OptimizationGoal *float64 `json:"optimizationgoal,omitempty"` ObjectiveValueName string `json:"objectivevaluename,omitempty"` RequestCount int `json:"requestcount,omitempty"` MetricsNames []string `json:"metricsnames,omitempty"` ParameterConfigs []ParameterConfig `json:"parameterconfigs,omitempty"` WorkerSpec *WorkerSpec `json:"workerSpec,omitempty"` SuggestionSpec *SuggestionSpec `json:"suggestionSpec,omitempty"` EarlyStoppingSpec *EarlyStoppingSpec `json:"earlyStoppingSpec,omitempty"` MetricsCollectorSpec *MetricsCollectorSpec `json:"metricsCollectorSpec,omitempty"` } // StudyJobStatus defines the observed state of StudyJob type StudyJobStatus struct { // Represents time when the StudyJob was acknowledged by the StudyJob controller. // It is not guaranteed to be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. 
StartTime *metav1.Time `json:"startTime,omitempty"` // Represents time when the StudyJob was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. CompletionTime *metav1.Time `json:"completionTime,omitempty"` // Represents last time when the StudyJob was reconciled. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. LastReconcileTime *metav1.Time `json:"lastReconcileTime,omitempty"` Condition Condition `json:"conditon,omitempty"` StudyID string `json:"studyid,omitempty"` SuggestionParameterID string `json:"suggestionParameterId"` EarlyStoppingParameterID string `json:"earlyStoppingParameterId"` Trials []TrialSet `json:"trials,omitempty"` BestObjectiveValue *float64 `json:"bestObjectiveValue,omitempty"` BestTrialID string `json:"bestTrialId,omitempty"` BestWorkerID string `json:"bestWorkerId,omitempty"` SuggestionCount int `json:"suggestionCount,omitempty"` } type WorkerCondition struct { WorkerID string `json:"workerid,omitempty"` Kind string `json:"kind,omitempty"` Condition Condition `json:"conditon,omitempty"` ObjectiveValue *float64 `json:"objectiveValue,omitempty"` StartTime metav1.Time `json:"startTime,omitempty"` CompletionTime metav1.Time `json:"completionTime,omitempty"` } type TrialSet struct { TrialID string `json:"trialid,omitempty"` WorkerList []WorkerCondition `json:"workeridlist,omitempty"` } type ParameterConfig struct { Name string `json:"name,omitempty"` ParameterType ParameterType `json:"parametertype,omitempty"` Feasible FeasibleSpace `json:"feasible,omitempty"` } type FeasibleSpace struct { Max string `json:"max,omitempty"` Min string `json:"min,omitempty"` List []string `json:"list,omitempty"` } type ParameterType string const ( ParameterTypeUnknown ParameterType = "unknown" ParameterTypeDouble ParameterType = "double" ParameterTypeInt ParameterType = "int" 
ParameterTypeDiscrete ParameterType = "discrete" ParameterTypeCategorical ParameterType = "categorical" ) type OptimizationType string const ( OptimizationTypeUnknown OptimizationType = "" OptimizationTypeMinimize OptimizationType = "minimize" OptimizationTypeMaximize OptimizationType = "maximize" ) type GoTemplate struct { TemplatePath string `json:"templatePath,omitempty"` RawTemplate string `json:"rawTemplate,omitempty"` } type WorkerSpec struct { Retain bool `json:"retain,omitempty"` GoTemplate GoTemplate `json:"goTemplate,omitempty"` } type MetricsCollectorSpec struct { Retain bool `json:"retain,omitempty"` GoTemplate GoTemplate `json:"goTemplate,omitempty"` } type ServiceParameter struct { Name string `json:"name,omitempty"` Value string `json:"value,omitempty"` } type SuggestionSpec struct { SuggestionAlgorithm string `json:"suggestionAlgorithm,omitempty"` SuggestionParameters []pb.SuggestionParameter `json:"suggestionParameters"` RequestNumber int `json:"requestNumber,omitempty"` } type EarlyStoppingSpec struct { EarlyStoppingAlgorithm string `json:"earlyStoppingAlgorithm,omitempty"` EarlyStoppingParameters []pb.EarlyStoppingParameter `json:"earlyStoppingParameters"` } type ParameterEmbedding string const ( ParameterEmbeddingUndefined ParameterEmbedding = "" ParameterEmbeddingArgument ParameterEmbedding = "args" ParameterEmbeddingEnvironmentValue ParameterEmbedding = "env" ) type Condition string const ( ConditionUnknown Condition = "Unknown" ConditionCreated Condition = "Created" ConditionRunning Condition = "Running" ConditionCompleted Condition = "Completed" ConditionFailed Condition = "Failed" ) // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // StudyJob is the Schema for the studyjob API // +k8s:openapi-gen=true type StudyJob struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec StudyJobSpec `json:"spec,omitempty"` Status StudyJobStatus `json:"status,omitempty"` } // 
+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // StudyJobList contains a list of StudyJob type StudyJobList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []StudyJob `json:"items"` } func init() { SchemeBuilder.Register(&StudyJob{}, &StudyJobList{}) } Fix typo (#330) /* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" //runtime "k8s.io/apimachinery/pkg/runtime" pb "github.com/kubeflow/katib/pkg/api" ) // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
// StudyJobSpec defines the desired state of StudyJob
type StudyJobSpec struct {
	StudyName            string                `json:"studyName,omitempty"`
	Owner                string                `json:"owner,omitempty"`
	OptimizationType     OptimizationType      `json:"optimizationtype,omitempty"`
	OptimizationGoal     *float64              `json:"optimizationgoal,omitempty"`
	ObjectiveValueName   string                `json:"objectivevaluename,omitempty"`
	RequestCount         int                   `json:"requestcount,omitempty"`
	MetricsNames         []string              `json:"metricsnames,omitempty"`
	ParameterConfigs     []ParameterConfig     `json:"parameterconfigs,omitempty"`
	WorkerSpec           *WorkerSpec           `json:"workerSpec,omitempty"`
	SuggestionSpec       *SuggestionSpec       `json:"suggestionSpec,omitempty"`
	EarlyStoppingSpec    *EarlyStoppingSpec    `json:"earlyStoppingSpec,omitempty"`
	MetricsCollectorSpec *MetricsCollectorSpec `json:"metricsCollectorSpec,omitempty"`
}

// StudyJobStatus defines the observed state of StudyJob
type StudyJobStatus struct {
	// Represents time when the StudyJob was acknowledged by the StudyJob controller.
	// It is not guaranteed to be set in happens-before order across separate operations.
	// It is represented in RFC3339 form and is in UTC.
	StartTime *metav1.Time `json:"startTime,omitempty"`

	// Represents time when the StudyJob was completed. It is not guaranteed to
	// be set in happens-before order across separate operations.
	// It is represented in RFC3339 form and is in UTC.
	CompletionTime *metav1.Time `json:"completionTime,omitempty"`

	// Represents last time when the StudyJob was reconciled. It is not guaranteed to
	// be set in happens-before order across separate operations.
	// It is represented in RFC3339 form and is in UTC.
	LastReconcileTime *metav1.Time `json:"lastReconcileTime,omitempty"`

	Condition                Condition  `json:"condition,omitempty"`
	StudyID                  string     `json:"studyid,omitempty"`
	SuggestionParameterID    string     `json:"suggestionParameterId"`
	EarlyStoppingParameterID string     `json:"earlyStoppingParameterId"`
	Trials                   []TrialSet `json:"trials,omitempty"`
	BestObjectiveValue       *float64   `json:"bestObjectiveValue,omitempty"`
	BestTrialID              string     `json:"bestTrialId,omitempty"`
	BestWorkerID             string     `json:"bestWorkerId,omitempty"`
	SuggestionCount          int        `json:"suggestionCount,omitempty"`
}

// WorkerCondition records the state and result of a single worker run.
type WorkerCondition struct {
	WorkerID       string      `json:"workerid,omitempty"`
	Kind           string      `json:"kind,omitempty"`
	Condition      Condition   `json:"condition,omitempty"`
	ObjectiveValue *float64    `json:"objectiveValue,omitempty"`
	StartTime      metav1.Time `json:"startTime,omitempty"`
	CompletionTime metav1.Time `json:"completionTime,omitempty"`
}

// TrialSet groups the workers belonging to one trial.
type TrialSet struct {
	TrialID    string            `json:"trialid,omitempty"`
	WorkerList []WorkerCondition `json:"workeridlist,omitempty"`
}

// ParameterConfig describes one tunable parameter and its feasible space.
type ParameterConfig struct {
	Name          string        `json:"name,omitempty"`
	ParameterType ParameterType `json:"parametertype,omitempty"`
	Feasible      FeasibleSpace `json:"feasible,omitempty"`
}

// FeasibleSpace bounds a parameter either by a [Min, Max] range or an
// explicit List of allowed values.
type FeasibleSpace struct {
	Max  string   `json:"max,omitempty"`
	Min  string   `json:"min,omitempty"`
	List []string `json:"list,omitempty"`
}

// ParameterType enumerates the supported parameter value kinds.
type ParameterType string

const (
	ParameterTypeUnknown     ParameterType = "unknown"
	ParameterTypeDouble      ParameterType = "double"
	ParameterTypeInt         ParameterType = "int"
	ParameterTypeDiscrete    ParameterType = "discrete"
	ParameterTypeCategorical ParameterType = "categorical"
)

// OptimizationType states whether the objective is minimized or maximized.
type OptimizationType string

const (
	OptimizationTypeUnknown  OptimizationType = ""
	OptimizationTypeMinimize OptimizationType = "minimize"
	OptimizationTypeMaximize OptimizationType = "maximize"
)

// GoTemplate references a worker/collector template either by path or inline.
type GoTemplate struct {
	TemplatePath string `json:"templatePath,omitempty"`
	RawTemplate  string `json:"rawTemplate,omitempty"`
}

// WorkerSpec configures the trial worker template.
type WorkerSpec struct {
	Retain     bool       `json:"retain,omitempty"`
	GoTemplate GoTemplate `json:"goTemplate,omitempty"`
}

// MetricsCollectorSpec configures the metrics collector template.
type MetricsCollectorSpec struct {
	Retain     bool       `json:"retain,omitempty"`
	GoTemplate GoTemplate `json:"goTemplate,omitempty"`
}

// ServiceParameter is a generic name/value pair.
type ServiceParameter struct {
	Name  string `json:"name,omitempty"`
	Value string `json:"value,omitempty"`
}

// SuggestionSpec selects and parameterizes the suggestion algorithm.
type SuggestionSpec struct {
	SuggestionAlgorithm  string                   `json:"suggestionAlgorithm,omitempty"`
	SuggestionParameters []pb.SuggestionParameter `json:"suggestionParameters"`
	RequestNumber        int                      `json:"requestNumber,omitempty"`
}

// EarlyStoppingSpec selects and parameterizes the early-stopping algorithm.
type EarlyStoppingSpec struct {
	EarlyStoppingAlgorithm  string                      `json:"earlyStoppingAlgorithm,omitempty"`
	EarlyStoppingParameters []pb.EarlyStoppingParameter `json:"earlyStoppingParameters"`
}

// ParameterEmbedding says how a parameter is injected into a worker.
type ParameterEmbedding string

const (
	ParameterEmbeddingUndefined        ParameterEmbedding = ""
	ParameterEmbeddingArgument         ParameterEmbedding = "args"
	ParameterEmbeddingEnvironmentValue ParameterEmbedding = "env"
)

// Condition is the lifecycle state of a StudyJob or worker.
type Condition string

const (
	ConditionUnknown   Condition = "Unknown"
	ConditionCreated   Condition = "Created"
	ConditionRunning   Condition = "Running"
	ConditionCompleted Condition = "Completed"
	ConditionFailed    Condition = "Failed"
)

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// StudyJob is the Schema for the studyjob API
// +k8s:openapi-gen=true
type StudyJob struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   StudyJobSpec   `json:"spec,omitempty"`
	Status StudyJobStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// StudyJobList contains a list of StudyJob
type StudyJobList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []StudyJob `json:"items"`
}

// init registers both schema types with the scheme builder at package load.
func init() {
	SchemeBuilder.Register(&StudyJob{}, &StudyJobList{})
}
/* * This file is part of the KubeVirt project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Copyright 2018 Red Hat, Inc. * */ package validating_webhook import ( "fmt" . "github.com/onsi/ginkgo" "github.com/onsi/ginkgo/extensions/table" . "github.com/onsi/gomega" k8sv1 "k8s.io/api/core/v1" v1 "kubevirt.io/kubevirt/pkg/api/v1" ) var _ = Describe("Validating Webhook", func() { Context("with VM disk", func() { It("should accept a valid disk", func() { vm := v1.NewMinimalVM("testvm") vm.Spec.Domain.Devices.Disks = append(vm.Spec.Domain.Devices.Disks, v1.Disk{ Name: "testdisk", VolumeName: "testvolume", }) vm.Spec.Volumes = append(vm.Spec.Volumes, v1.Volume{ Name: "testvolume", VolumeSource: v1.VolumeSource{ RegistryDisk: &v1.RegistryDiskSource{}, }, }) errors := validateDisks(vm) Expect(len(errors)).To(Equal(0)) }) It("should reject disk with missing volume", func() { vm := v1.NewMinimalVM("testvm") vm.Spec.Domain.Devices.Disks = append(vm.Spec.Domain.Devices.Disks, v1.Disk{ Name: "testdisk", VolumeName: "testvolume", }) errors := validateDisks(vm) Expect(len(errors)).To(Equal(1)) }) It("should reject disk with multiple targets ", func() { vm := v1.NewMinimalVM("testvm") vm.Spec.Domain.Devices.Disks = append(vm.Spec.Domain.Devices.Disks, v1.Disk{ Name: "testdisk", VolumeName: "testvolume", DiskDevice: v1.DiskDevice{ Disk: &v1.DiskTarget{}, Floppy: &v1.FloppyTarget{}, }, }) errors := validateDisks(vm) // len == 2 because missing volume and multiple targets set 
Expect(len(errors)).To(Equal(2)) }) table.DescribeTable("should verify LUN is mapped to PVC volume", func(volume *v1.Volume, expectedErrors int) { vm := v1.NewMinimalVM("testvm") vm.Spec.Domain.Devices.Disks = append(vm.Spec.Domain.Devices.Disks, v1.Disk{ Name: "testdisk", VolumeName: "testvolume", DiskDevice: v1.DiskDevice{ LUN: &v1.LunTarget{}, }, }) vm.Spec.Volumes = append(vm.Spec.Volumes, *volume) errors := validateDisks(vm) Expect(len(errors)).To(Equal(expectedErrors)) }, table.Entry("and reject non PVC sources", &v1.Volume{ Name: "testvolume", VolumeSource: v1.VolumeSource{ RegistryDisk: &v1.RegistryDiskSource{}, }, }, 1), table.Entry("and accept PVC sources", &v1.Volume{ Name: "testvolume", VolumeSource: v1.VolumeSource{ PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{}, }, }, 0), ) }) Context("with VM volume", func() { It("should accept valid volume", func() { vm := v1.NewMinimalVM("testvm") vm.Spec.Volumes = append(vm.Spec.Volumes, v1.Volume{ Name: "testvolume", VolumeSource: v1.VolumeSource{ RegistryDisk: &v1.RegistryDiskSource{}, }, }) errors := validateVolumes(vm) Expect(len(errors)).To(Equal(0)) }) It("should reject volume no volume source set", func() { vm := v1.NewMinimalVM("testvm") vm.Spec.Volumes = append(vm.Spec.Volumes, v1.Volume{ Name: "testvolume", }) errors := validateVolumes(vm) Expect(len(errors)).To(Equal(1)) }) It("should reject volume with multiple volume sources set", func() { vm := v1.NewMinimalVM("testvm") vm.Spec.Volumes = append(vm.Spec.Volumes, v1.Volume{ Name: "testvolume", VolumeSource: v1.VolumeSource{ RegistryDisk: &v1.RegistryDiskSource{}, PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{}, }, }) errors := validateVolumes(vm) Expect(len(errors)).To(Equal(1)) }) table.DescribeTable("should verify cloud-init userdata length", func(userDataLen int, expectedErrors int) { vm := v1.NewMinimalVM("testvm") // generate fake userdata userdata := "" for i := 0; i < userDataLen; i++ { userdata = 
fmt.Sprintf("%sa", userdata) } vm.Spec.Volumes = append(vm.Spec.Volumes, v1.Volume{ VolumeSource: v1.VolumeSource{ CloudInitNoCloud: &v1.CloudInitNoCloudSource{ UserData: userdata, }, }, }) errors := validateVolumes(vm) Expect(len(errors)).To(Equal(expectedErrors)) }, table.Entry("should accept userdata under max limit", 10, 0), table.Entry("should accept userdata equal max limit", cloudInitMaxLen, 0), table.Entry("should reject userdata greater than max limit", cloudInitMaxLen+1, 1), ) }) }) Add advanced validation unit tests Signed-off-by: David Vossel <3a865980c5ac97d5aadbbcbf1cbfa33e47e26202@gmail.com> /* * This file is part of the KubeVirt project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Copyright 2018 Red Hat, Inc. * */ package validating_webhook import ( "encoding/base64" "fmt" . "github.com/onsi/ginkgo" "github.com/onsi/ginkgo/extensions/table" . 
"github.com/onsi/gomega" k8sv1 "k8s.io/api/core/v1" v1 "kubevirt.io/kubevirt/pkg/api/v1" ) var _ = Describe("Validating Webhook", func() { Context("with VM disk", func() { table.DescribeTable("should accept a valid disk", func(disk v1.Disk, volume v1.Volume) { vm := v1.NewMinimalVM("testvm") vm.Spec.Domain.Devices.Disks = append(vm.Spec.Domain.Devices.Disks, disk) vm.Spec.Volumes = append(vm.Spec.Volumes, volume) errors := validateDisks(vm) Expect(len(errors)).To(Equal(0)) }, table.Entry("with Disk target", v1.Disk{Name: "testdisk", VolumeName: "testvolume", DiskDevice: v1.DiskDevice{Disk: &v1.DiskTarget{}}}, v1.Volume{Name: "testvolume", VolumeSource: v1.VolumeSource{RegistryDisk: &v1.RegistryDiskSource{Image: "fake"}}}, ), table.Entry("with LUN target", v1.Disk{Name: "testdisk", VolumeName: "testvolume", DiskDevice: v1.DiskDevice{LUN: &v1.LunTarget{}}}, v1.Volume{Name: "testvolume", VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{}}}, ), table.Entry("with Floppy target", v1.Disk{Name: "testdisk", VolumeName: "testvolume", DiskDevice: v1.DiskDevice{Floppy: &v1.FloppyTarget{}}}, v1.Volume{Name: "testvolume", VolumeSource: v1.VolumeSource{RegistryDisk: &v1.RegistryDiskSource{Image: "fake"}}}, ), table.Entry("with CDRom target", v1.Disk{Name: "testdisk", VolumeName: "testvolume", DiskDevice: v1.DiskDevice{CDRom: &v1.CDRomTarget{}}}, v1.Volume{Name: "testvolume", VolumeSource: v1.VolumeSource{RegistryDisk: &v1.RegistryDiskSource{Image: "fake"}}}, ), ) It("should allow disk without a target", func() { vm := v1.NewMinimalVM("testvm") vm.Spec.Domain.Devices.Disks = append(vm.Spec.Domain.Devices.Disks, v1.Disk{ Name: "testdisk", VolumeName: "testvolume", // disk without a target defaults to DiskTarget }) vm.Spec.Volumes = append(vm.Spec.Volumes, v1.Volume{ Name: "testvolume", VolumeSource: v1.VolumeSource{ RegistryDisk: &v1.RegistryDiskSource{Image: "fake"}, }, }) errors := validateDisks(vm) Expect(len(errors)).To(Equal(0)) }) 
It("should reject disk with missing volume", func() { vm := v1.NewMinimalVM("testvm") vm.Spec.Domain.Devices.Disks = append(vm.Spec.Domain.Devices.Disks, v1.Disk{ Name: "testdisk", VolumeName: "testvolume", }) errors := validateDisks(vm) Expect(len(errors)).To(Equal(1)) }) It("should reject disk with multiple targets ", func() { vm := v1.NewMinimalVM("testvm") vm.Spec.Domain.Devices.Disks = append(vm.Spec.Domain.Devices.Disks, v1.Disk{ Name: "testdisk", VolumeName: "testvolume", DiskDevice: v1.DiskDevice{ Disk: &v1.DiskTarget{}, Floppy: &v1.FloppyTarget{}, }, }) vm.Spec.Volumes = append(vm.Spec.Volumes, v1.Volume{ Name: "testvolume", VolumeSource: v1.VolumeSource{ RegistryDisk: &v1.RegistryDiskSource{Image: "fake"}, }, }) errors := validateDisks(vm) Expect(len(errors)).To(Equal(1)) }) It("should generate multiple errors", func() { vm := v1.NewMinimalVM("testvm") vm.Spec.Domain.Devices.Disks = append(vm.Spec.Domain.Devices.Disks, v1.Disk{ Name: "testdisk", VolumeName: "testvolume", DiskDevice: v1.DiskDevice{ Disk: &v1.DiskTarget{}, Floppy: &v1.FloppyTarget{}, }, }) errors := validateDisks(vm) // missing volume and multiple targets set. 
should result in 2 errors Expect(len(errors)).To(Equal(2)) }) table.DescribeTable("should verify LUN is mapped to PVC volume", func(volume *v1.Volume, expectedErrors int) { vm := v1.NewMinimalVM("testvm") vm.Spec.Domain.Devices.Disks = append(vm.Spec.Domain.Devices.Disks, v1.Disk{ Name: "testdisk", VolumeName: "testvolume", DiskDevice: v1.DiskDevice{ LUN: &v1.LunTarget{}, }, }) vm.Spec.Volumes = append(vm.Spec.Volumes, *volume) errors := validateDisks(vm) Expect(len(errors)).To(Equal(expectedErrors)) }, table.Entry("and reject non PVC sources", &v1.Volume{ Name: "testvolume", VolumeSource: v1.VolumeSource{ RegistryDisk: &v1.RegistryDiskSource{}, }, }, 1), table.Entry("and accept PVC sources", &v1.Volume{ Name: "testvolume", VolumeSource: v1.VolumeSource{ PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{}, }, }, 0), ) }) Context("with VM volume", func() { table.DescribeTable("should accept valid volume", func(volumeSource v1.VolumeSource) { vm := v1.NewMinimalVM("testvm") vm.Spec.Volumes = append(vm.Spec.Volumes, v1.Volume{ Name: "testvolume", VolumeSource: volumeSource, }) errors := validateVolumes(vm) Expect(len(errors)).To(Equal(0)) }, table.Entry("with pvc volume source", v1.VolumeSource{PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{}}), table.Entry("with cloud-init volume source", v1.VolumeSource{CloudInitNoCloud: &v1.CloudInitNoCloudSource{UserData: "fake"}}), table.Entry("with registryDisk volume source", v1.VolumeSource{RegistryDisk: &v1.RegistryDiskSource{}}), table.Entry("with ephemeral volume source", v1.VolumeSource{Ephemeral: &v1.EphemeralVolumeSource{}}), table.Entry("with emptyDisk volume source", v1.VolumeSource{EmptyDisk: &v1.EmptyDiskSource{}}), ) It("should reject volume no volume source set", func() { vm := v1.NewMinimalVM("testvm") vm.Spec.Volumes = append(vm.Spec.Volumes, v1.Volume{ Name: "testvolume", }) errors := validateVolumes(vm) Expect(len(errors)).To(Equal(1)) }) It("should reject volume with multiple 
volume sources set", func() { vm := v1.NewMinimalVM("testvm") vm.Spec.Volumes = append(vm.Spec.Volumes, v1.Volume{ Name: "testvolume", VolumeSource: v1.VolumeSource{ RegistryDisk: &v1.RegistryDiskSource{}, PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{}, }, }) errors := validateVolumes(vm) Expect(len(errors)).To(Equal(1)) }) table.DescribeTable("should verify cloud-init userdata length", func(userDataLen int, expectedErrors int, base64Encode bool) { vm := v1.NewMinimalVM("testvm") // generate fake userdata userdata := "" for i := 0; i < userDataLen; i++ { userdata = fmt.Sprintf("%sa", userdata) } vm.Spec.Volumes = append(vm.Spec.Volumes, v1.Volume{VolumeSource: v1.VolumeSource{CloudInitNoCloud: &v1.CloudInitNoCloudSource{}}}) if base64Encode { vm.Spec.Volumes[0].VolumeSource.CloudInitNoCloud.UserDataBase64 = base64.StdEncoding.EncodeToString([]byte(userdata)) } else { vm.Spec.Volumes[0].VolumeSource.CloudInitNoCloud.UserData = userdata } errors := validateVolumes(vm) Expect(len(errors)).To(Equal(expectedErrors)) }, table.Entry("should accept userdata under max limit", 10, 0, false), table.Entry("should accept userdata equal max limit", cloudInitMaxLen, 0, false), table.Entry("should reject userdata greater than max limit", cloudInitMaxLen+1, 1, false), table.Entry("should accept userdata base64 under max limit", 10, 0, true), table.Entry("should accept userdata base64 equal max limit", cloudInitMaxLen, 0, true), table.Entry("should reject userdata base64 greater than max limit", cloudInitMaxLen+1, 1, true), ) It("should reject cloud-init with invalid base64 data", func() { vm := v1.NewMinimalVM("testvm") vm.Spec.Volumes = append(vm.Spec.Volumes, v1.Volume{ VolumeSource: v1.VolumeSource{ CloudInitNoCloud: &v1.CloudInitNoCloudSource{ UserDataBase64: "#######garbage******", }, }, }) errors := validateVolumes(vm) Expect(len(errors)).To(Equal(1)) }) }) })
// Copyright 2018 The go-hep Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package rdict import ( "bytes" "fmt" "go/format" "go/importer" "go/types" "log" "reflect" "strings" "go-hep.org/x/hep/groot/rmeta" ) var ( binMa *types.Interface // encoding.BinaryMarshaler binUn *types.Interface // encoding.BinaryUnmarshaler rootVers *types.Interface // rbytes.RVersioner gosizes types.Sizes ) // Generator holds the state of the ROOT streaemer generation. type Generator struct { buf *bytes.Buffer pkg *types.Package // set of imported packages. // usually: "encoding/binary", "math" imps map[string]int Verbose bool // enable verbose mode } // NewGenerator returns a new code generator for package p, // where p is the package's import path. func NewGenerator(p string) (*Generator, error) { pkg, err := importer.Default().Import(p) if err != nil { return nil, err } return &Generator{ buf: new(bytes.Buffer), pkg: pkg, imps: map[string]int{ "go-hep.org/x/hep/groot/rbase": 1, "go-hep.org/x/hep/groot/rbytes": 1, "go-hep.org/x/hep/groot/rdict": 1, "go-hep.org/x/hep/groot/rmeta": 1, }, }, nil } func (g *Generator) printf(format string, args ...interface{}) { fmt.Fprintf(g.buf, format, args...) 
} func (g *Generator) Generate(typeName string) { scope := g.pkg.Scope() obj := scope.Lookup(typeName) if obj == nil { log.Fatalf("no such type %q in package %q\n", typeName, g.pkg.Path()+"/"+g.pkg.Name()) } tn, ok := obj.(*types.TypeName) if !ok { log.Fatalf("%q is not a type (%v)\n", typeName, obj) } typ, ok := tn.Type().Underlying().(*types.Struct) if !ok { log.Fatalf("%q is not a named struct (%v)\n", typeName, tn) } if g.Verbose { log.Printf("typ: %q: %+v\n", typeName, typ) } if !types.Implements(tn.Type(), rootVers) && !types.Implements(types.NewPointer(tn.Type()), rootVers) { log.Fatalf("type %q does not implement %q.", tn.Pkg().Path()+"."+tn.Name(), "go-hep.org/x/hep/groot/rbytes.RVersioner") } g.genStreamer(typ, typeName) g.genMarshal(typ, typeName) // g.genUnmarshal(typ, typeName) } func (g *Generator) genMarshal(t types.Type, typeName string) { g.printf(`// MarshalROOT implements rbytes.Marshaler func (o *%[1]s) MarshalROOT(w *rbytes.WBuffer) (int, error) { if w.Err() != nil { return 0, w.Err() } pos := w.WriteVersion(o.RVersion()) `, typeName, ) typ := t.Underlying().(*types.Struct) for i := 0; i < typ.NumFields(); i++ { ft := typ.Field(i) n := ft.Name() // no `groot:"foo"` redirection. g.genMarshalType(ft.Type(), n) } g.printf("\n\treturn w.SetByteCount(pos, o.Class())\n}\n\n") } func (g *Generator) genUnmarshal(t types.Type, typeName string) { g.printf(`// UnmarshalROOT implements rbytes.Unmarshaler func (o *%[1]s) UnmarshalROOT(r *rbytes.RBuffer) error { rs, err := r.RStreamer(o) if err != nil { return err } return rs.RStream(r) } `, typeName, ) } func (g *Generator) genStreamer(t types.Type, typeName string) { g.printf(`func init() { // Streamer for %[1]s. 
rdict.Streamers.Add(rdict.NewStreamerInfo(%[2]q, int(((*%[1]s)(nil)).RVersion()), []rbytes.StreamerElement{ `, typeName, g.pkg.Path()+"."+typeName, ) typ := t.Underlying().(*types.Struct) for i := 0; i < typ.NumFields(); i++ { ft := typ.Field(i) n := ft.Name() if tag := typ.Tag(i); tag != "" { nn := reflect.StructTag(tag).Get("groot") if nn != "" { n = nn } } g.genStreamerType(ft.Type(), n) } g.printf("}))\n}\n\n") } func (g *Generator) genStreamerType(t types.Type, n string) { ut := t.Underlying() switch ut := ut.(type) { case *types.Basic: switch kind := ut.Kind(); kind { case types.Bool: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Uint8: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Uint16: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Uint32, types.Uint: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Uint64: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Int8: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Int16: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Int32, types.Int: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Int64: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Float32: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Float64: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Complex64: log.Fatalf("unhandled type: %v (underlying %v)\n", t, ut) // FIXME(sbinet) case types.Complex128: log.Fatalf("unhandled type: %v (underlying %v)\n", t, ut) // FIXME(sbinet) case types.String: g.printf("%s,\n", g.se(ut, n, "", 0)) default: log.Fatalf("unhandled type: %v 
(underlying: %v)\n", t, ut) } case *types.Array: // FIXME(sbinet): collect+visit element type. switch eut := ut.Elem().Underlying().(type) { case *types.Basic: switch kind := eut.Kind(); kind { default: g.printf( "&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut.Elem(), n, "+ rmeta.OffsetL", ut.Len()), ) case types.String: g.printf( "%s,\n", g.se(ut.Elem(), n, "+ rmeta.OffsetL", ut.Len()), ) } default: g.printf( "%s\n", g.se(ut.Elem(), n, "+ rmeta.OffsetL", ut.Len()), ) } case *types.Slice: // FIXME(sbinet): collect+visit element type. g.printf("rdict.NewStreamerSTL(%q, rmeta.STLvector, rmeta.%v),\n", n, gotype2RMeta(ut.Elem())) case *types.Struct: g.printf( "&rdict.StreamerObjectAny{StreamerElement:rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Any,\nSize: %[4]d,\nEName:rdict.GoName2Cxx(%[3]q),\n}.New()},\n", n, "", t.String(), gosizes.Sizeof(ut), ) default: log.Fatalf("unhandled type: %v (underlying: %v)\n", t, ut) } } func (g *Generator) wt(t types.Type, n, meth, arr string) { ut := t.Underlying() switch ut := ut.(type) { case *types.Basic: switch kind := ut.Kind(); kind { case types.Bool: g.printf("w.Write%sBool(o.%s%s)\n", meth, n, arr) case types.Uint8: g.printf("w.Write%sU8(o.%s%s)\n", meth, n, arr) case types.Uint16: g.printf("w.Write%sU16(o.%s%s)\n", meth, n, arr) case types.Uint32: g.printf("w.Write%sU32(o.%s%s)\n", meth, n, arr) case types.Uint64: g.printf("w.Write%sU64(o.%s%s)\n", meth, n, arr) case types.Int8: g.printf("w.Write%sI8(o.%s%s)\n", meth, n, arr) case types.Int16: g.printf("w.Write%sI16(o.%s%s)\n", meth, n, arr) case types.Int32: g.printf("w.Write%sI32(o.%s%s)\n", meth, n, arr) case types.Int64: g.printf("w.Write%sI64(o.%s%s)\n", meth, n, arr) case types.Float32: g.printf("w.Write%sF32(o.%s%s)\n", meth, n, arr) case types.Float64: g.printf("w.Write%sF64(o.%s%s)\n", meth, n, arr) case types.Uint: g.printf("w.Write%sU64(uint64(o.%s%s))\n", meth, n, arr) case types.Int: g.printf("w.Write%sI64(int64(o.%s%s))\n", 
meth, n, arr) case types.Complex64: log.Fatalf("unhandled type: %v (underlying %v)\n", t, ut) // FIXME(sbinet) case types.Complex128: log.Fatalf("unhandled type: %v (underlying %v)\n", t, ut) // FIXME(sbinet) case types.String: g.printf("w.Write%sString(o.%s%s)\n", meth, n, arr) default: log.Fatalf("unhandled type: %v (underlying: %v)\n", t, ut) } case *types.Struct: g.printf("o.%s.MarshalROOT(w)\n", n) default: log.Fatalf("unhandled marshal type: %v (underlying %v)", t, ut) } } func (g *Generator) se(t types.Type, n, rtype string, arrlen int64) string { elmt := Element{ Size: 1, } if arrlen > 0 { elmt.Size = int32(arrlen) elmt.ArrLen = int32(arrlen) elmt.ArrDim = 1 } ut := t.Underlying() switch ut := ut.(type) { case *types.Basic: switch kind := ut.Kind(); kind { case types.Bool: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Bool %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 1*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Uint8: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Uint8 %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 1*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Uint16: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Uint16 %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 2*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Uint32, types.Uint: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Uint32 %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 4*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Uint64: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Uint64 %[4]s,\nSize: 
%[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 8*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Int8: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Int8 %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 1*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Int16: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Int16 %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 2*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Int32, types.Int: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Int32 %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 4*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Int64: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Int64 %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 8*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Float32: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Float32 %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 4*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Float64: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Float64 %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 8*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Complex64: log.Fatalf("unhandled type: %v (underlying %v)\n", t, ut) // FIXME(sbinet) case types.Complex128: log.Fatalf("unhandled type: %v (underlying %v)\n", t, ut) // FIXME(sbinet) case types.String: 
return fmt.Sprintf("&rdict.StreamerString{rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.TString %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()}", n, "", "TString", rtype, 24*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) } case *types.Struct: // FIXME(sbinet): implement. // FIXME(sbinet): prevent recursion. old := g.buf g.buf = new(bytes.Buffer) g.genStreamerType(t, n) str := g.buf.String() g.buf = old return str } log.Printf("gen-streamer: unhandled type: %v (underlying %v)", t, ut) return "" } func (g *Generator) genMarshalType(t types.Type, n string) { ut := t.Underlying() switch ut := ut.(type) { case *types.Basic: switch kind := ut.Kind(); kind { case types.Bool: g.wt(ut, n, "", "") case types.Uint8: g.wt(ut, n, "", "") case types.Uint16: g.wt(ut, n, "", "") case types.Uint32: g.wt(ut, n, "", "") case types.Uint64: g.wt(ut, n, "", "") case types.Int8: g.wt(ut, n, "", "") case types.Int16: g.wt(ut, n, "", "") case types.Int32: g.wt(ut, n, "", "") case types.Int64: g.wt(ut, n, "", "") case types.Float32: g.wt(ut, n, "", "") case types.Float64: g.wt(ut, n, "", "") case types.Uint: g.wt(ut, n, "", "") case types.Int: g.wt(ut, n, "", "") case types.Complex64: log.Fatalf("unhandled type: %v (underlying %v)\n", t, ut) // FIXME(sbinet) case types.Complex128: log.Fatalf("unhandled type: %v (underlying %v)\n", t, ut) // FIXME(sbinet) case types.String: g.wt(ut, n, "", "") default: log.Fatalf("unhandled type: %v (underlying: %v)\n", t, ut) } case *types.Array: switch ut.Elem().Underlying().(type) { case *types.Basic: g.wt(ut.Elem(), n, "FastArray", "[:]") default: g.printf("for i := range o.%s {\n", n) g.wt(ut.Elem(), n+"[i]", "", "") g.printf("}\n") } case *types.Slice: g.wt(ut.Elem(), n, "FastArray", "") case *types.Struct: g.printf("o.%s.MarshalROOT(w)\n", n) default: log.Fatalf("gen-marshal-type: unhandled type: %v (underlying: %v)\n", t, ut) } } func (g *Generator) Format() ([]byte, error) { buf := new(bytes.Buffer) 
buf.WriteString(fmt.Sprintf(`// DO NOT EDIT; automatically generated by %[1]s package %[2]s import ( `, "root-gen-streamer", g.pkg.Name(), )) // FIXME(sbinet): separate stdlib from 3rd-party imports. for k := range g.imps { fmt.Fprintf(buf, "%q\n", k) } fmt.Fprintf(buf, ")\n\n") buf.Write(g.buf.Bytes()) src, err := format.Source(buf.Bytes()) if err != nil { log.Printf("=== error ===\n%s\n", buf.Bytes()) } return src, err } func init() { pkg, err := importer.Default().Import("encoding") if err != nil { log.Fatalf("error finding package \"encoding\": %v\n", err) } o := pkg.Scope().Lookup("BinaryMarshaler") if o == nil { log.Fatalf("could not find interface encoding.BinaryMarshaler\n") } binMa = o.(*types.TypeName).Type().Underlying().(*types.Interface) o = pkg.Scope().Lookup("BinaryUnmarshaler") if o == nil { log.Fatalf("could not find interface encoding.BinaryUnmarshaler\n") } binUn = o.(*types.TypeName).Type().Underlying().(*types.Interface) pkg, err = importer.Default().Import("go-hep.org/x/hep/groot/rbytes") if err != nil { log.Fatalf("could not find package %q: %v", "go-hep.org/x/hep/groot/rbytes", err) } o = pkg.Scope().Lookup("RVersioner") if o == nil { log.Fatalf("could not find interface rbytes.RVersioner") } rootVers = o.(*types.TypeName).Type().Underlying().(*types.Interface) sz := int64(reflect.TypeOf(int(0)).Size()) gosizes = &types.StdSizes{WordSize: sz, MaxAlign: sz} } func gotype2RMeta(t types.Type) rmeta.Enum { switch ut := t.Underlying().(type) { case *types.Basic: switch ut.Kind() { case types.Bool: return rmeta.Bool case types.Uint8: return rmeta.Uint8 case types.Uint16: return rmeta.Uint16 case types.Uint32, types.Uint: return rmeta.Uint32 case types.Uint64: return rmeta.Uint64 case types.Int8: return rmeta.Int8 case types.Int16: return rmeta.Int16 case types.Int32, types.Int: return rmeta.Int32 case types.Int64: return rmeta.Int64 case types.Float32: return rmeta.Float32 case types.Float64: return rmeta.Float64 case types.String: return 
rmeta.TString } case *types.Struct: return rmeta.Any case *types.Slice: return rmeta.STL case *types.Array: return rmeta.OffsetL + gotype2RMeta(ut.Elem()) } return -1 } // GoName2Cxx translates a fully-qualified Go type name to a C++ one. // e.g.: // - go-hep.org/x/hep/hbook.H1D -> go_hep_org::x::hep::hbook::H1D func GoName2Cxx(name string) string { repl := strings.NewReplacer( "-", "_", "/", "::", ".", "_", ) i := strings.LastIndex(name, ".") if i > 0 { name = name[:i] + "::" + name[i+1:] } return repl.Replace(name) } // Typename returns a language dependent typename, usually encoded inside a // StreamerInfo's title. func Typename(name, title string) (string, bool) { if title == "" { return name, false } i := strings.Index(title, ";") if i <= 0 { return name, false } lang := title[:i] title = strings.TrimSpace(title[i+1:]) switch lang { case "Go": return title, GoName2Cxx(title) == name default: return title, false } } groot/rdict: properly handle import-rbase // Copyright 2018 The go-hep Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package rdict import ( "bytes" "fmt" "go/format" "go/importer" "go/types" "log" "reflect" "strings" "go-hep.org/x/hep/groot/rmeta" ) var ( binMa *types.Interface // encoding.BinaryMarshaler binUn *types.Interface // encoding.BinaryUnmarshaler rootVers *types.Interface // rbytes.RVersioner gosizes types.Sizes ) // Generator holds the state of the ROOT streaemer generation. type Generator struct { buf *bytes.Buffer pkg *types.Package // set of imported packages. // usually: "encoding/binary", "math" imps map[string]int Verbose bool // enable verbose mode } // NewGenerator returns a new code generator for package p, // where p is the package's import path. 
func NewGenerator(p string) (*Generator, error) { pkg, err := importer.Default().Import(p) if err != nil { return nil, err } return &Generator{ buf: new(bytes.Buffer), pkg: pkg, imps: map[string]int{ "go-hep.org/x/hep/groot/rbytes": 1, "go-hep.org/x/hep/groot/rdict": 1, "go-hep.org/x/hep/groot/rmeta": 1, }, }, nil } func (g *Generator) printf(format string, args ...interface{}) { fmt.Fprintf(g.buf, format, args...) } func (g *Generator) Generate(typeName string) { scope := g.pkg.Scope() obj := scope.Lookup(typeName) if obj == nil { log.Fatalf("no such type %q in package %q\n", typeName, g.pkg.Path()+"/"+g.pkg.Name()) } tn, ok := obj.(*types.TypeName) if !ok { log.Fatalf("%q is not a type (%v)\n", typeName, obj) } typ, ok := tn.Type().Underlying().(*types.Struct) if !ok { log.Fatalf("%q is not a named struct (%v)\n", typeName, tn) } if g.Verbose { log.Printf("typ: %q: %+v\n", typeName, typ) } if !types.Implements(tn.Type(), rootVers) && !types.Implements(types.NewPointer(tn.Type()), rootVers) { log.Fatalf("type %q does not implement %q.", tn.Pkg().Path()+"."+tn.Name(), "go-hep.org/x/hep/groot/rbytes.RVersioner") } g.genStreamer(typ, typeName) g.genMarshal(typ, typeName) // g.genUnmarshal(typ, typeName) } func (g *Generator) genMarshal(t types.Type, typeName string) { g.printf(`// MarshalROOT implements rbytes.Marshaler func (o *%[1]s) MarshalROOT(w *rbytes.WBuffer) (int, error) { if w.Err() != nil { return 0, w.Err() } pos := w.WriteVersion(o.RVersion()) `, typeName, ) typ := t.Underlying().(*types.Struct) for i := 0; i < typ.NumFields(); i++ { ft := typ.Field(i) n := ft.Name() // no `groot:"foo"` redirection. 
g.genMarshalType(ft.Type(), n) } g.printf("\n\treturn w.SetByteCount(pos, o.Class())\n}\n\n") } func (g *Generator) genUnmarshal(t types.Type, typeName string) { g.printf(`// UnmarshalROOT implements rbytes.Unmarshaler func (o *%[1]s) UnmarshalROOT(r *rbytes.RBuffer) error { rs, err := r.RStreamer(o) if err != nil { return err } return rs.RStream(r) } `, typeName, ) } func (g *Generator) genStreamer(t types.Type, typeName string) { g.printf(`func init() { // Streamer for %[1]s. rdict.Streamers.Add(rdict.NewStreamerInfo(%[2]q, int(((*%[1]s)(nil)).RVersion()), []rbytes.StreamerElement{ `, typeName, g.pkg.Path()+"."+typeName, ) typ := t.Underlying().(*types.Struct) for i := 0; i < typ.NumFields(); i++ { ft := typ.Field(i) n := ft.Name() if tag := typ.Tag(i); tag != "" { nn := reflect.StructTag(tag).Get("groot") if nn != "" { n = nn } } g.genStreamerType(ft.Type(), n) } g.printf("}))\n}\n\n") } func (g *Generator) genStreamerType(t types.Type, n string) { ut := t.Underlying() switch ut := ut.(type) { case *types.Basic: switch kind := ut.Kind(); kind { case types.Bool: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Uint8: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Uint16: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Uint32, types.Uint: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Uint64: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Int8: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Int16: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Int32, types.Int: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Int64: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Float32: 
g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Float64: g.printf("&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut, n, "", 0)) case types.Complex64: log.Fatalf("unhandled type: %v (underlying %v)\n", t, ut) // FIXME(sbinet) case types.Complex128: log.Fatalf("unhandled type: %v (underlying %v)\n", t, ut) // FIXME(sbinet) case types.String: g.printf("%s,\n", g.se(ut, n, "", 0)) default: log.Fatalf("unhandled type: %v (underlying: %v)\n", t, ut) } case *types.Array: // FIXME(sbinet): collect+visit element type. switch eut := ut.Elem().Underlying().(type) { case *types.Basic: switch kind := eut.Kind(); kind { default: g.printf( "&rdict.StreamerBasicType{StreamerElement: %s},\n", g.se(ut.Elem(), n, "+ rmeta.OffsetL", ut.Len()), ) case types.String: g.printf( "%s,\n", g.se(ut.Elem(), n, "+ rmeta.OffsetL", ut.Len()), ) } default: g.printf( "%s\n", g.se(ut.Elem(), n, "+ rmeta.OffsetL", ut.Len()), ) } case *types.Slice: // FIXME(sbinet): collect+visit element type. 
g.printf("rdict.NewStreamerSTL(%q, rmeta.STLvector, rmeta.%v),\n", n, gotype2RMeta(ut.Elem())) case *types.Struct: g.imps["go-hep.org/x/hep/groot/rbase"]++ g.printf( "&rdict.StreamerObjectAny{StreamerElement:rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Any,\nSize: %[4]d,\nEName:rdict.GoName2Cxx(%[3]q),\n}.New()},\n", n, "", t.String(), gosizes.Sizeof(ut), ) default: log.Fatalf("unhandled type: %v (underlying: %v)\n", t, ut) } } func (g *Generator) wt(t types.Type, n, meth, arr string) { ut := t.Underlying() switch ut := ut.(type) { case *types.Basic: switch kind := ut.Kind(); kind { case types.Bool: g.printf("w.Write%sBool(o.%s%s)\n", meth, n, arr) case types.Uint8: g.printf("w.Write%sU8(o.%s%s)\n", meth, n, arr) case types.Uint16: g.printf("w.Write%sU16(o.%s%s)\n", meth, n, arr) case types.Uint32: g.printf("w.Write%sU32(o.%s%s)\n", meth, n, arr) case types.Uint64: g.printf("w.Write%sU64(o.%s%s)\n", meth, n, arr) case types.Int8: g.printf("w.Write%sI8(o.%s%s)\n", meth, n, arr) case types.Int16: g.printf("w.Write%sI16(o.%s%s)\n", meth, n, arr) case types.Int32: g.printf("w.Write%sI32(o.%s%s)\n", meth, n, arr) case types.Int64: g.printf("w.Write%sI64(o.%s%s)\n", meth, n, arr) case types.Float32: g.printf("w.Write%sF32(o.%s%s)\n", meth, n, arr) case types.Float64: g.printf("w.Write%sF64(o.%s%s)\n", meth, n, arr) case types.Uint: g.printf("w.Write%sU64(uint64(o.%s%s))\n", meth, n, arr) case types.Int: g.printf("w.Write%sI64(int64(o.%s%s))\n", meth, n, arr) case types.Complex64: log.Fatalf("unhandled type: %v (underlying %v)\n", t, ut) // FIXME(sbinet) case types.Complex128: log.Fatalf("unhandled type: %v (underlying %v)\n", t, ut) // FIXME(sbinet) case types.String: g.printf("w.Write%sString(o.%s%s)\n", meth, n, arr) default: log.Fatalf("unhandled type: %v (underlying: %v)\n", t, ut) } case *types.Struct: g.printf("o.%s.MarshalROOT(w)\n", n) default: log.Fatalf("unhandled marshal type: %v (underlying %v)", t, ut) } } func (g *Generator) se(t 
types.Type, n, rtype string, arrlen int64) string { elmt := Element{ Size: 1, } if arrlen > 0 { elmt.Size = int32(arrlen) elmt.ArrLen = int32(arrlen) elmt.ArrDim = 1 } ut := t.Underlying() switch ut := ut.(type) { case *types.Basic: g.imps["go-hep.org/x/hep/groot/rbase"]++ switch kind := ut.Kind(); kind { case types.Bool: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Bool %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 1*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Uint8: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Uint8 %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 1*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Uint16: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Uint16 %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 2*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Uint32, types.Uint: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Uint32 %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 4*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Uint64: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Uint64 %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 8*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Int8: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Int8 %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 1*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Int16: return 
fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Int16 %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 2*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Int32, types.Int: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Int32 %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 4*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Int64: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Int64 %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 8*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Float32: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Float32 %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 4*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Float64: return fmt.Sprintf("rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.Float64 %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()", n, "", rmeta.GoType2Cxx[ut.Name()], rtype, 8*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) case types.Complex64: log.Fatalf("unhandled type: %v (underlying %v)\n", t, ut) // FIXME(sbinet) case types.Complex128: log.Fatalf("unhandled type: %v (underlying %v)\n", t, ut) // FIXME(sbinet) case types.String: return fmt.Sprintf("&rdict.StreamerString{rdict.Element{\nName: *rbase.NewNamed(%[1]q, %[2]q),\nType: rmeta.TString %[4]s,\nSize: %[5]d,\nEName:%[3]q,\nArrLen:%[6]d,\nArrDim:%[7]d,\n}.New()}", n, "", "TString", rtype, 24*elmt.Size, elmt.ArrLen, elmt.ArrDim, ) } case *types.Struct: // FIXME(sbinet): implement. // FIXME(sbinet): prevent recursion. 
old := g.buf g.buf = new(bytes.Buffer) g.genStreamerType(t, n) str := g.buf.String() g.buf = old return str } log.Printf("gen-streamer: unhandled type: %v (underlying %v)", t, ut) return "" } func (g *Generator) genMarshalType(t types.Type, n string) { ut := t.Underlying() switch ut := ut.(type) { case *types.Basic: switch kind := ut.Kind(); kind { case types.Bool: g.wt(ut, n, "", "") case types.Uint8: g.wt(ut, n, "", "") case types.Uint16: g.wt(ut, n, "", "") case types.Uint32: g.wt(ut, n, "", "") case types.Uint64: g.wt(ut, n, "", "") case types.Int8: g.wt(ut, n, "", "") case types.Int16: g.wt(ut, n, "", "") case types.Int32: g.wt(ut, n, "", "") case types.Int64: g.wt(ut, n, "", "") case types.Float32: g.wt(ut, n, "", "") case types.Float64: g.wt(ut, n, "", "") case types.Uint: g.wt(ut, n, "", "") case types.Int: g.wt(ut, n, "", "") case types.Complex64: log.Fatalf("unhandled type: %v (underlying %v)\n", t, ut) // FIXME(sbinet) case types.Complex128: log.Fatalf("unhandled type: %v (underlying %v)\n", t, ut) // FIXME(sbinet) case types.String: g.wt(ut, n, "", "") default: log.Fatalf("unhandled type: %v (underlying: %v)\n", t, ut) } case *types.Array: switch ut.Elem().Underlying().(type) { case *types.Basic: g.wt(ut.Elem(), n, "FastArray", "[:]") default: g.printf("for i := range o.%s {\n", n) g.wt(ut.Elem(), n+"[i]", "", "") g.printf("}\n") } case *types.Slice: g.wt(ut.Elem(), n, "FastArray", "") case *types.Struct: g.printf("o.%s.MarshalROOT(w)\n", n) default: log.Fatalf("gen-marshal-type: unhandled type: %v (underlying: %v)\n", t, ut) } } func (g *Generator) Format() ([]byte, error) { buf := new(bytes.Buffer) buf.WriteString(fmt.Sprintf(`// DO NOT EDIT; automatically generated by %[1]s package %[2]s import ( `, "root-gen-streamer", g.pkg.Name(), )) // FIXME(sbinet): separate stdlib from 3rd-party imports. 
for k := range g.imps { fmt.Fprintf(buf, "%q\n", k) } fmt.Fprintf(buf, ")\n\n") buf.Write(g.buf.Bytes()) src, err := format.Source(buf.Bytes()) if err != nil { log.Printf("=== error ===\n%s\n", buf.Bytes()) } return src, err } func init() { pkg, err := importer.Default().Import("encoding") if err != nil { log.Fatalf("error finding package \"encoding\": %v\n", err) } o := pkg.Scope().Lookup("BinaryMarshaler") if o == nil { log.Fatalf("could not find interface encoding.BinaryMarshaler\n") } binMa = o.(*types.TypeName).Type().Underlying().(*types.Interface) o = pkg.Scope().Lookup("BinaryUnmarshaler") if o == nil { log.Fatalf("could not find interface encoding.BinaryUnmarshaler\n") } binUn = o.(*types.TypeName).Type().Underlying().(*types.Interface) pkg, err = importer.Default().Import("go-hep.org/x/hep/groot/rbytes") if err != nil { log.Fatalf("could not find package %q: %v", "go-hep.org/x/hep/groot/rbytes", err) } o = pkg.Scope().Lookup("RVersioner") if o == nil { log.Fatalf("could not find interface rbytes.RVersioner") } rootVers = o.(*types.TypeName).Type().Underlying().(*types.Interface) sz := int64(reflect.TypeOf(int(0)).Size()) gosizes = &types.StdSizes{WordSize: sz, MaxAlign: sz} } func gotype2RMeta(t types.Type) rmeta.Enum { switch ut := t.Underlying().(type) { case *types.Basic: switch ut.Kind() { case types.Bool: return rmeta.Bool case types.Uint8: return rmeta.Uint8 case types.Uint16: return rmeta.Uint16 case types.Uint32, types.Uint: return rmeta.Uint32 case types.Uint64: return rmeta.Uint64 case types.Int8: return rmeta.Int8 case types.Int16: return rmeta.Int16 case types.Int32, types.Int: return rmeta.Int32 case types.Int64: return rmeta.Int64 case types.Float32: return rmeta.Float32 case types.Float64: return rmeta.Float64 case types.String: return rmeta.TString } case *types.Struct: return rmeta.Any case *types.Slice: return rmeta.STL case *types.Array: return rmeta.OffsetL + gotype2RMeta(ut.Elem()) } return -1 } // GoName2Cxx translates a 
fully-qualified Go type name to a C++ one. // e.g.: // - go-hep.org/x/hep/hbook.H1D -> go_hep_org::x::hep::hbook::H1D func GoName2Cxx(name string) string { repl := strings.NewReplacer( "-", "_", "/", "::", ".", "_", ) i := strings.LastIndex(name, ".") if i > 0 { name = name[:i] + "::" + name[i+1:] } return repl.Replace(name) } // Typename returns a language dependent typename, usually encoded inside a // StreamerInfo's title. func Typename(name, title string) (string, bool) { if title == "" { return name, false } i := strings.Index(title, ";") if i <= 0 { return name, false } lang := title[:i] title = strings.TrimSpace(title[i+1:]) switch lang { case "Go": return title, GoName2Cxx(title) == name default: return title, false } }
/* Copyright (c) 2015, Hendrik van Wyk All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of invertergui nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package webui import ( "fmt" "reflect" "testing" "time" "github.com/diebietse/invertergui/mk2driver" ) func TestWebGui(t *testing.T) { t.Skip("Not yet implimented") //TODO figure out how to test template output. 
} type templateTest struct { input *mk2driver.Mk2Info output *templateInput } var fakenow = time.Date(2017, 1, 2, 3, 4, 5, 6, time.UTC) var templateInputTests = []templateTest{ { input: &mk2driver.Mk2Info{ OutCurrent: 2.0, InCurrent: 2.3, OutVoltage: 230.0, InVoltage: 230.1, BatVoltage: 25, BatCurrent: -10, InFrequency: 50, OutFrequency: 50, ChargeState: 1, LEDs: map[mk2driver.Led]mk2driver.LEDstate{mk2driver.LedMain: mk2driver.LedOn}, Errors: nil, Timestamp: fakenow, }, output: &templateInput{ Error: nil, Date: fakenow.Format(time.RFC1123Z), OutCurrent: "2.00", OutVoltage: "230.00", OutPower: "460.00", InCurrent: "2.30", InVoltage: "230.10", InPower: "529.23", InMinOut: "69.23", BatVoltage: "25.00", BatCurrent: "-10.00", BatPower: "-250.00", InFreq: "50.00", OutFreq: "50.00", BatCharge: "100.00", LedMap: map[string]string{"led_mains": "dot-green"}, }, }, } func TestTemplateInput(t *testing.T) { for i := range templateInputTests { templateInput := buildTemplateInput(templateInputTests[i].input) if !reflect.DeepEqual(templateInput, templateInputTests[i].output) { t.Errorf("buildTemplateInput not producing expected results") fmt.Printf("%v\n%v\n", templateInput, templateInputTests[i].output) } } } Remove old invalid test /* Copyright (c) 2015, Hendrik van Wyk All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of invertergui nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package webui import ( "fmt" "reflect" "testing" "time" "github.com/diebietse/invertergui/mk2driver" ) type templateTest struct { input *mk2driver.Mk2Info output *templateInput } var fakenow = time.Date(2017, 1, 2, 3, 4, 5, 6, time.UTC) var templateInputTests = []templateTest{ { input: &mk2driver.Mk2Info{ OutCurrent: 2.0, InCurrent: 2.3, OutVoltage: 230.0, InVoltage: 230.1, BatVoltage: 25, BatCurrent: -10, InFrequency: 50, OutFrequency: 50, ChargeState: 1, LEDs: map[mk2driver.Led]mk2driver.LEDstate{mk2driver.LedMain: mk2driver.LedOn}, Errors: nil, Timestamp: fakenow, }, output: &templateInput{ Error: nil, Date: fakenow.Format(time.RFC1123Z), OutCurrent: "2.00", OutVoltage: "230.00", OutPower: "460.00", InCurrent: "2.30", InVoltage: "230.10", InPower: "529.23", InMinOut: "69.23", BatVoltage: "25.00", BatCurrent: "-10.00", BatPower: "-250.00", InFreq: "50.00", OutFreq: "50.00", BatCharge: "100.00", LedMap: map[string]string{"led_mains": "dot-green"}, }, }, } func TestTemplateInput(t *testing.T) { for i := range templateInputTests { templateInput := buildTemplateInput(templateInputTests[i].input) if !reflect.DeepEqual(templateInput, templateInputTests[i].output) { t.Errorf("buildTemplateInput not producing expected 
results") fmt.Printf("%v\n%v\n", templateInput, templateInputTests[i].output) } } }
/* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package algorithm import ( "testing" "k8s.io/kubernetes/pkg/api" ) // Some functions used by multiple scheduler tests. type schedulerTester struct { t *testing.T scheduler ScheduleAlgorithm nodeLister NodeLister } // Call if you know exactly where pod should get scheduled. func (st *schedulerTester) expectSchedule(pod *api.Pod, expected string) { actual, err := st.scheduler.Schedule(pod, st.nodeLister) if err != nil { st.t.Errorf("Unexpected error %v\nTried to scheduler: %#v", err, pod) return } if actual != expected { st.t.Errorf("Unexpected scheduling value: %v, expected %v", actual, expected) } } // Call if you can't predict where pod will be scheduled. func (st *schedulerTester) expectSuccess(pod *api.Pod) { _, err := st.scheduler.Schedule(pod, st.nodeLister) if err != nil { st.t.Errorf("Unexpected error %v\nTried to scheduler: %#v", err, pod) return } } // Call if pod should *not* schedule. func (st *schedulerTester) expectFailure(pod *api.Pod) { _, err := st.scheduler.Schedule(pod, st.nodeLister) if err == nil { st.t.Error("Unexpected non-error") } } fix typo /* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package algorithm import ( "testing" "k8s.io/kubernetes/pkg/api" ) // Some functions used by multiple scheduler tests. type schedulerTester struct { t *testing.T scheduler ScheduleAlgorithm nodeLister NodeLister } // Call if you know exactly where pod should get scheduled. func (st *schedulerTester) expectSchedule(pod *api.Pod, expected string) { actual, err := st.scheduler.Schedule(pod, st.nodeLister) if err != nil { st.t.Errorf("Unexpected error %v\nTried to schedule: %#v", err, pod) return } if actual != expected { st.t.Errorf("Unexpected scheduling value: %v, expected %v", actual, expected) } } // Call if you can't predict where pod will be scheduled. func (st *schedulerTester) expectSuccess(pod *api.Pod) { _, err := st.scheduler.Schedule(pod, st.nodeLister) if err != nil { st.t.Errorf("Unexpected error %v\nTried to scheduler: %#v", err, pod) return } } // Call if pod should *not* schedule. func (st *schedulerTester) expectFailure(pod *api.Pod) { _, err := st.scheduler.Schedule(pod, st.nodeLister) if err == nil { st.t.Error("Unexpected non-error") } }
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package algorithm import ( "testing" "k8s.io/kubernetes/pkg/api" ) // Some functions used by multiple scheduler tests. type schedulerTester struct { t *testing.T scheduler ScheduleAlgorithm nodeLister NodeLister } // Call if you know exactly where pod should get scheduled. func (st *schedulerTester) expectSchedule(pod *api.Pod, expected string) { actual, err := st.scheduler.Schedule(pod, st.nodeLister) if err != nil { st.t.Errorf("Unexpected error %v\nTried to schedule: %#v", err, pod) return } if actual != expected { st.t.Errorf("Unexpected scheduling value: %v, expected %v", actual, expected) } } // Call if you can't predict where pod will be scheduled. func (st *schedulerTester) expectSuccess(pod *api.Pod) { _, err := st.scheduler.Schedule(pod, st.nodeLister) if err != nil { st.t.Errorf("Unexpected error %v\nTried to scheduler: %#v", err, pod) return } } // Call if pod should *not* schedule. func (st *schedulerTester) expectFailure(pod *api.Pod) { _, err := st.scheduler.Schedule(pod, st.nodeLister) if err == nil { st.t.Error("Unexpected non-error") } } Error info "scheduler" modify /* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package algorithm import ( "testing" "k8s.io/kubernetes/pkg/api" ) // Some functions used by multiple scheduler tests. type schedulerTester struct { t *testing.T scheduler ScheduleAlgorithm nodeLister NodeLister } // Call if you know exactly where pod should get scheduled. func (st *schedulerTester) expectSchedule(pod *api.Pod, expected string) { actual, err := st.scheduler.Schedule(pod, st.nodeLister) if err != nil { st.t.Errorf("Unexpected error %v\nTried to schedule: %#v", err, pod) return } if actual != expected { st.t.Errorf("Unexpected scheduling value: %v, expected %v", actual, expected) } } // Call if you can't predict where pod will be scheduled. func (st *schedulerTester) expectSuccess(pod *api.Pod) { _, err := st.scheduler.Schedule(pod, st.nodeLister) if err != nil { st.t.Errorf("Unexpected error %v\nTried to schedule: %#v", err, pod) return } } // Call if pod should *not* schedule. func (st *schedulerTester) expectFailure(pod *api.Pod) { _, err := st.scheduler.Schedule(pod, st.nodeLister) if err == nil { st.t.Error("Unexpected non-error") } }
package guardian import ( "context" "encoding/json" "errors" "fmt" "log" "net" "net/http" "time" "github.com/scjalliance/resourceful/environment" "github.com/scjalliance/resourceful/guardian/transport" "github.com/scjalliance/resourceful/lease" "github.com/scjalliance/resourceful/lease/leaseutil" "github.com/scjalliance/resourceful/policy" "github.com/scjalliance/resourceful/strategy" ) // ServerConfig is the configuration for a resourceful guardian server. type ServerConfig struct { ListenSpec string PolicyProvider policy.Provider LeaseProvider lease.Provider ShutdownTimeout time.Duration // Time allowed to the HTTP server to perform a graceful shutdown Logger *log.Logger } // Server is a resourceful guardian HTTP server that coordinates locks on // finite resources. type Server struct { ServerConfig } // NewServer creates a new resourceful guardian server that will handle HTTP // requests. func NewServer(cfg ServerConfig) *Server { return &Server{ ServerConfig: cfg, } } // Run will create and run a resourceful guardian server until the provided // context is canceled. func Run(ctx context.Context, cfg ServerConfig) (err error) { s := NewServer(cfg) return s.Run(ctx) } // Run will start the server and let it run until the context is cancelled. // // If the server cannot be started it will return an error immediately. 
func (s *Server) Run(ctx context.Context) (err error) { s.purge() defer s.purge() printf(s.Logger, "Starting HTTP listener on %s", s.ListenSpec) listener, err := net.Listen("tcp", s.ListenSpec) if err != nil { s.Logger.Printf("Error creating HTTP listener on %s: %v", s.ListenSpec, err) return } mux := http.NewServeMux() mux.Handle("/health", http.HandlerFunc(s.healthHandler)) mux.Handle("/leases", http.HandlerFunc(s.leasesHandler)) mux.Handle("/acquire", http.HandlerFunc(s.acquireHandler)) mux.Handle("/release", http.HandlerFunc(s.releaseHandler)) srv := &http.Server{ ReadTimeout: 60 * time.Second, WriteTimeout: 60 * time.Second, MaxHeaderBytes: 1 << 16, Handler: mux, } result := make(chan error) go func() { result <- srv.Serve(listener) close(result) }() select { case err = <-result: printf(s.Logger, "Stopped HTTP listener on %s due to error: %v", s.ListenSpec, err) return case <-ctx.Done(): } printf(s.Logger, "Stopping HTTP listener on %s due to shutdown signal", s.ListenSpec) shutdownCtx, cancel := context.WithTimeout(context.Background(), s.ShutdownTimeout) defer cancel() srv.Shutdown(shutdownCtx) err = <-result printf(s.Logger, "Stopped HTTP listener on %s", s.ListenSpec) return } // healthHandler will return the condition of the server. func (s *Server) healthHandler(w http.ResponseWriter, r *http.Request) { response := transport.HealthResponse{OK: true} data, err := json.Marshal(response) if err != nil { http.Error(w, "Unable to marshal health response", http.StatusBadRequest) return } fmt.Fprintf(w, string(data)) } // leasesHandler will return the set of leases for a particular resource. 
func (s *Server) leasesHandler(w http.ResponseWriter, r *http.Request) { req, err := parseRequest(r) if err != nil { err = fmt.Errorf("unable to parse request: %v", err) printf(s.Logger, "Bad leases request: %v\n", err) http.Error(w, err.Error(), http.StatusBadRequest) return } revision, leases, err := s.LeaseProvider.LeaseView(req.Resource) if err != nil { printf(s.Logger, "Lease retrieval failed: %v\n", err) } now := time.Now() tx := lease.NewTx(req.Resource, revision, leases) leaseutil.Refresh(tx, now) response := transport.LeasesResponse{ Request: req, Leases: tx.Leases(), } data, err := json.MarshalIndent(response, "", "\t") if err != nil { http.Error(w, "Unable to marshal health response", http.StatusBadRequest) return } fmt.Fprintf(w, string(data)) } // acquireHandler will attempt to acquire a lease for the specified resource. func (s *Server) acquireHandler(w http.ResponseWriter, r *http.Request) { req, pol, err := s.initRequest(r) if err != nil { printf(s.Logger, "Bad acquire request: %v\n", err) http.Error(w, err.Error(), http.StatusBadRequest) return } prefix := fmt.Sprintf("%s %s", req.Resource, req.Consumer) printf(s.Logger, "%s: Lease acquisition requested\n", prefix) strat := pol.Strategy() limit := pol.Limit() duration := pol.Duration() decay := pol.Decay() refresh := pol.Refresh() var leases lease.Set var ls lease.Lease mode := "Creation" // Only used for logging for attempt := 0; attempt < 5; attempt++ { var revision uint64 revision, leases, err = s.LeaseProvider.LeaseView(req.Resource) if err != nil { printf(s.Logger, "%s: Lease retrieval failed: %v\n", prefix, err) } now := time.Now() ls = lease.Lease{ Resource: req.Resource, Consumer: req.Consumer, Instance: req.Instance, Environment: req.Environment, Started: now, Renewed: now, Strategy: strat, Limit: limit, Duration: duration, Decay: decay, Refresh: refresh, } if ls.Refresh.Active != 0 { if ls.Duration <= ls.Refresh.Active { printf(s.Logger, "%s: The lease policy specified an active refresh 
interval of %s for a lease with a duration of %s. The refresh interval will be overridden.\n", prefix, ls.Refresh.Active.String(), ls.Duration.String()) ls.Refresh.Active = 0 // Use the default refresh rate instead of nonsense } } if ls.Refresh.Queued != 0 { if ls.Duration <= ls.Refresh.Queued { printf(s.Logger, "%s: The lease policy specified a queued refresh interval of %s for a lease with a duration of %s. The refresh interval will be overridden.\n", prefix, ls.Refresh.Queued.String(), ls.Duration.String()) ls.Refresh.Queued = 0 // Use the default refresh rate instead of nonsense } } tx := lease.NewTx(req.Resource, revision, leases) acc := leaseutil.Refresh(tx, now) consumed := acc.Total(strat) released := acc.Released(req.Consumer) existing, found := tx.Instance(req.Consumer, req.Instance) if found { if existing.Status == lease.Released { // Renewal of a released lease, possibly because of timing skew // Because the lease has expired we treat this as a creation if consumed <= limit { ls.Status = lease.Active } else { ls.Status = lease.Queued } tx.Update(existing.Consumer, existing.Instance, ls) } else { // Renewal of active or queued lease mode = "Renewal" ls.Status = existing.Status ls.Started = existing.Started tx.Update(existing.Consumer, existing.Instance, ls) } } else { if released > 0 && consumed <= limit { // Lease replacement (for an expired or released lease previously // issued to the the same consumer, that's in a decaying state) replaceable := tx.Consumer(req.Consumer).Status(lease.Released) if uint(len(replaceable)) != released { panic("server: acquireHandler: accumulator returned a different count for relased leases than the transaction") } replaced := replaceable[released-1] ls.Status = lease.Active tx.Update(replaced.Consumer, replaced.Instance, ls) } else { // New lease if leaseutil.CanActivate(strat, acc.Active(req.Consumer), consumed, limit) { ls.Status = lease.Active } else { ls.Status = lease.Queued } tx.Create(ls) } } // Don't bother 
committing empty transactions if tx.Empty() { break } // Attempt to commit the transaction err = s.LeaseProvider.LeaseCommit(tx) if err == nil { leases = tx.Leases() break } printf(s.Logger, "%s: Lease acquisition failed: %v\n", prefix, err) } if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } summary := statsSummary(limit, leases.Stats(), strat) printf(s.Logger, "%s: %s of %s lease succeeded (%s)\n", prefix, mode, ls.Status, summary) response := transport.AcquireResponse{ Request: req, Lease: ls, Leases: leases, } data, err := json.Marshal(response) if err != nil { printf(s.Logger, "%s: Failed to marshal response: %v\n", prefix, err) http.Error(w, "Failed to marshal response", http.StatusBadRequest) return } fmt.Fprintf(w, string(data)) } // releaseHandler will attempt to remove the lease for the given resource and // consumer. func (s *Server) releaseHandler(w http.ResponseWriter, r *http.Request) { req, pol, err := s.initRequest(r) if err != nil { printf(s.Logger, "Bad release request: %v\n", err) http.Error(w, err.Error(), http.StatusBadRequest) return } prefix := fmt.Sprintf("%s %s", req.Resource, req.Consumer) printf(s.Logger, "%s: Release requested\n", prefix) strat := pol.Strategy() limit := pol.Limit() var leases lease.Set var ls lease.Lease var found bool for attempt := 0; attempt < 5; attempt++ { var revision uint64 revision, leases, err = s.LeaseProvider.LeaseView(req.Resource) if err != nil { printf(s.Logger, "%s: Release failed: %v\n", prefix, err) continue } // Prepare a delete transaction now := time.Now() tx := lease.NewTx(req.Resource, revision, leases) leaseutil.Refresh(tx, now) // Update stale values ls, found = tx.Instance(req.Consumer, req.Instance) tx.Release(req.Consumer, req.Instance, now) leaseutil.Refresh(tx, now) // Updates leases after release // Don't bother committing empty transactions if tx.Empty() { break } // Attempt to commit the transaction err = s.LeaseProvider.LeaseCommit(tx) if err == nil { leases = 
tx.Leases() break } printf(s.Logger, "%s: Release failed: %v\n", prefix, err) } if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } summary := statsSummary(limit, leases.Stats(), strat) if found { if ls.Status == lease.Released { printf(s.Logger, "%s: Release ignored because the lease had already been released (%s)\n", prefix, summary) } else { printf(s.Logger, "%s: Release of %s lease succeeded (%s)\n", prefix, ls.Status, summary) } } else { printf(s.Logger, "%s: Release ignored because the lease could not be found (%s)\n", prefix, summary) } response := transport.ReleaseResponse{ Request: req, Success: true, } data, err := json.Marshal(response) if err != nil { printf(s.Logger, "%s: Failed to marshal response: %v\n", prefix, err) http.Error(w, "Failed to marshal response", http.StatusBadRequest) return } fmt.Fprintf(w, string(data)) } func (s *Server) purge() error { resources, err := s.LeaseProvider.LeaseResources() if err != nil { return err } for _, resource := range resources { for attempt := 0; attempt < 5; attempt++ { var ( leases lease.Set revision uint64 ) revision, leases, err = s.LeaseProvider.LeaseView(resource) if err != nil { printf(s.Logger, "Purge of \"%s\" failed: %v\n", resource, err) continue } // Prepare a purge transaction now := time.Now() tx := lease.NewTx(resource, revision, leases) leaseutil.Refresh(tx, now) if len(tx.Ops()) == 0 { break // Nothing to purge } // Attempt to commit the transaction err = s.LeaseProvider.LeaseCommit(tx) if err == nil { break } printf(s.Logger, "Purge of \"%s\" failed: %v\n", resource, err) } if err != nil { return err } } return nil } func (s *Server) initRequest(r *http.Request) (req transport.Request, policies policy.Set, err error) { req, err = parseRequest(r) if err != nil { err = fmt.Errorf("unable to parse request: %v", err) return } all, err := s.PolicyProvider.Policies() if err != nil { err = fmt.Errorf("unable to retrieve policies: %v", err) return } policies = 
all.Match(req.Resource, req.Consumer, req.Environment) resource := policies.Resource() if resource != "" { req.Resource = resource } req.Environment["resource.id"] = req.Resource consumer := policies.Consumer() if consumer != "" { req.Consumer = consumer } env := policies.Environment() if len(env) > 0 { req.Environment = environment.Merge(req.Environment, env) } if req.Resource == "" { err = errors.New("resource not specified or determinable") } else if req.Consumer == "" { err = errors.New("consumer not specified or determinable") } return } func parseRequest(r *http.Request) (req transport.Request, err error) { err = r.ParseForm() if err != nil { return } req.Environment = make(environment.Environment) for k, values := range r.Form { if len(values) == 0 { continue } value := values[0] // Ignore multiple values switch k { case "resource": req.Resource = value case "consumer": req.Consumer = value case "instance": req.Instance = value default: req.Environment[k] = value } } return } func statsSummary(limit uint, stats lease.Stats, strat strategy.Strategy) string { consumed := stats.Consumed(strat) active := stats.Active(strat) released := stats.Released(strat) queued := stats.Queued(strat) return fmt.Sprintf("alloc: %d/%d, active: %d, released: %d, queued: %d", consumed, limit, active, released, queued) } func printf(logger *log.Logger, format string, v ...interface{}) { if logger != nil { logger.Printf(format, v...) } } guardian: Made Server.Purge() public package guardian import ( "context" "encoding/json" "errors" "fmt" "log" "net" "net/http" "time" "github.com/scjalliance/resourceful/environment" "github.com/scjalliance/resourceful/guardian/transport" "github.com/scjalliance/resourceful/lease" "github.com/scjalliance/resourceful/lease/leaseutil" "github.com/scjalliance/resourceful/policy" "github.com/scjalliance/resourceful/strategy" ) // ServerConfig is the configuration for a resourceful guardian server. 
type ServerConfig struct { ListenSpec string PolicyProvider policy.Provider LeaseProvider lease.Provider ShutdownTimeout time.Duration // Time allowed to the HTTP server to perform a graceful shutdown Logger *log.Logger } // Server is a resourceful guardian HTTP server that coordinates locks on // finite resources. type Server struct { ServerConfig } // NewServer creates a new resourceful guardian server that will handle HTTP // requests. func NewServer(cfg ServerConfig) *Server { return &Server{ ServerConfig: cfg, } } // Run will create and run a resourceful guardian server until the provided // context is canceled. func Run(ctx context.Context, cfg ServerConfig) (err error) { s := NewServer(cfg) return s.Run(ctx) } // Run will start the server and let it run until the context is cancelled. // // If the server cannot be started it will return an error immediately. func (s *Server) Run(ctx context.Context) (err error) { s.Purge() defer s.Purge() printf(s.Logger, "Starting HTTP listener on %s", s.ListenSpec) listener, err := net.Listen("tcp", s.ListenSpec) if err != nil { s.Logger.Printf("Error creating HTTP listener on %s: %v", s.ListenSpec, err) return } mux := http.NewServeMux() mux.Handle("/health", http.HandlerFunc(s.healthHandler)) mux.Handle("/leases", http.HandlerFunc(s.leasesHandler)) mux.Handle("/acquire", http.HandlerFunc(s.acquireHandler)) mux.Handle("/release", http.HandlerFunc(s.releaseHandler)) srv := &http.Server{ ReadTimeout: 60 * time.Second, WriteTimeout: 60 * time.Second, MaxHeaderBytes: 1 << 16, Handler: mux, } result := make(chan error) go func() { result <- srv.Serve(listener) close(result) }() select { case err = <-result: printf(s.Logger, "Stopped HTTP listener on %s due to error: %v", s.ListenSpec, err) return case <-ctx.Done(): } printf(s.Logger, "Stopping HTTP listener on %s due to shutdown signal", s.ListenSpec) shutdownCtx, cancel := context.WithTimeout(context.Background(), s.ShutdownTimeout) defer cancel() srv.Shutdown(shutdownCtx) 
err = <-result printf(s.Logger, "Stopped HTTP listener on %s", s.ListenSpec) return } // healthHandler will return the condition of the server. func (s *Server) healthHandler(w http.ResponseWriter, r *http.Request) { response := transport.HealthResponse{OK: true} data, err := json.Marshal(response) if err != nil { http.Error(w, "Unable to marshal health response", http.StatusBadRequest) return } fmt.Fprintf(w, string(data)) } // leasesHandler will return the set of leases for a particular resource. func (s *Server) leasesHandler(w http.ResponseWriter, r *http.Request) { req, err := parseRequest(r) if err != nil { err = fmt.Errorf("unable to parse request: %v", err) printf(s.Logger, "Bad leases request: %v\n", err) http.Error(w, err.Error(), http.StatusBadRequest) return } revision, leases, err := s.LeaseProvider.LeaseView(req.Resource) if err != nil { printf(s.Logger, "Lease retrieval failed: %v\n", err) } now := time.Now() tx := lease.NewTx(req.Resource, revision, leases) leaseutil.Refresh(tx, now) response := transport.LeasesResponse{ Request: req, Leases: tx.Leases(), } data, err := json.MarshalIndent(response, "", "\t") if err != nil { http.Error(w, "Unable to marshal health response", http.StatusBadRequest) return } fmt.Fprintf(w, string(data)) } // acquireHandler will attempt to acquire a lease for the specified resource. 
func (s *Server) acquireHandler(w http.ResponseWriter, r *http.Request) { req, pol, err := s.initRequest(r) if err != nil { printf(s.Logger, "Bad acquire request: %v\n", err) http.Error(w, err.Error(), http.StatusBadRequest) return } prefix := fmt.Sprintf("%s %s", req.Resource, req.Consumer) printf(s.Logger, "%s: Lease acquisition requested\n", prefix) strat := pol.Strategy() limit := pol.Limit() duration := pol.Duration() decay := pol.Decay() refresh := pol.Refresh() var leases lease.Set var ls lease.Lease mode := "Creation" // Only used for logging for attempt := 0; attempt < 5; attempt++ { var revision uint64 revision, leases, err = s.LeaseProvider.LeaseView(req.Resource) if err != nil { printf(s.Logger, "%s: Lease retrieval failed: %v\n", prefix, err) } now := time.Now() ls = lease.Lease{ Resource: req.Resource, Consumer: req.Consumer, Instance: req.Instance, Environment: req.Environment, Started: now, Renewed: now, Strategy: strat, Limit: limit, Duration: duration, Decay: decay, Refresh: refresh, } if ls.Refresh.Active != 0 { if ls.Duration <= ls.Refresh.Active { printf(s.Logger, "%s: The lease policy specified an active refresh interval of %s for a lease with a duration of %s. The refresh interval will be overridden.\n", prefix, ls.Refresh.Active.String(), ls.Duration.String()) ls.Refresh.Active = 0 // Use the default refresh rate instead of nonsense } } if ls.Refresh.Queued != 0 { if ls.Duration <= ls.Refresh.Queued { printf(s.Logger, "%s: The lease policy specified a queued refresh interval of %s for a lease with a duration of %s. 
The refresh interval will be overridden.\n", prefix, ls.Refresh.Queued.String(), ls.Duration.String()) ls.Refresh.Queued = 0 // Use the default refresh rate instead of nonsense } } tx := lease.NewTx(req.Resource, revision, leases) acc := leaseutil.Refresh(tx, now) consumed := acc.Total(strat) released := acc.Released(req.Consumer) existing, found := tx.Instance(req.Consumer, req.Instance) if found { if existing.Status == lease.Released { // Renewal of a released lease, possibly because of timing skew // Because the lease has expired we treat this as a creation if consumed <= limit { ls.Status = lease.Active } else { ls.Status = lease.Queued } tx.Update(existing.Consumer, existing.Instance, ls) } else { // Renewal of active or queued lease mode = "Renewal" ls.Status = existing.Status ls.Started = existing.Started tx.Update(existing.Consumer, existing.Instance, ls) } } else { if released > 0 && consumed <= limit { // Lease replacement (for an expired or released lease previously // issued to the the same consumer, that's in a decaying state) replaceable := tx.Consumer(req.Consumer).Status(lease.Released) if uint(len(replaceable)) != released { panic("server: acquireHandler: accumulator returned a different count for relased leases than the transaction") } replaced := replaceable[released-1] ls.Status = lease.Active tx.Update(replaced.Consumer, replaced.Instance, ls) } else { // New lease if leaseutil.CanActivate(strat, acc.Active(req.Consumer), consumed, limit) { ls.Status = lease.Active } else { ls.Status = lease.Queued } tx.Create(ls) } } // Don't bother committing empty transactions if tx.Empty() { break } // Attempt to commit the transaction err = s.LeaseProvider.LeaseCommit(tx) if err == nil { leases = tx.Leases() break } printf(s.Logger, "%s: Lease acquisition failed: %v\n", prefix, err) } if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } summary := statsSummary(limit, leases.Stats(), strat) printf(s.Logger, "%s: %s of %s lease 
succeeded (%s)\n", prefix, mode, ls.Status, summary) response := transport.AcquireResponse{ Request: req, Lease: ls, Leases: leases, } data, err := json.Marshal(response) if err != nil { printf(s.Logger, "%s: Failed to marshal response: %v\n", prefix, err) http.Error(w, "Failed to marshal response", http.StatusBadRequest) return } fmt.Fprintf(w, string(data)) } // releaseHandler will attempt to remove the lease for the given resource and // consumer. func (s *Server) releaseHandler(w http.ResponseWriter, r *http.Request) { req, pol, err := s.initRequest(r) if err != nil { printf(s.Logger, "Bad release request: %v\n", err) http.Error(w, err.Error(), http.StatusBadRequest) return } prefix := fmt.Sprintf("%s %s", req.Resource, req.Consumer) printf(s.Logger, "%s: Release requested\n", prefix) strat := pol.Strategy() limit := pol.Limit() var leases lease.Set var ls lease.Lease var found bool for attempt := 0; attempt < 5; attempt++ { var revision uint64 revision, leases, err = s.LeaseProvider.LeaseView(req.Resource) if err != nil { printf(s.Logger, "%s: Release failed: %v\n", prefix, err) continue } // Prepare a delete transaction now := time.Now() tx := lease.NewTx(req.Resource, revision, leases) leaseutil.Refresh(tx, now) // Update stale values ls, found = tx.Instance(req.Consumer, req.Instance) tx.Release(req.Consumer, req.Instance, now) leaseutil.Refresh(tx, now) // Updates leases after release // Don't bother committing empty transactions if tx.Empty() { break } // Attempt to commit the transaction err = s.LeaseProvider.LeaseCommit(tx) if err == nil { leases = tx.Leases() break } printf(s.Logger, "%s: Release failed: %v\n", prefix, err) } if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } summary := statsSummary(limit, leases.Stats(), strat) if found { if ls.Status == lease.Released { printf(s.Logger, "%s: Release ignored because the lease had already been released (%s)\n", prefix, summary) } else { printf(s.Logger, "%s: Release of %s lease 
succeeded (%s)\n", prefix, ls.Status, summary) } } else { printf(s.Logger, "%s: Release ignored because the lease could not be found (%s)\n", prefix, summary) } response := transport.ReleaseResponse{ Request: req, Success: true, } data, err := json.Marshal(response) if err != nil { printf(s.Logger, "%s: Failed to marshal response: %v\n", prefix, err) http.Error(w, "Failed to marshal response", http.StatusBadRequest) return } fmt.Fprintf(w, string(data)) } // Purge instructs the server to conduct a full survey of all lease data // and delete expired leases. func (s *Server) Purge() error { resources, err := s.LeaseProvider.LeaseResources() if err != nil { return err } for _, resource := range resources { for attempt := 0; attempt < 5; attempt++ { var ( leases lease.Set revision uint64 ) revision, leases, err = s.LeaseProvider.LeaseView(resource) if err != nil { printf(s.Logger, "Purge of \"%s\" failed: %v\n", resource, err) continue } // Prepare a purge transaction now := time.Now() tx := lease.NewTx(resource, revision, leases) leaseutil.Refresh(tx, now) if len(tx.Ops()) == 0 { break // Nothing to purge } // Attempt to commit the transaction err = s.LeaseProvider.LeaseCommit(tx) if err == nil { break } printf(s.Logger, "Purge of \"%s\" failed: %v\n", resource, err) } if err != nil { return err } } return nil } func (s *Server) initRequest(r *http.Request) (req transport.Request, policies policy.Set, err error) { req, err = parseRequest(r) if err != nil { err = fmt.Errorf("unable to parse request: %v", err) return } all, err := s.PolicyProvider.Policies() if err != nil { err = fmt.Errorf("unable to retrieve policies: %v", err) return } policies = all.Match(req.Resource, req.Consumer, req.Environment) resource := policies.Resource() if resource != "" { req.Resource = resource } req.Environment["resource.id"] = req.Resource consumer := policies.Consumer() if consumer != "" { req.Consumer = consumer } env := policies.Environment() if len(env) > 0 { req.Environment = 
environment.Merge(req.Environment, env) } if req.Resource == "" { err = errors.New("resource not specified or determinable") } else if req.Consumer == "" { err = errors.New("consumer not specified or determinable") } return } func parseRequest(r *http.Request) (req transport.Request, err error) { err = r.ParseForm() if err != nil { return } req.Environment = make(environment.Environment) for k, values := range r.Form { if len(values) == 0 { continue } value := values[0] // Ignore multiple values switch k { case "resource": req.Resource = value case "consumer": req.Consumer = value case "instance": req.Instance = value default: req.Environment[k] = value } } return } func statsSummary(limit uint, stats lease.Stats, strat strategy.Strategy) string { consumed := stats.Consumed(strat) active := stats.Active(strat) released := stats.Released(strat) queued := stats.Queued(strat) return fmt.Sprintf("alloc: %d/%d, active: %d, released: %d, queued: %d", consumed, limit, active, released, queued) } func printf(logger *log.Logger, format string, v ...interface{}) { if logger != nil { logger.Printf(format, v...) } }
// Simple use of the tuntap package that prints packets received by the interface. package main import ( "fmt" "os" "code.google.com/p/tuntap" ) func main() { if len(os.Args) != 3 { fmt.Println("syntax:", os.Args[0], "tun|tap", "<device name>") return } var typ tuntap.DevKind switch os.Args[1] { case "tun": typ = tuntap.DevTun case "tap": typ = tuntap.DevTap default: fmt.Println("Unknown device type", os.Args[1]) return } tun, err := tuntap.Open(os.Args[2], typ) if err != nil { fmt.Println("Error opening tun/tap device:", err) return } fmt.Println("Listening on", tun.Name()) for { pkt, err := tun.ReadPacket() if err != nil { fmt.Println("Read error:", err) } else { if pkt.Truncated { fmt.Printf("!") } else { fmt.Printf(" ") } fmt.Printf("%x %x\n", pkt.Protocol, pkt.Packet) } } } Kill more code.google.com/p/ references, and update example code to new API (It's been broken for a while) // Simple use of the tuntap package that prints packets received by the interface. package main import ( "fmt" "os" "github.com/mistsys/tuntap" ) func main() { if len(os.Args) != 3 { fmt.Println("syntax:", os.Args[0], "tun|tap", "<device name>") return } var typ tuntap.DevKind switch os.Args[1] { case "tun": typ = tuntap.DevTun case "tap": typ = tuntap.DevTap default: fmt.Println("Unknown device type", os.Args[1]) return } tun, err := tuntap.Open(os.Args[2], typ) if err != nil { fmt.Println("Error opening tun/tap device:", err) return } fmt.Println("Listening on", tun.Name()) for { buf := make([]byte, 1536) pkt, err := tun.ReadPacket(buf) if err != nil { fmt.Println("Read error:", err) } else { if pkt.Truncated { fmt.Printf("!") } else { fmt.Printf(" ") } fmt.Printf("%x %x\n", pkt.Protocol, pkt.Body) } } }
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "errors" "fmt" "path/filepath" "time" "github.com/keybase/client/go/logger" "github.com/keybase/client/go/protocol/keybase1" "github.com/keybase/kbfs/kbfsblock" "github.com/keybase/kbfs/kbfscodec" "github.com/keybase/kbfs/tlf" "golang.org/x/net/context" "golang.org/x/sync/errgroup" ) type overallBlockState int const ( // cleanState: no outstanding local writes. cleanState overallBlockState = iota // dirtyState: there are outstanding local writes that haven't yet been // synced. dirtyState ) // blockReqType indicates whether an operation makes block // modifications or not type blockReqType int const ( // A block read request. blockRead blockReqType = iota // A block write request. blockWrite // A block read request that is happening from a different // goroutine than the blockLock rlock holder, using the same lState. blockReadParallel // We are looking up a block for the purposes of creating a new // node in the node cache for it; avoid any unlocks as part of the // lookup process. 
blockLookup ) type mdToCleanIfUnused struct { md ReadOnlyRootMetadata bps *blockPutState } type syncInfo struct { oldInfo BlockInfo op *syncOp unrefs []BlockInfo bps *blockPutState refBytes uint64 unrefBytes uint64 toCleanIfUnused []mdToCleanIfUnused } func (si *syncInfo) DeepCopy(codec kbfscodec.Codec) (*syncInfo, error) { newSi := &syncInfo{ oldInfo: si.oldInfo, refBytes: si.refBytes, unrefBytes: si.unrefBytes, } newSi.unrefs = make([]BlockInfo, len(si.unrefs)) copy(newSi.unrefs, si.unrefs) if si.bps != nil { newSi.bps = si.bps.DeepCopy() } if si.op != nil { err := kbfscodec.Update(codec, &newSi.op, si.op) if err != nil { return nil, err } } newSi.toCleanIfUnused = make([]mdToCleanIfUnused, len(si.toCleanIfUnused)) for i, toClean := range si.toCleanIfUnused { // It might be overkill to deep-copy these MDs and bpses, // which are probably immutable, but for now let's do the safe // thing. copyMd, err := toClean.md.deepCopy(codec) if err != nil { return nil, err } newSi.toCleanIfUnused[i].md = copyMd.ReadOnly() newSi.toCleanIfUnused[i].bps = toClean.bps.DeepCopy() } return newSi, nil } func (si *syncInfo) removeReplacedBlock(ctx context.Context, log logger.Logger, ptr BlockPointer) { for i, ref := range si.op.RefBlocks { if ref == ptr { log.CDebugf(ctx, "Replacing old ref %v", ptr) si.op.RefBlocks = append(si.op.RefBlocks[:i], si.op.RefBlocks[i+1:]...) for j, unref := range si.unrefs { if unref.BlockPointer == ptr { // Don't completely remove the unref, // since it contains size info that we // need to incorporate into the MD // usage calculations. si.unrefs[j].BlockPointer = zeroPtr } } break } } } func (si *syncInfo) mergeUnrefCache(md *RootMetadata) { for _, info := range si.unrefs { // it's ok if we push the same ptr.ID/RefNonce multiple times, // because the subsequent ones should have a QuotaSize of 0. md.AddUnrefBlock(info) } } type deCacheEntry struct { // dirEntry is the dirty directory entry corresponding to the // BlockPointer that maps to this struct. 
dirEntry DirEntry // adds is a map of the pointers for new entry names that have // been added to the DirBlock for the BlockPointer that maps to // this struct. adds map[string]BlockPointer // dels is a set of the names that have been removed from the // DirBlock for the BlockPointer that maps to this struct. dels map[string]bool } // folderBlockOps contains all the fields that must be synchronized by // blockLock. It will eventually also contain all the methods that // must be synchronized by blockLock, so that folderBranchOps will // have no knowledge of blockLock. // // -- And now, a primer on tracking dirty bytes -- // // The DirtyBlockCache tracks the number of bytes that are dirtied // system-wide, as the number of bytes that haven't yet been synced // ("unsynced"), and a number of bytes that haven't yet been resolved // yet because the overall file Sync hasn't finished yet ("total"). // This data helps us decide when we need to block incoming Writes, in // order to keep memory usage from exploding. // // It's the responsibility of folderBlockOps (and its helper struct // dirtyFile) to update these totals in DirtyBlockCache for the // individual files within this TLF. This is complicated by a few things: // * New writes to a file are "deferred" while a Sync is happening, and // are replayed after the Sync finishes. // * Syncs can be canceled or error out halfway through syncing the blocks, // leaving the file in a dirty state until the next Sync. // * Syncs can fail with a /recoverable/ error, in which case they get // retried automatically by folderBranchOps. In that case, the retried // Sync also sucks in any outstanding deferred writes. // // With all that in mind, here is the rough breakdown of how this // bytes-tracking is implemented: // * On a Write/Truncate to a block, folderBranchOps counts all the // newly-dirtied bytes in a file as "unsynced". 
That is, if the block was // already in the dirty cache (and not already being synced), only // extensions to the block count as "unsynced" bytes. // * When a Sync starts, dirtyFile remembers the total of bytes being synced, // and the size of each block being synced. // * When each block put finishes successfully, dirtyFile subtracts the size // of that block from "unsynced". // * When a Sync finishes successfully, the total sum of bytes in that sync // are subtracted from the "total" dirty bytes outstanding. // * If a Sync fails, but some blocks were put successfully, those blocks // are "re-dirtied", which means they count as unsynced bytes again. // dirtyFile handles this. // * When a Write/Truncate is deferred due to an ongoing Sync, its bytes // still count towards the "unsynced" total. In fact, this essentially // creates a new copy of those blocks, and the whole size of that block // (not just the newly-dirtied bytes) count for the total. However, // when the write gets replayed, folderBlockOps first subtracts those bytes // from the system-wide numbers, since they are about to be replayed. // * When a Sync is retried after a recoverable failure, dirtyFile adds // the newly-dirtied deferred bytes to the system-wide numbers, since they // are now being assimilated into this Sync. // * dirtyFile also exposes a concept of "orphaned" blocks. These are child // blocks being synced that are now referenced via a new, permanent block // ID from the parent indirect block. This matters for when hard failures // occur during a Sync -- the blocks will no longer be accessible under // their previous old pointers, and so dirtyFile needs to know their old // bytes can be cleaned up now. type folderBlockOps struct { config Config log logger.Logger folderBranch FolderBranch observers *observerList // forceSyncChan can be sent on to trigger an immediate // Sync(). It is a blocking channel. 
forceSyncChan chan<- struct{} // protects access to blocks in this folder and all fields // below. blockLock blockLock // Which files are currently dirty and have dirty blocks that are either // currently syncing, or waiting to be sync'd. dirtyFiles map[BlockPointer]*dirtyFile // For writes and truncates, track the unsynced to-be-unref'd // block infos, per-path. unrefCache map[BlockRef]*syncInfo // For writes and truncates, track the modified (but not yet // committed) directory entries. Maps the entry BlockRef to a // modified entry. deCache map[BlockRef]deCacheEntry // Writes and truncates for blocks that were being sync'd, and // need to be replayed after the sync finishes on top of the new // versions of the blocks. deferredWrites []func(context.Context, *lockState, KeyMetadata, path) error // Blocks that need to be deleted from the dirty cache before any // deferred writes are replayed. deferredDirtyDeletes []BlockPointer deferredWaitBytes int64 // set to true if this write or truncate should be deferred doDeferWrite bool // nodeCache itself is goroutine-safe, but write/truncate must // call PathFromNode() only under blockLock (see nodeCache // comments in folder_branch_ops.go). nodeCache NodeCache } // Only exported methods of folderBlockOps should be used outside of this // file. // // Although, temporarily, folderBranchOps is allowed to reach in and // manipulate folderBlockOps fields and methods directly. func (fbo *folderBlockOps) id() tlf.ID { return fbo.folderBranch.Tlf } func (fbo *folderBlockOps) branch() BranchName { return fbo.folderBranch.Branch } // GetState returns the overall block state of this TLF. 
func (fbo *folderBlockOps) GetState(lState *lockState) overallBlockState { fbo.blockLock.RLock(lState) defer fbo.blockLock.RUnlock(lState) if len(fbo.deCache) == 0 { return cleanState } return dirtyState } // getCleanEncodedBlockHelperLocked retrieves the encoded size of the // clean block pointed to by ptr, which must be valid, either from the // cache or from the server. If `rtype` is `blockReadParallel`, it's // assumed that some coordinating goroutine is holding the correct // locks, and in that case `lState` must be `nil`. func (fbo *folderBlockOps) getCleanEncodedBlockSizeLocked(ctx context.Context, lState *lockState, kmd KeyMetadata, ptr BlockPointer, branch BranchName, rtype blockReqType) (uint32, error) { if rtype != blockReadParallel { if rtype == blockWrite { panic("Cannot get the size of a block for writing") } fbo.blockLock.AssertAnyLocked(lState) } else if lState != nil { panic("Non-nil lState passed to getCleanEncodedBlockSizeLocked " + "with blockReadParallel") } if !ptr.IsValid() { return 0, InvalidBlockRefError{ptr.Ref()} } if block, err := fbo.config.BlockCache().Get(ptr); err == nil { return block.GetEncodedSize(), nil } if err := checkDataVersion(fbo.config, path{}, ptr); err != nil { return 0, err } // Unlock the blockLock while we wait for the network, only if // it's locked for reading by a single goroutine. If it's locked // for writing, that indicates we are performing an atomic write // operation, and we need to ensure that nothing else comes in and // modifies the blocks, so don't unlock. // // If there may be multiple goroutines fetching blocks under the // same lState, we can't safely unlock since some of the other // goroutines may be operating on the data assuming they have the // lock. 
bops := fbo.config.BlockOps() var size uint32 var err error if rtype != blockReadParallel && rtype != blockLookup { fbo.blockLock.DoRUnlockedIfPossible(lState, func(*lockState) { size, err = bops.GetEncodedSize(ctx, kmd, ptr) }) } else { size, err = bops.GetEncodedSize(ctx, kmd, ptr) } if err != nil { return 0, err } return size, nil } // getBlockHelperLocked retrieves the block pointed to by ptr, which // must be valid, either from the cache or from the server. If // notifyPath is valid and the block isn't cached, trigger a read // notification. If `rtype` is `blockReadParallel`, it's assumed that // some coordinating goroutine is holding the correct locks, and // in that case `lState` must be `nil`. // // This must be called only by get{File,Dir}BlockHelperLocked(). func (fbo *folderBlockOps) getBlockHelperLocked(ctx context.Context, lState *lockState, kmd KeyMetadata, ptr BlockPointer, branch BranchName, newBlock makeNewBlock, lifetime BlockCacheLifetime, notifyPath path, rtype blockReqType) (Block, error) { if rtype != blockReadParallel { fbo.blockLock.AssertAnyLocked(lState) } else if lState != nil { panic("Non-nil lState passed to getBlockHelperLocked " + "with blockReadParallel") } if !ptr.IsValid() { return nil, InvalidBlockRefError{ptr.Ref()} } if block, err := fbo.config.DirtyBlockCache().Get( fbo.id(), ptr, branch); err == nil { return block, nil } if block, hasPrefetched, lifetime, err := fbo.config.BlockCache().GetWithPrefetch(ptr); err == nil { // If the block was cached in the past, we need to handle it as if it's // an on-demand request so that its downstream prefetches are triggered // correctly according to the new on-demand fetch priority. 
fbo.config.BlockOps().BlockRetriever().CacheAndPrefetch(ctx, ptr, block, kmd, defaultOnDemandRequestPriority, lifetime, hasPrefetched) return block, nil } if err := checkDataVersion(fbo.config, notifyPath, ptr); err != nil { return nil, err } if notifyPath.isValidForNotification() { fbo.config.Reporter().Notify(ctx, readNotification(notifyPath, false)) defer fbo.config.Reporter().Notify(ctx, readNotification(notifyPath, true)) } // Unlock the blockLock while we wait for the network, only if // it's locked for reading by a single goroutine. If it's locked // for writing, that indicates we are performing an atomic write // operation, and we need to ensure that nothing else comes in and // modifies the blocks, so don't unlock. // // If there may be multiple goroutines fetching blocks under the // same lState, we can't safely unlock since some of the other // goroutines may be operating on the data assuming they have the // lock. // fetch the block, and add to cache block := newBlock() bops := fbo.config.BlockOps() var err error if rtype != blockReadParallel && rtype != blockLookup { fbo.blockLock.DoRUnlockedIfPossible(lState, func(*lockState) { err = bops.Get(ctx, kmd, ptr, block, lifetime) }) } else { err = bops.Get(ctx, kmd, ptr, block, lifetime) } if err != nil { return nil, err } return block, nil } // getFileBlockHelperLocked retrieves the block pointed to by ptr, // which must be valid, either from an internal cache, the block // cache, or from the server. An error is returned if the retrieved // block is not a file block. If `rtype` is `blockReadParallel`, it's // assumed that some coordinating goroutine is holding the correct // locks, and in that case `lState` must be `nil`. // // This must be called only by GetFileBlockForReading(), // getFileBlockLocked(), and getFileLocked(). // // p is used only when reporting errors and sending read // notifications, and can be empty. 
func (fbo *folderBlockOps) getFileBlockHelperLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer,
	branch BranchName, p path, rtype blockReqType) (
	*FileBlock, error) {
	if rtype != blockReadParallel {
		fbo.blockLock.AssertAnyLocked(lState)
	} else if lState != nil {
		// Parallel readers coordinate locking elsewhere; a non-nil
		// lState here indicates a caller bug.
		panic("Non-nil lState passed to getFileBlockHelperLocked " +
			"with blockReadParallel")
	}

	block, err := fbo.getBlockHelperLocked(
		ctx, lState, kmd, ptr, branch, NewFileBlock, TransientEntry, p, rtype)
	if err != nil {
		return nil, err
	}

	// The fetched block must actually be a file block.
	fblock, ok := block.(*FileBlock)
	if !ok {
		return nil, NotFileBlockError{ptr, branch, p}
	}

	return fblock, nil
}

// GetBlockForReading retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. The
// returned block may have a generic type (not DirBlock or FileBlock).
//
// This should be called for "internal" operations, like conflict
// resolution and state checking, which don't know what kind of block
// the pointer refers to. The block will not be cached, if it wasn't
// in the cache already.
func (fbo *folderBlockOps) GetBlockForReading(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer, branch BranchName) (
	Block, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getBlockHelperLocked(ctx, lState, kmd, ptr, branch,
		NewCommonBlock, NoCacheEntry, path{}, blockRead)
}

// GetCleanEncodedBlocksSizeSum retrieves the sum of the encoded sizes
// of the blocks pointed to by ptrs, all of which must be valid,
// either from the cache or from the server.
//
// The caller can specify a set of pointers using
// `ignoreRecoverableForRemovalErrors` for which "recoverable" fetch
// errors are tolerated. In that case, the returned sum will not
// include the size for any pointers in the
// `ignoreRecoverableForRemovalErrors` set that hit such an error.
//
// This should be called for "internal" operations, like conflict
// resolution and state checking, which don't know what kind of block
// the pointers refer to. Any downloaded blocks will not be cached,
// if they weren't in the cache already.
func (fbo *folderBlockOps) GetCleanEncodedBlocksSizeSum(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptrs []BlockPointer,
	ignoreRecoverableForRemovalErrors map[BlockPointer]bool,
	branch BranchName) (uint64, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)

	// Buffered to len(ptrs) so that no successful goroutine ever blocks
	// on the send, even if eg.Wait returns early due to another
	// goroutine's error.
	sumCh := make(chan uint32, len(ptrs))
	eg, groupCtx := errgroup.WithContext(ctx)
	for _, ptr := range ptrs {
		ptr := ptr // capture range variable
		eg.Go(func() error {
			// lState must be nil for blockReadParallel: this goroutine
			// is not the official lock holder (see helper's contract).
			size, err := fbo.getCleanEncodedBlockSizeLocked(groupCtx, nil,
				kmd, ptr, branch, blockReadParallel)
			// TODO: we might be able to recover the size of the
			// top-most block of a removed file using the merged
			// directory entry, the same way we do in
			// `folderBranchOps.unrefEntry`.
			if isRecoverableBlockErrorForRemoval(err) &&
				ignoreRecoverableForRemovalErrors[ptr] {
				fbo.log.CDebugf(groupCtx, "Hit an ignorable, recoverable "+
					"error for block %v: %v", ptr, err)
				return nil
			}

			if err != nil {
				return err
			}
			sumCh <- size
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return 0, err
	}
	// All senders have finished; close so the range below terminates.
	close(sumCh)

	var sum uint64
	for size := range sumCh {
		sum += uint64(size)
	}
	return sum, nil
}

// getDirBlockHelperLocked retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. An error
// is returned if the retrieved block is not a dir block.
//
// This must be called only by GetDirBlockForReading() and
// getDirLocked().
//
// p is used only when reporting errors, and can be empty.
func (fbo *folderBlockOps) getDirBlockHelperLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer,
	branch BranchName, p path, rtype blockReqType) (*DirBlock, error) {
	if rtype != blockReadParallel {
		fbo.blockLock.AssertAnyLocked(lState)
	}
	// NOTE(review): unlike getFileBlockHelperLocked, this helper does not
	// panic on a non-nil lState with blockReadParallel — presumably dir
	// blocks are never fetched in parallel; confirm against callers.

	// Pass in an empty notify path because notifications should only
	// trigger for file reads.
	block, err := fbo.getBlockHelperLocked(
		ctx, lState, kmd, ptr, branch, NewDirBlock, TransientEntry, path{}, rtype)
	if err != nil {
		return nil, err
	}

	// The fetched block must actually be a directory block.
	dblock, ok := block.(*DirBlock)
	if !ok {
		return nil, NotDirBlockError{ptr, branch, p}
	}

	return dblock, nil
}

// GetFileBlockForReading retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. An error
// is returned if the retrieved block is not a file block.
//
// This should be called for "internal" operations, like conflict
// resolution and state checking. "Real" operations should use
// getFileBlockLocked() and getFileLocked() instead.
//
// p is used only when reporting errors, and can be empty.
func (fbo *folderBlockOps) GetFileBlockForReading(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer,
	branch BranchName, p path) (*FileBlock, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getFileBlockHelperLocked(
		ctx, lState, kmd, ptr, branch, p, blockRead)
}

// GetDirBlockForReading retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. An error
// is returned if the retrieved block is not a dir block.
//
// This should be called for "internal" operations, like conflict
// resolution and state checking. "Real" operations should use
// getDirLocked() instead.
//
// p is used only when reporting errors, and can be empty.
func (fbo *folderBlockOps) GetDirBlockForReading(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer,
	branch BranchName, p path) (*DirBlock, error) {
	// Read-only access: an r-lock is sufficient; the helper does the
	// actual fetch and type check.
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getDirBlockHelperLocked(
		ctx, lState, kmd, ptr, branch, p, blockRead)
}

// getFileBlockLocked retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. An error
// is returned if the retrieved block is not a file block.
//
// The given path must be valid, and the given pointer must be its
// tail pointer or an indirect pointer from it. A read notification is
// triggered for the given path only if the block isn't in the cache.
//
// This shouldn't be called for "internal" operations, like conflict
// resolution and state checking -- use GetFileBlockForReading() for
// those instead.
//
// When rtype == blockWrite and the cached version of the block is
// currently clean, or the block is currently being synced, this
// method makes a copy of the file block and returns it. If this
// method might be called again for the same block within a single
// operation, it is the caller's responsibility to write that block
// back to the cache as dirty.
//
// Note that blockLock must be locked exactly when rtype ==
// blockWrite, and must be r-locked when rtype == blockRead. (This
// differs from getDirLocked.) This is because a write operation
// (like write, truncate and sync which lock blockLock) fetching a
// file block will almost always need to modify that block, and so
// will pass in blockWrite. If rtype == blockReadParallel, it's
// assumed that some coordinating goroutine is holding the correct
// locks, and in that case `lState` must be `nil`.
//
// file is used only when reporting errors and sending read
// notifications, and can be empty except that file.Branch must be set
// correctly.
//
// This method also returns whether the block was already dirty.
func (fbo *folderBlockOps) getFileBlockLocked(ctx context.Context, lState *lockState, kmd KeyMetadata, ptr BlockPointer, file path, rtype blockReqType) ( fblock *FileBlock, wasDirty bool, err error) { switch rtype { case blockRead: fbo.blockLock.AssertRLocked(lState) case blockWrite: fbo.blockLock.AssertLocked(lState) case blockReadParallel: // This goroutine might not be the official lock holder, so // don't make any assertions. if lState != nil { panic("Non-nil lState passed to getFileBlockLocked " + "with blockReadParallel") } case blockLookup: panic("blockLookup should only be used for directory blocks") default: panic(fmt.Sprintf("Unknown block req type: %d", rtype)) } fblock, err = fbo.getFileBlockHelperLocked( ctx, lState, kmd, ptr, file.Branch, file, rtype) if err != nil { return nil, false, err } wasDirty = fbo.config.DirtyBlockCache().IsDirty(fbo.id(), ptr, file.Branch) if rtype == blockWrite { // Copy the block if it's for writing, and either the // block is not yet dirty or the block is currently // being sync'd and needs a copy even though it's // already dirty. df := fbo.dirtyFiles[file.tailPointer()] if !wasDirty || (df != nil && df.blockNeedsCopy(ptr)) { fblock = fblock.DeepCopy() } } return fblock, wasDirty, nil } // getFileLocked is getFileBlockLocked called with file.tailPointer(). func (fbo *folderBlockOps) getFileLocked(ctx context.Context, lState *lockState, kmd KeyMetadata, file path, rtype blockReqType) (*FileBlock, error) { // Callers should have already done this check, but it doesn't // hurt to do it again. if !file.isValid() { return nil, InvalidPathError{file} } fblock, _, err := fbo.getFileBlockLocked( ctx, lState, kmd, file.tailPointer(), file, rtype) return fblock, err } // GetIndirectFileBlockInfos returns a list of BlockInfos for all // indirect blocks of the given file. 
If the returned error is a // recoverable one (as determined by // isRecoverableBlockErrorForRemoval), the returned list may still be // non-empty, and holds all the BlockInfos for all found indirect // blocks. func (fbo *folderBlockOps) GetIndirectFileBlockInfos(ctx context.Context, lState *lockState, kmd KeyMetadata, file path) ([]BlockInfo, error) { fbo.blockLock.RLock(lState) defer fbo.blockLock.RUnlock(lState) var uid keybase1.UID // Data reads don't depend on the uid. fd := fbo.newFileData(lState, file, uid, kmd) return fd.getIndirectFileBlockInfos(ctx) } // GetIndirectFileBlockInfosWithTopBlock returns a list of BlockInfos // for all indirect blocks of the given file, starting from the given // top-most block. If the returned error is a recoverable one (as // determined by isRecoverableBlockErrorForRemoval), the returned list // may still be non-empty, and holds all the BlockInfos for all found // indirect blocks. (This will be relevant when we handle multiple // levels of indirection.) func (fbo *folderBlockOps) GetIndirectFileBlockInfosWithTopBlock( ctx context.Context, lState *lockState, kmd KeyMetadata, file path, topBlock *FileBlock) ( []BlockInfo, error) { fbo.blockLock.RLock(lState) defer fbo.blockLock.RUnlock(lState) var uid keybase1.UID // Data reads don't depend on the uid. fd := fbo.newFileData(lState, file, uid, kmd) return fd.getIndirectFileBlockInfosWithTopBlock(ctx, topBlock) } // DeepCopyFile makes a complete copy of the given file, deduping leaf // blocks and making new random BlockPointers for all indirect blocks. // It returns the new top pointer of the copy, and all the new child // pointers in the copy. It takes a custom DirtyBlockCache, which // directs where the resulting block copies are stored. 
func (fbo *folderBlockOps) DeepCopyFile(
	ctx context.Context, lState *lockState, kmd KeyMetadata, file path,
	dirtyBcache DirtyBlockCache, dataVer DataVer) (
	newTopPtr BlockPointer, allChildPtrs []BlockPointer, err error) {
	// Deep copying doesn't alter any data in use, it only makes copy,
	// so only a read lock is needed.
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	// The copy is attributed to the current session's user.
	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return BlockPointer{}, nil, err
	}
	fd := fbo.newFileDataWithCache(
		lState, file, session.UID, kmd, dirtyBcache)
	return fd.deepCopy(ctx, dataVer)
}

// UndupChildrenInCopy constructs a fileData view of the given file
// (backed by the supplied custom DirtyBlockCache) and delegates to its
// undupChildrenInCopy method, passing along the block cache, block
// ops, put state and top block. It takes a full write lock on
// blockLock. Returns the BlockInfos produced by the delegate.
// NOTE(review): presumably this un-deduplicates the leaf blocks of a
// previously deep-copied file — confirm against fileData.
func (fbo *folderBlockOps) UndupChildrenInCopy(ctx context.Context,
	lState *lockState, kmd KeyMetadata, file path, bps *blockPutState,
	dirtyBcache DirtyBlockCache, topBlock *FileBlock) ([]BlockInfo, error) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return nil, err
	}
	fd := fbo.newFileDataWithCache(
		lState, file, session.UID, kmd, dirtyBcache)
	return fd.undupChildrenInCopy(ctx, fbo.config.BlockCache(),
		fbo.config.BlockOps(), bps, topBlock)
}

// ReadyNonLeafBlocksInCopy constructs a fileData view of the given
// file (backed by the supplied custom DirtyBlockCache) and delegates
// to its readyNonLeafBlocksInCopy method, passing along the block
// cache, block ops, put state and top block. Unlike
// UndupChildrenInCopy it only takes a read lock on blockLock.
// Returns the BlockInfos produced by the delegate.
func (fbo *folderBlockOps) ReadyNonLeafBlocksInCopy(ctx context.Context,
	lState *lockState, kmd KeyMetadata, file path, bps *blockPutState,
	dirtyBcache DirtyBlockCache, topBlock *FileBlock) ([]BlockInfo, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return nil, err
	}
	fd := fbo.newFileDataWithCache(
		lState, file, session.UID, kmd, dirtyBcache)
	return fd.readyNonLeafBlocksInCopy(ctx, fbo.config.BlockCache(),
		fbo.config.BlockOps(), bps, topBlock)
}

// getDirLocked retrieves the block pointed to by the tail pointer of
// the given path, which must be valid, either from the cache or from
// the server. An error is returned if the retrieved block is not a
// dir block.
// // This shouldn't be called for "internal" operations, like conflict // resolution and state checking -- use GetDirBlockForReading() for // those instead. // // When rtype == blockWrite and the cached version of the block is // currently clean, this method makes a copy of the directory block // and returns it. If this method might be called again for the same // block within a single operation, it is the caller's responsibility // to write that block back to the cache as dirty. // // Note that blockLock must be either r-locked or locked, but // independently of rtype. (This differs from getFileLocked and // getFileBlockLocked.) File write operations (which lock blockLock) // don't need a copy of parent dir blocks, and non-file write // operations do need to copy dir blocks for modifications. func (fbo *folderBlockOps) getDirLocked(ctx context.Context, lState *lockState, kmd KeyMetadata, dir path, rtype blockReqType) ( *DirBlock, error) { fbo.blockLock.AssertAnyLocked(lState) // Callers should have already done this check, but it doesn't // hurt to do it again. if !dir.isValid() { return nil, InvalidPathError{dir} } // Get the block for the last element in the path. dblock, err := fbo.getDirBlockHelperLocked( ctx, lState, kmd, dir.tailPointer(), dir.Branch, dir, rtype) if err != nil { return nil, err } if rtype == blockWrite && !fbo.config.DirtyBlockCache().IsDirty( fbo.id(), dir.tailPointer(), dir.Branch) { // Copy the block if it's for writing and the block is // not yet dirty. dblock = dblock.DeepCopy() } return dblock, nil } // GetDir retrieves the block pointed to by the tail pointer of the // given path, which must be valid, either from the cache or from the // server. An error is returned if the retrieved block is not a dir // block. // // This shouldn't be called for "internal" operations, like conflict // resolution and state checking -- use GetDirBlockForReading() for // those instead. 
// // When rtype == blockWrite and the cached version of the block is // currently clean, this method makes a copy of the directory block // and returns it. If this method might be called again for the same // block within a single operation, it is the caller's responsibility // to write that block back to the cache as dirty. func (fbo *folderBlockOps) GetDir( ctx context.Context, lState *lockState, kmd KeyMetadata, dir path, rtype blockReqType) (*DirBlock, error) { fbo.blockLock.RLock(lState) defer fbo.blockLock.RUnlock(lState) return fbo.getDirLocked(ctx, lState, kmd, dir, rtype) } func (fbo *folderBlockOps) addDirEntryInCacheLocked(lState *lockState, dir path, newName string, newDe DirEntry) { fbo.blockLock.AssertLocked(lState) cacheEntry := fbo.deCache[dir.tailPointer().Ref()] if cacheEntry.adds == nil { cacheEntry.adds = make(map[string]BlockPointer) } cacheEntry.adds[newName] = newDe.BlockPointer // In case it was removed in the cache but not flushed yet. delete(cacheEntry.dels, newName) fbo.deCache[dir.tailPointer().Ref()] = cacheEntry } // AddDirEntryInCache adds a brand new entry to the given directory in // the cache, which will get applied to the dirty block on subsequent // fetches for the directory. The new entry must not yet have a cache // entry itself. func (fbo *folderBlockOps) AddDirEntryInCache(lState *lockState, dir path, newName string, newDe DirEntry) { fbo.blockLock.Lock(lState) defer fbo.blockLock.Unlock(lState) fbo.addDirEntryInCacheLocked(lState, dir, newName, newDe) // Add target dir entry as well. 
if newDe.IsInitialized() { cacheEntry, ok := fbo.deCache[newDe.Ref()] if ok { panic("New entry shouldn't already exist") } cacheEntry.dirEntry = newDe fbo.deCache[newDe.Ref()] = cacheEntry } } func (fbo *folderBlockOps) removeDirEntryInCacheLocked(lState *lockState, dir path, oldName string) { fbo.blockLock.AssertLocked(lState) cacheEntry := fbo.deCache[dir.tailPointer().Ref()] if cacheEntry.dels == nil { cacheEntry.dels = make(map[string]bool) } cacheEntry.dels[oldName] = true // In case it was added in the cache but not flushed yet. delete(cacheEntry.adds, oldName) fbo.deCache[dir.tailPointer().Ref()] = cacheEntry } // RemoveDirEntryInCache removes an entry from the given directory in // the cache, which will get applied to the dirty block on subsequent // fetches for the directory. func (fbo *folderBlockOps) RemoveDirEntryInCache(lState *lockState, dir path, oldName string) { fbo.blockLock.Lock(lState) defer fbo.blockLock.Unlock(lState) fbo.removeDirEntryInCacheLocked(lState, dir, oldName) } // RenameDirEntryInCache updates the entries of both the old and new // parent dirs for the given target dir atomically (with respect to // blockLock). It also updates the cache entry for the target, which // would have its Ctime changed. The updates will get applied to the // dirty blocks on subsequent fetches. // // The returned bool indicates whether or not the caller should clean // up the target cache entry when the effects of the operation are no // longer needed. func (fbo *folderBlockOps) RenameDirEntryInCache(lState *lockState, oldParent path, oldName string, newParent path, newName string, newDe DirEntry) (deleteTargetDirEntry bool) { fbo.blockLock.Lock(lState) defer fbo.blockLock.Unlock(lState) fbo.addDirEntryInCacheLocked(lState, newParent, newName, newDe) fbo.removeDirEntryInCacheLocked(lState, oldParent, oldName) // If there's already an entry for the target, only update the // Ctime on a rename. 
cacheEntry, ok := fbo.deCache[newDe.Ref()] if ok && cacheEntry.dirEntry.IsInitialized() { cacheEntry.dirEntry.Ctime = newDe.Ctime } else { cacheEntry.dirEntry = newDe deleteTargetDirEntry = true } fbo.deCache[newDe.Ref()] = cacheEntry return deleteTargetDirEntry } func (fbo *folderBlockOps) setCachedAttrLocked( lState *lockState, ref BlockRef, attr attrChange, realEntry *DirEntry, doCreate bool) { fbo.blockLock.AssertLocked(lState) fileEntry, ok := fbo.deCache[ref] if !ok { if !doCreate { return } fileEntry.dirEntry = *realEntry } switch attr { case exAttr: fileEntry.dirEntry.Type = realEntry.Type case mtimeAttr: fileEntry.dirEntry.Mtime = realEntry.Mtime } fileEntry.dirEntry.Ctime = realEntry.Ctime fbo.deCache[ref] = fileEntry } // SetAttrInDirEntryInCache removes an entry from the given directory // in the cache, which will get applied to the dirty block on // subsequent fetches for the directory. // // The returned bool indicates whether or not the caller should clean // up the cache entry when the effects of the operation are no longer // needed. func (fbo *folderBlockOps) SetAttrInDirEntryInCache(lState *lockState, newDe DirEntry, attr attrChange) (deleteTargetDirEntry bool) { fbo.blockLock.Lock(lState) defer fbo.blockLock.Unlock(lState) // If there's already an entry for the target, only update the // Ctime on a rename. _, ok := fbo.deCache[newDe.Ref()] if !ok { deleteTargetDirEntry = true } fbo.setCachedAttrLocked( lState, newDe.Ref(), attr, &newDe, true /* create the deCache entry if it doesn't exist yet */) return deleteTargetDirEntry } // ClearCachedAddsAndRemoves clears out any cached directory entry // adds and removes for the given dir. func (fbo *folderBlockOps) ClearCachedAddsAndRemoves( lState *lockState, dir path) { fbo.blockLock.Lock(lState) defer fbo.blockLock.Unlock(lState) cacheEntry, ok := fbo.deCache[dir.tailPointer().Ref()] if !ok { return } // If there's no dirEntry, we can just delete the whole thing. 
if !cacheEntry.dirEntry.IsInitialized() { delete(fbo.deCache, dir.tailPointer().Ref()) return } // Otherwise just nil out the adds and dels. cacheEntry.adds = nil cacheEntry.dels = nil fbo.deCache[dir.tailPointer().Ref()] = cacheEntry } // updateWithDirtyEntriesLocked checks if the given DirBlock has any // entries that are in deCache (i.e., entries pointing to dirty // files). If so, it makes a copy with all such entries replaced with // the ones in deCache and returns it. If not, it just returns the // given one. func (fbo *folderBlockOps) updateWithDirtyEntriesLocked(ctx context.Context, lState *lockState, dir path, dblock *DirBlock) (*DirBlock, error) { fbo.blockLock.AssertAnyLocked(lState) // see if this directory has any outstanding writes/truncates that // require an updated DirEntry // Save some time for the common case of having no dirty // files. if len(fbo.deCache) == 0 { return dblock, nil } var dblockCopy *DirBlock dirCacheEntry := fbo.deCache[dir.tailPointer().Ref()] // TODO: We should get rid of deCache completely and use only // DirtyBlockCache to store the dirtied version of the DirBlock. // We can't do that yet, because there might be multiple // outstanding dirty files in one directory, and the KBFSOps API // allows for syncing one at a time, so keeping a single dirtied // DirBlock would accidentally sync the DirEntry of file A when a // sync of file B is requested. // // Soon a sync will sync everything that's dirty at once, and so // we can remove deCache at that point. Until then, we must // incrementally build it up each time. // Add cached additions to the copy. for k, ptr := range dirCacheEntry.adds { de, ok := fbo.deCache[ptr.Ref()] if !ok { return nil, fmt.Errorf("No cached dir entry found for new entry "+ "%s in dir %s (%v)", k, dir, dir.tailPointer()) } if dblockCopy == nil { dblockCopy = dblock.DeepCopy() } dblockCopy.Children[k] = de.dirEntry } // Remove cached removals from the copy. 
for k := range dirCacheEntry.adds { _, ok := dblock.Children[k] if !ok { continue } if dblockCopy == nil { dblockCopy = dblock.DeepCopy() } delete(dblockCopy.Children, k) } // Update dir entries for any modified files. for k, v := range dblock.Children { de, ok := fbo.deCache[v.Ref()] if !ok { continue } if dblockCopy == nil { dblockCopy = dblock.DeepCopy() } dblockCopy.Children[k] = de.dirEntry } if dblockCopy == nil { return dblock, nil } return dblockCopy, nil } // getDirtyDirLocked composes getDirLocked and // updatedWithDirtyEntriesLocked. Note that a dirty dir means that it // has entries possibly pointing to dirty files, not that it's dirty // itself. func (fbo *folderBlockOps) getDirtyDirLocked(ctx context.Context, lState *lockState, kmd KeyMetadata, dir path, rtype blockReqType) ( *DirBlock, error) { fbo.blockLock.AssertAnyLocked(lState) dblock, err := fbo.getDirLocked(ctx, lState, kmd, dir, rtype) if err != nil { return nil, err } return fbo.updateWithDirtyEntriesLocked(ctx, lState, dir, dblock) } // GetDirtyDirChildren returns a map of EntryInfos for the (possibly // dirty) children entries of the given directory. func (fbo *folderBlockOps) GetDirtyDirChildren( ctx context.Context, lState *lockState, kmd KeyMetadata, dir path) ( map[string]EntryInfo, error) { dblock, err := func() (*DirBlock, error) { fbo.blockLock.RLock(lState) defer fbo.blockLock.RUnlock(lState) return fbo.getDirtyDirLocked(ctx, lState, kmd, dir, blockRead) }() if err != nil { return nil, err } children := make(map[string]EntryInfo) for k, de := range dblock.Children { children[k] = de.EntryInfo } return children, nil } // file must have a valid parent. 
func (fbo *folderBlockOps) getDirtyParentAndEntryLocked(ctx context.Context, lState *lockState, kmd KeyMetadata, file path, rtype blockReqType) ( *DirBlock, DirEntry, error) { fbo.blockLock.AssertAnyLocked(lState) if !file.hasValidParent() { return nil, DirEntry{}, InvalidParentPathError{file} } parentPath := file.parentPath() dblock, err := fbo.getDirtyDirLocked( ctx, lState, kmd, *parentPath, rtype) if err != nil { return nil, DirEntry{}, err } // make sure it exists name := file.tailName() de, ok := dblock.Children[name] if !ok { return nil, DirEntry{}, NoSuchNameError{name} } return dblock, de, err } // GetDirtyParentAndEntry returns a copy of the parent DirBlock // (suitable for modification) of the given file, which may contain // entries pointing to other dirty files, and its possibly-dirty // DirEntry in that directory. file must have a valid parent. Use // GetDirtyEntry() if you only need the DirEntry. func (fbo *folderBlockOps) GetDirtyParentAndEntry( ctx context.Context, lState *lockState, kmd KeyMetadata, file path) ( *DirBlock, DirEntry, error) { fbo.blockLock.RLock(lState) defer fbo.blockLock.RUnlock(lState) return fbo.getDirtyParentAndEntryLocked( ctx, lState, kmd, file, blockWrite) } // file must have a valid parent. func (fbo *folderBlockOps) getDirtyEntryLocked(ctx context.Context, lState *lockState, kmd KeyMetadata, file path) (DirEntry, error) { // TODO: Since we only need a single DirEntry, avoid having to // look up every entry in the DirBlock. _, de, err := fbo.getDirtyParentAndEntryLocked( ctx, lState, kmd, file, blockLookup) return de, err } // GetDirtyEntry returns the possibly-dirty DirEntry of the given file // in its parent DirBlock. file must have a valid parent. 
func (fbo *folderBlockOps) GetDirtyEntry( ctx context.Context, lState *lockState, kmd KeyMetadata, file path) (DirEntry, error) { fbo.blockLock.RLock(lState) defer fbo.blockLock.RUnlock(lState) return fbo.getDirtyEntryLocked(ctx, lState, kmd, file) } // Lookup returns the possibly-dirty DirEntry of the given file in its // parent DirBlock, and a Node for the file if it exists. It has to // do all of this under the block lock to avoid races with // UpdatePointers. func (fbo *folderBlockOps) Lookup( ctx context.Context, lState *lockState, kmd KeyMetadata, dir Node, name string) (Node, DirEntry, error) { fbo.blockLock.RLock(lState) defer fbo.blockLock.RUnlock(lState) dirPath := fbo.nodeCache.PathFromNode(dir) if !dirPath.isValid() { return nil, DirEntry{}, InvalidPathError{dirPath} } childPath := dirPath.ChildPathNoPtr(name) de, err := fbo.getDirtyEntryLocked(ctx, lState, kmd, childPath) if err != nil { return nil, DirEntry{}, err } if de.Type == Sym { return nil, de, nil } err = checkDataVersion(fbo.config, childPath, de.BlockPointer) if err != nil { return nil, DirEntry{}, err } node, err := fbo.nodeCache.GetOrCreate(de.BlockPointer, name, dir) if err != nil { return nil, DirEntry{}, err } return node, de, nil } func (fbo *folderBlockOps) getOrCreateDirtyFileLocked(lState *lockState, file path) *dirtyFile { fbo.blockLock.AssertLocked(lState) ptr := file.tailPointer() df := fbo.dirtyFiles[ptr] if df == nil { df = newDirtyFile(file, fbo.config.DirtyBlockCache()) fbo.dirtyFiles[ptr] = df } return df } // cacheBlockIfNotYetDirtyLocked puts a block into the cache, but only // does so if the block isn't already marked as dirty in the cache. // This is useful when operating on a dirty copy of a block that may // already be in the cache. 
func (fbo *folderBlockOps) cacheBlockIfNotYetDirtyLocked(
	lState *lockState, ptr BlockPointer, file path, block Block) error {
	fbo.blockLock.AssertLocked(lState)
	df := fbo.getOrCreateDirtyFileLocked(lState, file)
	needsCaching, isSyncing := df.setBlockDirty(ptr)

	if needsCaching {
		err := fbo.config.DirtyBlockCache().Put(fbo.id(), ptr, file.Branch,
			block)
		if err != nil {
			return err
		}
	}

	if isSyncing {
		// The block is being synced right now, so subsequent writes
		// must be deferred and replayed after the sync finishes.
		fbo.doDeferWrite = true
	}
	return nil
}

// getOrCreateSyncInfoLocked returns the syncInfo for the given entry,
// creating one (with a fresh syncOp) if none exists yet.  blockLock
// must be held for writing.
func (fbo *folderBlockOps) getOrCreateSyncInfoLocked(
	lState *lockState, de DirEntry) (*syncInfo, error) {
	fbo.blockLock.AssertLocked(lState)
	ref := de.Ref()
	si, ok := fbo.unrefCache[ref]
	if !ok {
		so, err := newSyncOp(de.BlockPointer)
		if err != nil {
			return nil, err
		}
		si = &syncInfo{
			oldInfo: de.BlockInfo,
			op:      so,
		}
		fbo.unrefCache[ref] = si
	}
	return si, nil
}

// GetDirtyRefs returns a list of references of all known dirty
// blocks.
func (fbo *folderBlockOps) GetDirtyRefs(lState *lockState) []BlockRef {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	var dirtyRefs []BlockRef
	for ref := range fbo.deCache {
		dirtyRefs = append(dirtyRefs, ref)
	}
	return dirtyRefs
}

// fixChildBlocksAfterRecoverableErrorLocked should be called when a sync
// failed with a recoverable block error on a multi-block file. It
// makes sure that any outstanding dirty versions of the file are
// fixed up to reflect the fact that some of the indirect pointers now
// need to change.
func (fbo *folderBlockOps) fixChildBlocksAfterRecoverableErrorLocked(
	ctx context.Context, lState *lockState, file path, kmd KeyMetadata,
	redirtyOnRecoverableError map[BlockPointer]BlockPointer) {
	fbo.blockLock.AssertLocked(lState)

	df := fbo.dirtyFiles[file.tailPointer()]
	if df != nil {
		// Un-orphan old blocks, since we are reverting back to the
		// previous state.
		for _, oldPtr := range redirtyOnRecoverableError {
			fbo.log.CDebugf(ctx, "Un-orphaning %v", oldPtr)
			df.setBlockOrphaned(oldPtr, false)
		}
	}
	dirtyBcache := fbo.config.DirtyBlockCache()
	topBlock, err := dirtyBcache.Get(fbo.id(), file.tailPointer(),
		fbo.branch())
	fblock, ok := topBlock.(*FileBlock)
	if err != nil || !ok {
		// Best-effort recovery: without the dirty top block there's
		// nothing to fix up, so just log and bail.
		fbo.log.CWarningf(ctx, "Couldn't find dirtied "+
			"top-block for %v: %v", file.tailPointer(), err)
		return
	}

	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		fbo.log.CWarningf(ctx, "Couldn't find uid during recovery: %v", err)
		return
	}
	fd := fbo.newFileData(lState, file, session.UID, kmd)

	// If a copy of the top indirect block was made, we need to
	// redirty all the sync'd blocks under their new IDs, so that
	// future syncs will know they failed.
	newPtrs := make(map[BlockPointer]bool, len(redirtyOnRecoverableError))
	for newPtr := range redirtyOnRecoverableError {
		newPtrs[newPtr] = true
	}
	found, err := fd.findIPtrsAndClearSize(ctx, fblock, newPtrs)
	if err != nil {
		fbo.log.CWarningf(
			ctx, "Couldn't find and clear iptrs during recovery: %v", err)
		return
	}
	for newPtr, oldPtr := range redirtyOnRecoverableError {
		if !found[newPtr] {
			continue
		}

		fbo.log.CDebugf(ctx, "Re-dirtying %v (and deleting dirty block %v)",
			newPtr, oldPtr)
		// These blocks would have been permanent, so they're
		// definitely still in the cache.
		b, err := fbo.config.BlockCache().Get(newPtr)
		if err != nil {
			fbo.log.CWarningf(ctx, "Couldn't re-dirty %v: %v", newPtr, err)
			continue
		}
		if err = fbo.cacheBlockIfNotYetDirtyLocked(
			lState, newPtr, file, b); err != nil {
			fbo.log.CWarningf(ctx, "Couldn't re-dirty %v: %v", newPtr, err)
		}
		fbo.log.CDebugf(ctx, "Deleting dirty ptr %v after recoverable error",
			oldPtr)
		err = dirtyBcache.Delete(fbo.id(), oldPtr, fbo.branch())
		if err != nil {
			fbo.log.CDebugf(ctx, "Couldn't del-dirty %v: %v", oldPtr, err)
		}
	}
}

// nowUnixNano returns the current time from the configured Clock, in
// Unix nanoseconds.
func (fbo *folderBlockOps) nowUnixNano() int64 {
	return fbo.config.Clock().Now().UnixNano()
}

// PrepRename prepares the given rename operation. It returns copies
// of the old and new parent block (which may be the same), what is to
// be the new DirEntry, and a local block cache. It also modifies md,
// which must be a copy.
func (fbo *folderBlockOps) PrepRename(
	ctx context.Context, lState *lockState, md *RootMetadata,
	oldParent path, oldName string, newParent path, newName string) (
	oldPBlock, newPBlock *DirBlock, newDe DirEntry, lbc localBcache,
	err error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)

	// look up in the old path
	oldPBlock, err = fbo.getDirLocked(
		ctx, lState, md, oldParent, blockWrite)
	if err != nil {
		return nil, nil, DirEntry{}, nil, err
	}
	newDe, ok := oldPBlock.Children[oldName]
	// does the name exist?
	if !ok {
		return nil, nil, DirEntry{}, nil, NoSuchNameError{oldName}
	}

	ro, err := newRenameOp(oldName, oldParent.tailPointer(), newName,
		newParent.tailPointer(), newDe.BlockPointer, newDe.Type)
	if err != nil {
		return nil, nil, DirEntry{}, nil, err
	}
	// A renameOp doesn't have a single path to represent it, so we
	// can't call setFinalPath here unfortunately.  That means any
	// rename may force a manual paths population at other layers
	// (e.g., for journal statuses).  TODO: allow a way to set more
	// than one final path for renameOps?
	md.AddOp(ro)

	lbc = make(localBcache)
	// TODO: Write a SameBlock() function that can deal properly with
	// dedup'd blocks that share an ID but can be updated separately.
	if oldParent.tailPointer().ID == newParent.tailPointer().ID {
		newPBlock = oldPBlock
	} else {
		newPBlock, err = fbo.getDirLocked(
			ctx, lState, md, newParent, blockWrite)
		if err != nil {
			return nil, nil, DirEntry{}, nil, err
		}
		now := fbo.nowUnixNano()

		oldGrandparent := *oldParent.parentPath()
		if len(oldGrandparent.path) > 0 {
			// Update the old parent's mtime/ctime, unless the
			// oldGrandparent is the same as newParent (in which
			// case, the syncBlockAndCheckEmbedLocked call by the
			// caller will take care of it).
			if oldGrandparent.tailPointer().ID != newParent.tailPointer().ID {
				b, err := fbo.getDirLocked(ctx, lState, md, oldGrandparent,
					blockWrite)
				if err != nil {
					return nil, nil, DirEntry{}, nil, err
				}
				if de, ok := b.Children[oldParent.tailName()]; ok {
					de.Ctime = now
					de.Mtime = now
					b.Children[oldParent.tailName()] = de
					// Put this block back into the local cache as dirty
					lbc[oldGrandparent.tailPointer()] = b
				}
			}
		} else {
			// The old parent is the TLF root, whose times live in the
			// metadata itself.
			md.data.Dir.Ctime = now
			md.data.Dir.Mtime = now
		}
	}

	return oldPBlock, newPBlock, newDe, lbc, nil
}

// newFileData builds a fileData for the given file whose getter reads
// through the regular block-fetching path, and whose cacher dirties
// blocks via cacheBlockIfNotYetDirtyLocked.  blockLock must already
// be held (read or write).
func (fbo *folderBlockOps) newFileData(lState *lockState,
	file path, uid keybase1.UID, kmd KeyMetadata) *fileData {
	fbo.blockLock.AssertAnyLocked(lState)
	return newFileData(file, uid, fbo.config.Crypto(),
		fbo.config.BlockSplitter(), kmd,
		func(ctx context.Context, kmd KeyMetadata, ptr BlockPointer,
			file path, rtype blockReqType) (*FileBlock, bool, error) {
			lState := lState
			// Parallel reads happen without the lock held, so don't
			// pass the lock state through.
			if rtype == blockReadParallel {
				lState = nil
			}
			return fbo.getFileBlockLocked(
				ctx, lState, kmd, ptr, file, rtype)
		},
		func(ptr BlockPointer, block Block) error {
			return fbo.cacheBlockIfNotYetDirtyLocked(
				lState, ptr, file, block)
		}, fbo.log)
}

// newFileDataWithCache builds a fileData whose getter prefers blocks
// from the given DirtyBlockCache (falling back to the regular path),
// and whose cacher writes directly into that cache.  blockLock must
// already be held (read or write).
func (fbo *folderBlockOps) newFileDataWithCache(lState *lockState,
	file path, uid keybase1.UID, kmd KeyMetadata,
	dirtyBcache DirtyBlockCache) *fileData {
	fbo.blockLock.AssertAnyLocked(lState)
	return newFileData(file, uid, fbo.config.Crypto(),
		fbo.config.BlockSplitter(), kmd,
		func(ctx context.Context, kmd KeyMetadata, ptr BlockPointer,
			file path, rtype blockReqType) (*FileBlock, bool, error) {
			// Check the supplied dirty cache first.
			block, err := dirtyBcache.Get(file.Tlf, ptr, file.Branch)
			if fblock, ok := block.(*FileBlock); ok && err == nil {
				return fblock, true, nil
			}
			lState := lState
			if rtype == blockReadParallel {
				lState = nil
			}
			return fbo.getFileBlockLocked(
				ctx, lState, kmd, ptr, file, rtype)
		},
		func(ptr BlockPointer, block Block) error {
			return dirtyBcache.Put(file.Tlf, ptr, file.Branch, block)
		}, fbo.log)
}

// Read reads from the given file into the given buffer at the given
// offset. It returns the number of bytes read and nil, or 0 and the
// error if there was one.
func (fbo *folderBlockOps) Read(
	ctx context.Context, lState *lockState, kmd KeyMetadata, file path,
	dest []byte, off int64) (int64, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)

	fbo.log.CDebugf(ctx, "Reading from %v", file.tailPointer())

	var uid keybase1.UID // Data reads don't depend on the uid.
	fd := fbo.newFileData(lState, file, uid, kmd)
	return fd.read(ctx, dest, off)
}

// maybeWaitOnDeferredWrites blocks until the dirty-block cache grants
// write permission (via c), surfacing any sync error for the file
// that arrives while waiting.
func (fbo *folderBlockOps) maybeWaitOnDeferredWrites(
	ctx context.Context, lState *lockState, file Node,
	c DirtyPermChan) error {
	var errListener chan error
	err := func() error {
		fbo.blockLock.Lock(lState)
		defer fbo.blockLock.Unlock(lState)
		filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
		if err != nil {
			return err
		}
		df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
		errListener = make(chan error, 1)
		df.addErrListener(errListener)
		return nil
	}()
	if err != nil {
		return err
	}

	logTimer := time.After(100 * time.Millisecond)
	doLogUnblocked := false
	for {
		select {
		case <-c:
			if doLogUnblocked {
				fbo.log.CDebugf(ctx, "Write unblocked")
			}
			// Make sure there aren't any queued errors.
			select {
			case err := <-errListener:
				return err
			default:
			}
			return nil
		case <-logTimer:
			// Print a log message once if it's taking too long.
			fbo.log.CDebugf(ctx,
				"Blocking a write because of a full dirty buffer")
			doLogUnblocked = true
		case err := <-errListener:
			// XXX: should we ignore non-fatal errors (like
			// context.Canceled), or errors that are specific only to
			// some other file being sync'd (e.g., "recoverable" block
			// errors from which we couldn't recover)?
			return err
		}
	}
}

// pathFromNodeForBlockWriteLocked resolves the given Node to a valid
// path for a write operation.  blockLock must be held for writing.
func (fbo *folderBlockOps) pathFromNodeForBlockWriteLocked(
	lState *lockState, n Node) (path, error) {
	fbo.blockLock.AssertLocked(lState)
	p := fbo.nodeCache.PathFromNode(n)
	if !p.isValid() {
		return path{}, InvalidPathError{p}
	}
	return p, nil
}

// writeGetFileLocked checks write permissions explicitly for
// writeDataLocked, truncateLocked etc and returns the file's top
// block (as a writable copy), along with the current session's UID.
func (fbo *folderBlockOps) writeGetFileLocked(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file path) (*FileBlock, keybase1.UID, error) {
	fbo.blockLock.AssertLocked(lState)

	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return nil, "", err
	}
	if !kmd.GetTlfHandle().IsWriter(session.UID) {
		return nil, "", NewWriteAccessError(kmd.GetTlfHandle(),
			session.Name, file.String())
	}
	fblock, err := fbo.getFileLocked(ctx, lState, kmd, file, blockWrite)
	if err != nil {
		return nil, "", err
	}
	return fblock, session.UID, nil
}

// Returns the set of blocks dirtied during this write that might need
// to be cleaned up if the write is deferred.
func (fbo *folderBlockOps) writeDataLocked(
	ctx context.Context, lState *lockState, kmd KeyMetadata, file path,
	data []byte, off int64) (latestWrite WriteRange, dirtyPtrs []BlockPointer,
	newlyDirtiedChildBytes int64, err error) {
	if jServer, err := GetJournalServer(fbo.config); err == nil {
		// Bracket the write so the journal knows a dirty op is in flight.
		jServer.dirtyOpStart(fbo.id())
		defer jServer.dirtyOpEnd(fbo.id())
	}

	fbo.blockLock.AssertLocked(lState)
	fbo.log.CDebugf(ctx, "writeDataLocked on file pointer %v",
		file.tailPointer())
	defer func() {
		fbo.log.CDebugf(ctx, "writeDataLocked done: %v", err)
	}()

	fblock, uid, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
	if err != nil {
		return WriteRange{}, nil, 0, err
	}

	fd := fbo.newFileData(lState, file, uid, kmd)

	dirtyBcache := fbo.config.DirtyBlockCache()
	df := fbo.getOrCreateDirtyFileLocked(lState, file)
	defer func() {
		// Always update unsynced bytes and potentially force a sync,
		// even on an error, since the previously-dirty bytes stay in
		// the cache.
		df.updateNotYetSyncingBytes(newlyDirtiedChildBytes)
		if dirtyBcache.ShouldForceSync(fbo.id()) {
			select {
			// If we can't send on the channel, that means a sync is
			// already in progress.
			case fbo.forceSyncChan <- struct{}{}:
				fbo.log.CDebugf(ctx, "Forcing a sync due to full buffer")
			default:
			}
		}
	}()

	de, err := fbo.getDirtyEntryLocked(ctx, lState, kmd, file)
	if err != nil {
		return WriteRange{}, nil, 0, err
	}
	if de.BlockPointer != file.tailPointer() {
		fbo.log.CDebugf(ctx, "DirEntry and file tail pointer don't match: "+
			"%v vs %v", de.BlockPointer, file.tailPointer())
	}

	si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
	if err != nil {
		return WriteRange{}, nil, 0, err
	}

	newDe, dirtyPtrs, unrefs, newlyDirtiedChildBytes, bytesExtended, err :=
		fd.write(ctx, data, off, fblock, de, df)
	// Record the unrefs before checking the error so we remember the
	// state of newly dirtied blocks.
	si.unrefs = append(si.unrefs, unrefs...)
	if err != nil {
		return WriteRange{}, nil, newlyDirtiedChildBytes, err
	}

	// Put it in the `deCache` even if the size didn't change, since
	// the `deCache` is used to determine whether there are any dirty
	// files.  TODO: combine `deCache` with `dirtyFiles` and
	// `unrefCache`.
	cacheEntry := fbo.deCache[file.tailPointer().Ref()]
	cacheEntry.dirEntry = newDe
	fbo.deCache[file.tailPointer().Ref()] = cacheEntry

	if fbo.doDeferWrite {
		df.addDeferredNewBytes(bytesExtended)
	}

	latestWrite = si.op.addWrite(uint64(off), uint64(len(data)))

	return latestWrite, dirtyPtrs, newlyDirtiedChildBytes, nil
}

// Write writes the given data to the given file. May block if there
// is too much unflushed data; in that case, it will be unblocked by a
// future sync.
func (fbo *folderBlockOps) Write(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file Node, data []byte, off int64) error {
	// If there is too much unflushed data, we should wait until some
	// of it gets flush so our memory usage doesn't grow without
	// bound.
	c, err := fbo.config.DirtyBlockCache().RequestPermissionToDirty(ctx,
		fbo.id(), int64(len(data)))
	if err != nil {
		return err
	}
	defer fbo.config.DirtyBlockCache().UpdateUnsyncedBytes(fbo.id(),
		-int64(len(data)), false)
	err = fbo.maybeWaitOnDeferredWrites(ctx, lState, file, c)
	if err != nil {
		return err
	}

	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)

	filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
	if err != nil {
		return err
	}

	defer func() {
		fbo.doDeferWrite = false
	}()

	latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err := fbo.writeDataLocked(
		ctx, lState, kmd, filePath, data, off)
	if err != nil {
		return err
	}

	fbo.observers.localChange(ctx, file, latestWrite)

	if fbo.doDeferWrite {
		// There's an ongoing sync, and this write altered dirty
		// blocks that are in the process of syncing.  So, we have to
		// redo this write once the sync is complete, using the new
		// file path.
		//
		// There is probably a less terrible way of doing this that
		// doesn't involve so much copying and rewriting, but this is
		// the most obviously correct way.
		dataCopy := make([]byte, len(data))
		copy(dataCopy, data)
		fbo.log.CDebugf(ctx, "Deferring a write to file %v off=%d len=%d",
			filePath.tailPointer(), off, len(data))
		fbo.deferredDirtyDeletes = append(fbo.deferredDirtyDeletes,
			dirtyPtrs...)
		fbo.deferredWrites = append(fbo.deferredWrites,
			func(ctx context.Context, lState *lockState, kmd KeyMetadata, f path) error {
				// We are about to re-dirty these bytes, so mark that
				// they will no longer be synced via the old file.
				df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
				df.updateNotYetSyncingBytes(-newlyDirtiedChildBytes)

				// Write the data again.  We know this won't be
				// deferred, so no need to check the new ptrs.
				_, _, _, err = fbo.writeDataLocked(
					ctx, lState, kmd, f, dataCopy, off)
				return err
			})
		fbo.deferredWaitBytes += newlyDirtiedChildBytes
	}

	return nil
}

// truncateExtendLocked is called by truncateLocked to extend a file and
// creates a hole.
func (fbo *folderBlockOps) truncateExtendLocked( ctx context.Context, lState *lockState, kmd KeyMetadata, file path, size uint64, parentBlocks []parentBlockAndChildIndex) ( WriteRange, []BlockPointer, error) { fblock, uid, err := fbo.writeGetFileLocked(ctx, lState, kmd, file) if err != nil { return WriteRange{}, nil, err } fd := fbo.newFileData(lState, file, uid, kmd) de, err := fbo.getDirtyEntryLocked(ctx, lState, kmd, file) if err != nil { return WriteRange{}, nil, err } df := fbo.getOrCreateDirtyFileLocked(lState, file) newDe, dirtyPtrs, err := fd.truncateExtend( ctx, size, fblock, parentBlocks, de, df) if err != nil { return WriteRange{}, nil, err } cacheEntry := fbo.deCache[file.tailPointer().Ref()] cacheEntry.dirEntry = newDe fbo.deCache[file.tailPointer().Ref()] = cacheEntry si, err := fbo.getOrCreateSyncInfoLocked(lState, de) if err != nil { return WriteRange{}, nil, err } latestWrite := si.op.addTruncate(size) if fbo.config.DirtyBlockCache().ShouldForceSync(fbo.id()) { select { // If we can't send on the channel, that means a sync is // already in progress case fbo.forceSyncChan <- struct{}{}: fbo.log.CDebugf(ctx, "Forcing a sync due to full buffer") default: } } fbo.log.CDebugf(ctx, "truncateExtendLocked: done") return latestWrite, dirtyPtrs, nil } // truncateExtendCutoffPoint is the amount of data in extending // truncate that will trigger the extending with a hole algorithm. const truncateExtendCutoffPoint = 128 * 1024 // Returns the set of newly-ID'd blocks created during this truncate // that might need to be cleaned up if the truncate is deferred. 
func (fbo *folderBlockOps) truncateLocked(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file path, size uint64) (*WriteRange, []BlockPointer, int64, error) {
	if jServer, err := GetJournalServer(fbo.config); err == nil {
		// Bracket the truncate so the journal knows a dirty op is in flight.
		jServer.dirtyOpStart(fbo.id())
		defer jServer.dirtyOpEnd(fbo.id())
	}

	fblock, uid, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
	if err != nil {
		return &WriteRange{}, nil, 0, err
	}

	fd := fbo.newFileData(lState, file, uid, kmd)

	// find the block where the file should now end
	iSize := int64(size) // TODO: deal with overflow
	_, parentBlocks, block, nextBlockOff, startOff, _, err :=
		fd.getFileBlockAtOffset(ctx, fblock, iSize, blockWrite)
	if err != nil {
		return &WriteRange{}, nil, 0, err
	}

	currLen := int64(startOff) + int64(len(block.Contents))
	if currLen+truncateExtendCutoffPoint < iSize {
		// Extending the file far past the current end: punch a hole.
		latestWrite, dirtyPtrs, err := fbo.truncateExtendLocked(
			ctx, lState, kmd, file, uint64(iSize), parentBlocks)
		if err != nil {
			return &latestWrite, dirtyPtrs, 0, err
		}
		return &latestWrite, dirtyPtrs, 0, err
	} else if currLen < iSize {
		// A short extension is just a write of zeroes at the old end.
		moreNeeded := iSize - currLen
		latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err :=
			fbo.writeDataLocked(ctx, lState, kmd, file,
				make([]byte, moreNeeded, moreNeeded), currLen)
		if err != nil {
			return &latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err
		}
		return &latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err
	} else if currLen == iSize && nextBlockOff < 0 {
		// same size!
		return nil, nil, 0, nil
	}

	// Otherwise, the file is shrinking.
	// update the local entry size
	de, err := fbo.getDirtyEntryLocked(ctx, lState, kmd, file)
	if err != nil {
		return nil, nil, 0, err
	}

	si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
	if err != nil {
		return nil, nil, 0, err
	}

	newDe, dirtyPtrs, unrefs, newlyDirtiedChildBytes, err := fd.truncateShrink(
		ctx, size, fblock, de)
	// Record the unrefs before checking the error so we remember the
	// state of newly dirtied blocks.
	si.unrefs = append(si.unrefs, unrefs...)
	if err != nil {
		return nil, nil, newlyDirtiedChildBytes, err
	}

	// Update dirtied bytes and unrefs regardless of error.
	df := fbo.getOrCreateDirtyFileLocked(lState, file)
	df.updateNotYetSyncingBytes(newlyDirtiedChildBytes)

	latestWrite := si.op.addTruncate(size)
	cacheEntry := fbo.deCache[file.tailPointer().Ref()]
	cacheEntry.dirEntry = newDe
	fbo.deCache[file.tailPointer().Ref()] = cacheEntry

	return &latestWrite, dirtyPtrs, newlyDirtiedChildBytes, nil
}

// Truncate truncates or extends the given file to the given size.
// May block if there is too much unflushed data; in that case, it
// will be unblocked by a future sync.
func (fbo *folderBlockOps) Truncate(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file Node, size uint64) error {
	// If there is too much unflushed data, we should wait until some
	// of it gets flush so our memory usage doesn't grow without
	// bound.
	//
	// Assume the whole remaining file will be dirty after this
	// truncate.  TODO: try to figure out how many bytes actually will
	// be dirtied ahead of time?
	c, err := fbo.config.DirtyBlockCache().RequestPermissionToDirty(ctx,
		fbo.id(), int64(size))
	if err != nil {
		return err
	}
	defer fbo.config.DirtyBlockCache().UpdateUnsyncedBytes(fbo.id(),
		-int64(size), false)
	err = fbo.maybeWaitOnDeferredWrites(ctx, lState, file, c)
	if err != nil {
		return err
	}

	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)

	filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
	if err != nil {
		return err
	}

	defer func() {
		fbo.doDeferWrite = false
	}()

	latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err := fbo.truncateLocked(
		ctx, lState, kmd, filePath, size)
	if err != nil {
		return err
	}

	if latestWrite != nil {
		fbo.observers.localChange(ctx, file, *latestWrite)
	}

	if fbo.doDeferWrite {
		// There's an ongoing sync, and this truncate altered
		// dirty blocks that are in the process of syncing.  So,
		// we have to redo this truncate once the sync is complete,
		// using the new file path.
		fbo.log.CDebugf(ctx, "Deferring a truncate to file %v",
			filePath.tailPointer())
		fbo.deferredDirtyDeletes = append(fbo.deferredDirtyDeletes,
			dirtyPtrs...)
		fbo.deferredWrites = append(fbo.deferredWrites,
			func(ctx context.Context, lState *lockState, kmd KeyMetadata, f path) error {
				// We are about to re-dirty these bytes, so mark that
				// they will no longer be synced via the old file.
				df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
				df.updateNotYetSyncingBytes(-newlyDirtiedChildBytes)

				// Truncate the file again.  We know this won't be
				// deferred, so no need to check the new ptrs.
				_, _, _, err := fbo.truncateLocked(
					ctx, lState, kmd, f, size)
				return err
			})
		fbo.deferredWaitBytes += newlyDirtiedChildBytes
	}

	return nil
}

// IsDirty returns whether the given file is dirty; if false is
// returned, then the file doesn't need to be synced.
func (fbo *folderBlockOps) IsDirty(lState *lockState, file path) bool {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	// Definitely dirty if a block is dirty.
	if fbo.config.DirtyBlockCache().IsDirty(
		fbo.id(), file.tailPointer(), file.Branch) {
		return true
	}

	// The deCache entry could still be dirty, if a file had an
	// attribute set (like mtime or exec) after the file was removed.
	// Still count the file as dirty in that case; most likely, the
	// caller will next call `ClearCacheInfo` to remove this entry.
	// (See comments in `folderBranchOps.syncLocked`.)
	_, ok := fbo.deCache[file.tailPointer().Ref()]
	return ok
}

// clearCacheInfoLocked drops all per-file dirty state (deCache,
// unrefCache, dirtyFiles) for the given file, finishing any dirty
// file sync first.  blockLock must be held for writing.
func (fbo *folderBlockOps) clearCacheInfoLocked(lState *lockState,
	file path) error {
	fbo.blockLock.AssertLocked(lState)
	ref := file.tailPointer().Ref()
	delete(fbo.deCache, ref)
	delete(fbo.unrefCache, ref)
	df := fbo.dirtyFiles[file.tailPointer()]
	if df != nil {
		err := df.finishSync()
		if err != nil {
			return err
		}
		delete(fbo.dirtyFiles, file.tailPointer())
	}
	return nil
}

// ClearCacheInfo removes any cached info for the given file.
func (fbo *folderBlockOps) ClearCacheInfo(lState *lockState, file path) error { fbo.blockLock.Lock(lState) defer fbo.blockLock.Unlock(lState) return fbo.clearCacheInfoLocked(lState, file) } // revertSyncInfoAfterRecoverableError updates the saved sync info to // include all the blocks from before the error, except for those that // have encountered recoverable block errors themselves. func (fbo *folderBlockOps) revertSyncInfoAfterRecoverableError( blocksToRemove []BlockPointer, result fileSyncState) { si := result.si savedSi := result.savedSi // Save the blocks we need to clean up on the next attempt. toClean := si.toCleanIfUnused newIndirect := make(map[BlockPointer]bool) for _, ptr := range result.newIndirectFileBlockPtrs { newIndirect[ptr] = true } // Propagate all unrefs forward, except those that belong to new // blocks that were created during the sync. unrefs := make([]BlockInfo, 0, len(si.unrefs)) for _, unref := range si.unrefs { if newIndirect[unref.BlockPointer] { fbo.log.CDebugf(nil, "Dropping unref %v", unref) continue } unrefs = append(unrefs, unref) } // This sync will be retried and needs new blocks, so // reset everything in the sync info. *si = *savedSi si.toCleanIfUnused = toClean si.unrefs = unrefs if si.bps == nil { return } si.bps.blockStates = nil // Mark any bad pointers so they get skipped next time. blocksToRemoveSet := make(map[BlockPointer]bool) for _, ptr := range blocksToRemove { blocksToRemoveSet[ptr] = true } for _, bs := range savedSi.bps.blockStates { // Only save the good pointers if !blocksToRemoveSet[bs.blockPtr] { si.bps.blockStates = append(si.bps.blockStates, bs) } } } // ReadyBlock is a thin wrapper around BlockOps.Ready() that handles // checking for duplicates. 
func ReadyBlock(ctx context.Context, bcache BlockCache, bops BlockOps,
	crypto cryptoPure, kmd KeyMetadata, block Block, uid keybase1.UID,
	bType keybase1.BlockType) (
	info BlockInfo, plainSize int, readyBlockData ReadyBlockData, err error) {
	var ptr BlockPointer
	directType := IndirectBlock
	if fBlock, ok := block.(*FileBlock); ok && !fBlock.IsInd {
		directType = DirectBlock
		// first see if we are duplicating any known blocks in this folder
		ptr, err = bcache.CheckForKnownPtr(kmd.TlfID(), fBlock)
		if err != nil {
			return
		}
	} else if dBlock, ok := block.(*DirBlock); ok {
		if dBlock.IsInd {
			panic("Indirect directory blocks aren't supported yet")
		}
		// TODO: support indirect directory blocks.
		directType = DirectBlock
	}

	// Ready the block, even in the case where we can reuse an
	// existing block, just so that we know what the size of the
	// encrypted data will be.
	id, plainSize, readyBlockData, err := bops.Ready(ctx, kmd, block)
	if err != nil {
		return
	}

	if ptr.IsInitialized() {
		// Deduping an existing block: keep its ID but give it a
		// fresh ref nonce and writer.
		ptr.RefNonce, err = crypto.MakeBlockRefNonce()
		if err != nil {
			return
		}
		ptr.SetWriter(uid)
		// In case we're deduping an old pointer with an unknown block type.
		ptr.DirectType = directType
	} else {
		ptr = BlockPointer{
			ID:         id,
			KeyGen:     kmd.LatestKeyGeneration(),
			DataVer:    block.DataVersion(),
			DirectType: directType,
			Context:    kbfsblock.MakeFirstContext(uid, bType),
		}
	}

	info = BlockInfo{
		BlockPointer: ptr,
		EncodedSize:  uint32(readyBlockData.GetEncodedSize()),
	}
	return
}

// fileSyncState holds state for a sync operation for a single
// file.
type fileSyncState struct {
	// If fblock is non-nil, the (dirty, indirect, cached) block
	// it points to will be set to savedFblock on a recoverable
	// error.
	fblock, savedFblock *FileBlock

	// redirtyOnRecoverableError, which is non-nil only when fblock is
	// non-nil, contains pointers that need to be re-dirtied if the
	// top block gets copied during the sync, and a recoverable error
	// happens.
Maps to the old block pointer for the block, which
	// would need a DirtyBlockCache.Delete.
	redirtyOnRecoverableError map[BlockPointer]BlockPointer

	// If si is non-nil, its updated state will be reset on
	// error. Also, if the error is recoverable, it will be
	// reverted to savedSi.
	//
	// TODO: Working with si in this way is racy, since si is a
	// member of unrefCache.
	si, savedSi *syncInfo

	// oldFileBlockPtrs is a list of transient entries in the
	// block cache for the file, which should be removed when the
	// sync finishes.
	oldFileBlockPtrs []BlockPointer

	// newIndirectFileBlockPtrs is a list of permanent entries
	// added to the block cache for the file, which should be
	// removed after the blocks have been sent to the server.
	// They are not removed on an error, because in that case the
	// file is still dirty locally and may get another chance to
	// be sync'd.
	//
	// TODO: This can be a list of IDs instead.
	newIndirectFileBlockPtrs []BlockPointer
}

// startSyncWrite contains the portion of StartSync() that's done
// while write-locking blockLock.  If there is no dirty de cache
// entry, dirtyDe will be nil.
func (fbo *folderBlockOps) startSyncWrite(ctx context.Context,
	lState *lockState, md *RootMetadata, uid keybase1.UID, file path) (
	fblock *FileBlock, bps *blockPutState, syncState fileSyncState,
	dirtyDe *DirEntry, err error) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)

	// update the parent directories, and write all the new blocks out
	// to disk
	fblock, err = fbo.getFileLocked(ctx, lState, md.ReadOnly(), file, blockWrite)
	if err != nil {
		return nil, nil, syncState, nil, err
	}

	fileRef := file.tailPointer().Ref()
	si, ok := fbo.unrefCache[fileRef]
	if !ok {
		return nil, nil, syncState, nil,
			fmt.Errorf("No syncOp found for file ref %v", fileRef)
	}

	// Collapse the write range to reduce the size of the sync op.
	si.op.Writes = si.op.collapseWriteRange(nil)

	// If this function returns a success, we need to make sure the op
	// in `md` is not the same variable as the op in `unrefCache`,
	// because the latter could get updated still by local writes
	// before `md` is flushed to the server.  We don't copy it here
	// because code below still needs to modify it (and by extension,
	// the one stored in `syncState.si`).
	si.op.setFinalPath(file)
	md.AddOp(si.op)

	// Fill in syncState.
	if fblock.IsInd {
		// Save a deep copy of the top block so it can be restored on
		// a recoverable error.
		fblockCopy := fblock.DeepCopy()
		syncState.fblock = fblock
		syncState.savedFblock = fblockCopy
		syncState.redirtyOnRecoverableError = make(map[BlockPointer]BlockPointer)
	}
	syncState.si = si
	syncState.savedSi, err = si.DeepCopy(fbo.config.Codec())
	if err != nil {
		return nil, nil, syncState, nil, err
	}

	if si.bps == nil {
		si.bps = newBlockPutState(1)
	} else {
		// reinstate byte accounting from the previous Sync
		md.SetRefBytes(si.refBytes)
		md.AddDiskUsage(si.refBytes)
		md.SetUnrefBytes(si.unrefBytes)
		md.SetMDRefBytes(0) // this will be calculated anew
		md.SetDiskUsage(md.DiskUsage() - si.unrefBytes)
		syncState.newIndirectFileBlockPtrs = append(
			syncState.newIndirectFileBlockPtrs, si.op.Refs()...)
	}
	// Keep si's byte accounting in step with whatever md ends up
	// with, no matter how we return.
	defer func() {
		si.refBytes = md.RefBytes()
		si.unrefBytes = md.UnrefBytes()
	}()

	dirtyBcache := fbo.config.DirtyBlockCache()
	df := fbo.getOrCreateDirtyFileLocked(lState, file)
	fd := fbo.newFileData(lState, file, uid, md.ReadOnly())

	// Note: below we add possibly updated file blocks as "unref" and
	// "ref" blocks.  This is fine, since conflict resolution or
	// notifications will never happen within a file.

	// If needed, split the children blocks up along new boundaries
	// (e.g., if using a fingerprint-based block splitter).
	unrefs, err := fd.split(ctx, fbo.id(), dirtyBcache, fblock, df)
	// Preserve any unrefs before checking the error.
	for _, unref := range unrefs {
		md.AddUnrefBlock(unref)
	}
	if err != nil {
		return nil, nil, syncState, nil, err
	}

	// Ready all children blocks, if any.
	oldPtrs, err := fd.ready(ctx, fbo.id(), fbo.config.BlockCache(),
		fbo.config.DirtyBlockCache(), fbo.config.BlockOps(), si.bps, fblock, df)
	if err != nil {
		return nil, nil, syncState, nil, err
	}

	for newInfo, oldPtr := range oldPtrs {
		syncState.newIndirectFileBlockPtrs = append(
			syncState.newIndirectFileBlockPtrs, newInfo.BlockPointer)
		df.setBlockOrphaned(oldPtr, true)

		// Defer the DirtyBlockCache.Delete until after the new path
		// is ready, in case anyone tries to read the dirty file in
		// the meantime.
		syncState.oldFileBlockPtrs = append(syncState.oldFileBlockPtrs, oldPtr)

		md.AddRefBlock(newInfo)

		// If this block is replacing a block from a previous, failed
		// Sync, we need to take that block out of the refs list, and
		// avoid unrefing it as well.
		si.removeReplacedBlock(ctx, fbo.log, oldPtr)

		err = df.setBlockSyncing(oldPtr)
		if err != nil {
			return nil, nil, syncState, nil, err
		}
		syncState.redirtyOnRecoverableError[newInfo.BlockPointer] = oldPtr
	}

	err = df.setBlockSyncing(file.tailPointer())
	if err != nil {
		return nil, nil, syncState, nil, err
	}
	syncState.oldFileBlockPtrs = append(
		syncState.oldFileBlockPtrs, file.tailPointer())

	// Capture the current de before we release the block lock, so
	// other deferred writes don't slip in.
	if de, ok := fbo.deCache[fileRef]; ok {
		dirtyDe = &de.dirEntry
	}

	// Leave a copy of the syncOp in `unrefCache`, since it may be
	// modified by future local writes while the syncOp in `md` should
	// only be modified by the rest of this sync process.
	var syncOpCopy *syncOp
	err = kbfscodec.Update(fbo.config.Codec(), &syncOpCopy, si.op)
	if err != nil {
		return nil, nil, syncState, nil, err
	}
	fbo.unrefCache[fileRef].op = syncOpCopy

	// If there are any deferred bytes, it must be because this is
	// a retried sync and some blocks snuck in between sync.  Those
	// blocks will get transferred now, but they are also on the
	// deferred list and will be retried on the next sync as well.
	df.assimilateDeferredNewBytes()

	// TODO: Returning si.bps in this way is racy, since si is a
	// member of unrefCache.
	return fblock, si.bps, syncState, dirtyDe, nil
}

// makeLocalBcache builds a local block cache containing the parent
// directory block, with the file's directory entry updated to the
// cached dirty copy (if any), for use when flushing the sync's MD.
func (fbo *folderBlockOps) makeLocalBcache(ctx context.Context,
	lState *lockState, md *RootMetadata, file path, si *syncInfo,
	dirtyDe *DirEntry) (lbc localBcache, err error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)

	parentPath := file.parentPath()

	dblock, err := fbo.getDirLocked(
		ctx, lState, md.ReadOnly(), *parentPath, blockWrite)
	if err != nil {
		return nil, err
	}

	// Add in the cached unref'd blocks.
	si.mergeUnrefCache(md)

	lbc = make(localBcache)

	// Update the file's directory entry to the cached copy.
	if dirtyDe != nil {
		dirtyDe.EncodedSize = si.oldInfo.EncodedSize
		dblock.Children[file.tailName()] = *dirtyDe
		lbc[parentPath.tailPointer()] = dblock
	}

	return lbc, nil
}

// StartSync starts a sync for the given file. It returns the new
// FileBlock which has the readied top-level block which includes all
// writes since the last sync. Must be used with CleanupSyncState()
// and UpdatePointers/FinishSyncLocked() like so:
//
//  fblock, bps, lbc, syncState, err :=
//    ...fbo.StartSync(ctx, lState, md, uid, file)
//  defer func() {
//    ...fbo.CleanupSyncState(
//      ctx, lState, md, file, ..., syncState, err)
//  }()
//  if err != nil {
//    ...
//  }
//  ...
//  ... = fbo.UpdatePointers(..., func() error {
//    ...fbo.FinishSyncLocked(ctx, lState, file, ..., syncState)
//  })
func (fbo *folderBlockOps) StartSync(ctx context.Context,
	lState *lockState, md *RootMetadata, uid keybase1.UID, file path) (
	fblock *FileBlock, bps *blockPutState, lbc localBcache,
	syncState fileSyncState, err error) {
	// If journaling is enabled, note that a dirty op is starting.
	if jServer, err := GetJournalServer(fbo.config); err == nil {
		jServer.dirtyOpStart(fbo.id())
	}

	fblock, bps, syncState, dirtyDe, err := fbo.startSyncWrite(
		ctx, lState, md, uid, file)
	if err != nil {
		return nil, nil, nil, syncState, err
	}

	lbc, err = fbo.makeLocalBcache(ctx, lState, md, file, syncState.savedSi,
		dirtyDe)
	if err != nil {
		return nil, nil, nil, syncState, err
	}
	return fblock, bps, lbc, syncState, err
}

// Does any clean-up for a sync of the given file, given an error
// (which may be nil) that happens during or after StartSync() and
// before FinishSync().  blocksToRemove may be nil.
func (fbo *folderBlockOps) CleanupSyncState(
	ctx context.Context, lState *lockState, md ReadOnlyRootMetadata,
	file path, blocksToRemove []BlockPointer,
	result fileSyncState, err error) {
	// If journaling is enabled, note that the dirty op is done.
	if jServer, err := GetJournalServer(fbo.config); err == nil {
		defer jServer.dirtyOpEnd(fbo.id())
	}

	if err == nil {
		return
	}

	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)

	// Notify error listeners before we reset the dirty blocks and
	// permissions to be granted.
	fbo.notifyErrListenersLocked(lState, file.tailPointer(), err)

	// If there was an error, we need to back out any changes that
	// might have been filled into the sync op, because it could
	// get reused again in a later Sync call.
	if result.si != nil {
		result.si.op.resetUpdateState()

		// Save this MD for later, so we can clean up its
		// newly-referenced block pointers if necessary.
		result.si.toCleanIfUnused = append(result.si.toCleanIfUnused,
			mdToCleanIfUnused{md, result.si.bps.DeepCopy()})
	}
	if isRecoverableBlockError(err) {
		// Recoverable: restore the saved sync info and top block so
		// the next Sync attempt can retry from a consistent state.
		if result.si != nil {
			fbo.revertSyncInfoAfterRecoverableError(blocksToRemove, result)
		}
		if result.fblock != nil {
			result.fblock.Set(result.savedFblock)
			fbo.fixChildBlocksAfterRecoverableErrorLocked(
				ctx, lState, file, md,
				result.redirtyOnRecoverableError)
		}
	} else {
		// Since the sync has errored out unrecoverably, the deferred
		// bytes are already accounted for.
		if df := fbo.dirtyFiles[file.tailPointer()]; df != nil {
			df.updateNotYetSyncingBytes(-fbo.deferredWaitBytes)

			// Some blocks that were dirty are now clean under their
			// readied block ID, and now live in the bps rather than
			// the dirty bcache, so we can delete them from the dirty
			// bcache.
			dirtyBcache := fbo.config.DirtyBlockCache()
			for _, ptr := range result.oldFileBlockPtrs {
				if df.isBlockOrphaned(ptr) {
					fbo.log.CDebugf(ctx, "Deleting dirty orphan: %v", ptr)
					if err := dirtyBcache.Delete(fbo.id(), ptr,
						fbo.branch()); err != nil {
						// Best-effort delete; just log the failure.
						fbo.log.CDebugf(ctx, "Couldn't delete %v", ptr)
					}
				}
			}
		}

		// On an unrecoverable error, the deferred writes aren't
		// needed anymore since they're already part of the
		// (still-)dirty blocks.
		fbo.deferredDirtyDeletes = nil
		fbo.deferredWrites = nil
		fbo.deferredWaitBytes = 0
	}

	// The sync is over, due to an error, so reset the map so that we
	// don't defer any subsequent writes.
	// Old syncing blocks are now just dirty
	if df := fbo.dirtyFiles[file.tailPointer()]; df != nil {
		df.resetSyncingBlocksToDirty()
	}
}

// cleanUpUnusedBlocks cleans up the blocks from any previous failed
// sync attempts.
func (fbo *folderBlockOps) cleanUpUnusedBlocks(ctx context.Context,
	md ReadOnlyRootMetadata, syncState fileSyncState,
	fbm *folderBlockManager) error {
	numToClean := len(syncState.si.toCleanIfUnused)
	if numToClean == 0 {
		return nil
	}

	// What blocks are referenced in the successful MD?
	refs := make(map[BlockPointer]bool)
	for _, op := range md.data.Changes.Ops {
		for _, ptr := range op.Refs() {
			if ptr == zeroPtr {
				panic("Unexpected zero ref ptr in a sync MD revision")
			}
			refs[ptr] = true
		}
		for _, update := range op.allUpdates() {
			if update.Ref == zeroPtr {
				panic("Unexpected zero update ref ptr in a sync MD revision")
			}
			refs[update.Ref] = true
		}
	}

	// For each MD to clean, clean up the old failed blocks
	// immediately if the merge status matches the successful put, if
	// they didn't get referenced in the successful put.  If the merge
	// status is different (e.g., we ended up on a conflict branch),
	// clean it up only if the original revision failed.  If the same
	// block appears more than once, the one with a different merged
	// status takes precedence (which will always come earlier in the
	// list of MDs).
	blocksSeen := make(map[BlockPointer]bool)
	for _, oldMD := range syncState.si.toCleanIfUnused {
		bdType := blockDeleteAlways
		if oldMD.md.MergedStatus() != md.MergedStatus() {
			bdType = blockDeleteOnMDFail
		}

		failedBps := newBlockPutState(len(oldMD.bps.blockStates))
		for _, bs := range oldMD.bps.blockStates {
			if bs.blockPtr == zeroPtr {
				panic("Unexpected zero block ptr in an old sync MD revision")
			}
			if blocksSeen[bs.blockPtr] {
				continue
			}
			blocksSeen[bs.blockPtr] = true
			if refs[bs.blockPtr] && bdType == blockDeleteAlways {
				// Still referenced by the successful put; keep it.
				continue
			}
			failedBps.blockStates = append(failedBps.blockStates,
				blockState{blockPtr: bs.blockPtr})
			fbo.log.CDebugf(ctx, "Cleaning up block %v from a previous "+
				"failed revision %d (oldMD is %s, bdType=%d)", bs.blockPtr,
				oldMD.md.Revision(), oldMD.md.MergedStatus(), bdType)
		}

		if len(failedBps.blockStates) > 0 {
			fbm.cleanUpBlockState(oldMD.md, failedBps, bdType)
		}
	}
	return nil
}

// doDeferredWritesLocked re-applies any writes or truncates that were
// deferred while a sync was in flight, using the file's new path.
// blockLock must be held for writing.
func (fbo *folderBlockOps) doDeferredWritesLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, newPath path) (
	stillDirty bool, err error) {
	fbo.blockLock.AssertLocked(lState)

	// Redo any writes or truncates that happened to our file while
	// the sync
was happening.
	deletes := fbo.deferredDirtyDeletes
	writes := fbo.deferredWrites
	stillDirty = len(fbo.deferredWrites) != 0
	fbo.deferredDirtyDeletes = nil
	fbo.deferredWrites = nil
	fbo.deferredWaitBytes = 0

	// Clear any dirty blocks that resulted from a write/truncate
	// happening during the sync, since we're redoing them below.
	dirtyBcache := fbo.config.DirtyBlockCache()
	for _, ptr := range deletes {
		fbo.log.CDebugf(ctx, "Deleting deferred dirty ptr %v", ptr)
		if err := dirtyBcache.Delete(fbo.id(), ptr, fbo.branch()); err != nil {
			return true, err
		}
	}

	for _, f := range writes {
		err = f(ctx, lState, kmd, newPath)
		if err != nil {
			// It's a little weird to return an error from a deferred
			// write here.  Hopefully that will never happen.
			return true, err
		}
	}

	return stillDirty, nil
}

// FinishSyncLocked finishes the sync process for a file, given the
// state from StartSync.  Specifically, it re-applies any writes that
// happened since the call to StartSync.
func (fbo *folderBlockOps) FinishSyncLocked(
	ctx context.Context, lState *lockState,
	oldPath, newPath path, md ReadOnlyRootMetadata,
	syncState fileSyncState, fbm *folderBlockManager) (
	stillDirty bool, err error) {
	fbo.blockLock.AssertLocked(lState)

	// The sync is done; the dirty versions of the synced blocks can
	// now be dropped from the dirty cache.
	dirtyBcache := fbo.config.DirtyBlockCache()
	for _, ptr := range syncState.oldFileBlockPtrs {
		fbo.log.CDebugf(ctx, "Deleting dirty ptr %v", ptr)
		if err := dirtyBcache.Delete(fbo.id(), ptr, fbo.branch()); err != nil {
			return true, err
		}
	}

	bcache := fbo.config.BlockCache()
	for _, ptr := range syncState.newIndirectFileBlockPtrs {
		err := bcache.DeletePermanent(ptr.ID)
		if err != nil {
			// Best-effort cleanup; log and continue.
			fbo.log.CWarningf(ctx, "Error when deleting %v from cache: %v",
				ptr.ID, err)
		}
	}

	stillDirty, err = fbo.doDeferredWritesLocked(ctx, lState, md, newPath)
	if err != nil {
		return true, err
	}

	// Clear cached info for the old path.
We are guaranteed that any
	// concurrent write to this file was deferred, even if it was to a
	// block that wasn't currently being sync'd, since the top-most
	// block is always in dirtyFiles and is always dirtied during a
	// write/truncate.
	//
	// Also, we can get rid of all the sync state that might have
	// happened during the sync, since we will replay the writes
	// below anyway.
	if err := fbo.clearCacheInfoLocked(lState, oldPath); err != nil {
		return true, err
	}

	if err := fbo.cleanUpUnusedBlocks(ctx, md, syncState, fbm); err != nil {
		return true, err
	}

	return stillDirty, nil
}

// notifyErrListeners notifies any write operations that are blocked
// on a file so that they can learn about unrecoverable sync errors.
func (fbo *folderBlockOps) notifyErrListenersLocked(lState *lockState,
	ptr BlockPointer, err error) {
	fbo.blockLock.AssertLocked(lState)
	if isRecoverableBlockError(err) {
		// Don't bother any listeners with this error, since the sync
		// will be retried.  Unless the sync has reached its retry
		// limit, but in that case the listeners will just proceed as
		// normal once the dirty block cache bytes are freed, and
		// that's ok since this error isn't fatal.
		return
	}
	df := fbo.dirtyFiles[ptr]
	if df != nil {
		df.notifyErrListeners(err)
	}
}

// searchWithOutOfDateCacheError indicates that a node-cache-based
// search raced with a cache update and should be retried with a
// clean cache.
type searchWithOutOfDateCacheError struct {
}

// Error implements the error interface.
func (e searchWithOutOfDateCacheError) Error() string {
	// Return the constant directly; wrapping it in fmt.Sprintf with
	// no format verbs was a no-op (staticcheck S1039).
	return "Search is using an out-of-date node cache; " +
		"try again with a clean cache."
}

// searchForNodesInDirLocked recursively tries to find a path, and
// ultimately a node, to ptr, given the set of pointers that were
// updated in a particular operation.  The keys in nodeMap make up the
// set of BlockPointers that are being searched for, and nodeMap is
// updated in place to include the corresponding discovered nodes.
//
// Returns the number of nodes found by this invocation.  If the error
// it returns is searchWithOutOfDateCache, the search should be
// retried by the caller with a clean cache.
func (fbo *folderBlockOps) searchForNodesInDirLocked(ctx context.Context,
	lState *lockState, cache NodeCache, newPtrs map[BlockPointer]bool,
	kmd KeyMetadata, rootNode Node, currDir path,
	nodeMap map[BlockPointer]Node, numNodesFoundSoFar int) (int, error) {
	fbo.blockLock.AssertAnyLocked(lState)

	dirBlock, err := fbo.getDirLocked(
		ctx, lState, kmd, currDir, blockRead)
	if err != nil {
		return 0, err
	}

	// getDirLocked may have unlocked blockLock, which means the cache
	// could have changed out from under us.  Verify that didn't
	// happen, so we can avoid messing it up with nodes from an old MD
	// version.  If it did happen, return a special error that lets
	// the caller know they should retry with a fresh cache.
	if currDir.path[0].BlockPointer !=
		cache.PathFromNode(rootNode).tailPointer() {
		return 0, searchWithOutOfDateCacheError{}
	}

	if numNodesFoundSoFar >= len(nodeMap) {
		// All requested pointers have already been found.
		return 0, nil
	}

	numNodesFound := 0
	for name, de := range dirBlock.Children {
		if _, ok := nodeMap[de.BlockPointer]; ok {
			childPath := currDir.ChildPath(name, de.BlockPointer)
			// make a node for every pathnode
			n := rootNode
			for i, pn := range childPath.path[1:] {
				if !pn.BlockPointer.IsValid() {
					// Temporary debugging output for KBFS-1764 -- the
					// GetOrCreate call below will panic.
					fbo.log.CDebugf(ctx, "Invalid block pointer, path=%s, "+
						"path.path=%v (index %d), name=%s, de=%#v, "+
						"nodeMap=%v, newPtrs=%v, kmd=%#v", childPath,
						childPath.path, i, name, de, nodeMap, newPtrs, kmd)
				}
				n, err = cache.GetOrCreate(pn.BlockPointer, pn.Name, n)
				if err != nil {
					return 0, err
				}
			}
			nodeMap[de.BlockPointer] = n
			numNodesFound++
			if numNodesFoundSoFar+numNodesFound >= len(nodeMap) {
				return numNodesFound, nil
			}
		}

		// otherwise, recurse if this represents an updated block
		if _, ok := newPtrs[de.BlockPointer]; de.Type == Dir && ok {
			childPath := currDir.ChildPath(name, de.BlockPointer)
			n, err := fbo.searchForNodesInDirLocked(ctx, lState, cache,
				newPtrs, kmd, rootNode, childPath, nodeMap,
				numNodesFoundSoFar+numNodesFound)
			if err != nil {
				return 0, err
			}
			numNodesFound += n
			if numNodesFoundSoFar+numNodesFound >= len(nodeMap) {
				return numNodesFound, nil
			}
		}
	}

	return numNodesFound, nil
}

// trySearchWithCacheLocked attempts to resolve each pointer in ptrs
// to a Node using the given cache; see searchForNodesLocked for the
// retry-with-a-clean-cache wrapper.
func (fbo *folderBlockOps) trySearchWithCacheLocked(ctx context.Context,
	lState *lockState, cache NodeCache, ptrs []BlockPointer,
	newPtrs map[BlockPointer]bool, kmd KeyMetadata, rootPtr BlockPointer) (
	map[BlockPointer]Node, error) {
	fbo.blockLock.AssertAnyLocked(lState)

	nodeMap := make(map[BlockPointer]Node)
	for _, ptr := range ptrs {
		nodeMap[ptr] = nil
	}

	if len(ptrs) == 0 {
		return nodeMap, nil
	}

	var node Node
	// The node cache used by the main part of KBFS is
	// fbo.nodeCache. This basically maps from BlockPointers to
	// Nodes. Nodes are used by the callers of the library, but
	// internally we need to know the series of BlockPointers and
	// file/dir names that make up the path of the corresponding
	// file/dir. fbo.nodeCache is long-lived and never invalidated.
	//
	// As folderBranchOps gets informed of new local or remote MD
	// updates, which change the BlockPointers of some subset of the
	// nodes in this TLF, it calls nodeCache.UpdatePointer for each
	// change.
Then, when a caller passes some old Node they have
	// lying around into an FBO call, we can translate it to its
	// current path using fbo.nodeCache.  Note that on every TLF
	// modification, we are guaranteed that the BlockPointer of the
	// root directory will change (because of the merkle-ish tree of
	// content hashes we use to assign BlockPointers).
	//
	// fbo.nodeCache needs to maintain the absolute latest mappings
	// for the TLF, or else FBO calls won't see up-to-date data.  The
	// tension in search comes from the fact that we are trying to
	// discover the BlockPointers of certain files at a specific point
	// in the MD history, which is not necessarily the same as the
	// most-recently-seen MD update.  Specifically, some callers
	// process a specific range of MDs, but folderBranchOps may have
	// heard about a newer one before, or during, when the caller
	// started processing.  That means fbo.nodeCache may have been
	// updated to reflect the newest BlockPointers, and is no longer
	// correct as a cache for our search for the data at the old point
	// in time.
	if cache == fbo.nodeCache {
		// Root node should already exist if we have an up-to-date md.
		node = cache.Get(rootPtr.Ref())
		if node == nil {
			return nil, searchWithOutOfDateCacheError{}
		}
	} else {
		// Root node may or may not exist.
		var err error
		node, err = cache.GetOrCreate(rootPtr,
			string(kmd.GetTlfHandle().GetCanonicalName()), nil)
		if err != nil {
			return nil, err
		}
	}
	if node == nil {
		return nil, fmt.Errorf("Cannot find root node corresponding to %v",
			rootPtr)
	}

	// are they looking for the root directory?
	numNodesFound := 0
	if _, ok := nodeMap[rootPtr]; ok {
		nodeMap[rootPtr] = node
		numNodesFound++
		if numNodesFound >= len(nodeMap) {
			return nodeMap, nil
		}
	}

	rootPath := cache.PathFromNode(node)
	if len(rootPath.path) != 1 {
		return nil, fmt.Errorf("Invalid root path for %v: %s",
			rootPtr, rootPath)
	}

	_, err := fbo.searchForNodesInDirLocked(ctx, lState, cache, newPtrs,
		kmd, node, rootPath, nodeMap, numNodesFound)
	if err != nil {
		return nil, err
	}

	// If the root pointer moved during the search, the results are
	// stale; signal the caller to retry with a clean cache.
	if rootPtr != cache.PathFromNode(node).tailPointer() {
		return nil, searchWithOutOfDateCacheError{}
	}

	return nodeMap, nil
}

// searchForNodesLocked resolves ptrs to Nodes using the given cache,
// falling back to a fresh throwaway cache if the given one turns out
// to be out of date with respect to rootPtr.
func (fbo *folderBlockOps) searchForNodesLocked(ctx context.Context,
	lState *lockState, cache NodeCache, ptrs []BlockPointer,
	newPtrs map[BlockPointer]bool, kmd KeyMetadata, rootPtr BlockPointer) (
	map[BlockPointer]Node, NodeCache, error) {
	fbo.blockLock.AssertAnyLocked(lState)

	// First try the passed-in cache.  If it doesn't work because the
	// cache is out of date, try again with a clean cache.
	nodeMap, err := fbo.trySearchWithCacheLocked(ctx, lState, cache, ptrs,
		newPtrs, kmd, rootPtr)
	if _, ok := err.(searchWithOutOfDateCacheError); ok {
		// The md is out-of-date, so use a throwaway cache so we
		// don't pollute the real node cache with stale nodes.
		fbo.log.CDebugf(ctx, "Root node %v doesn't exist in the node "+
			"cache; using a throwaway node cache instead",
			rootPtr)
		cache = newNodeCacheStandard(fbo.folderBranch)
		nodeMap, err = fbo.trySearchWithCacheLocked(ctx, lState, cache, ptrs,
			newPtrs, kmd, rootPtr)
	}
	if err != nil {
		return nil, nil, err
	}

	// Return the whole map even if some nodes weren't found.
	return nodeMap, cache, nil
}

// SearchForNodes tries to resolve all the given pointers to a Node
// object, using only the updated pointers specified in newPtrs.
// Returns an error if any subset of the pointer paths do not exist;
// it is the caller's responsibility to decide to error on particular
// unresolved nodes.
It also returns the cache that ultimately
// contains the nodes -- this might differ from the passed-in cache if
// another goroutine updated that cache and it no longer contains the
// root pointer specified in md.
func (fbo *folderBlockOps) SearchForNodes(ctx context.Context,
	cache NodeCache, ptrs []BlockPointer, newPtrs map[BlockPointer]bool,
	kmd KeyMetadata, rootPtr BlockPointer) (
	map[BlockPointer]Node, NodeCache, error) {
	lState := makeFBOLockState()
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.searchForNodesLocked(
		ctx, lState, cache, ptrs, newPtrs, kmd, rootPtr)
}

// SearchForPaths is like SearchForNodes, except it returns a
// consistent view of all the paths of the searched-for pointers.
func (fbo *folderBlockOps) SearchForPaths(ctx context.Context,
	cache NodeCache, ptrs []BlockPointer, newPtrs map[BlockPointer]bool,
	kmd KeyMetadata, rootPtr BlockPointer) (map[BlockPointer]path, error) {
	lState := makeFBOLockState()
	// Hold the lock while processing the paths so they can't be changed.
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	nodeMap, cache, err := fbo.searchForNodesLocked(
		ctx, lState, cache, ptrs, newPtrs, kmd, rootPtr)
	if err != nil {
		return nil, err
	}

	paths := make(map[BlockPointer]path)
	for ptr, n := range nodeMap {
		if n == nil {
			// Unresolved pointer; represented as an empty path.
			paths[ptr] = path{}
			continue
		}

		p := cache.PathFromNode(n)
		if p.tailPointer() != ptr {
			return nil, NodeNotFoundError{ptr}
		}
		paths[ptr] = p
	}

	return paths, nil
}

// getUndirtiedEntry returns the clean entry for the given path
// corresponding to a cached dirty entry.  If there is no dirty or
// clean entry, nil is returned.
func (fbo *folderBlockOps) getUndirtiedEntry(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file path) (*DirEntry, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)

	// No dirty cache entry means there's nothing to undirty.
	_, ok := fbo.deCache[file.tailPointer().Ref()]
	if !ok {
		return nil, nil
	}

	// Get the undirtied dir block.
	dblock, err := fbo.getDirLocked(
		ctx, lState, kmd, *file.parentPath(), blockRead)
	if err != nil {
		return nil, err
	}

	undirtiedEntry, ok := dblock.Children[file.tailName()]
	if !ok {
		return nil, nil
	}

	return &undirtiedEntry, nil
}

// setCachedAttr applies the given attribute change to the cached
// entry for ref (optionally creating one), taking blockLock.
func (fbo *folderBlockOps) setCachedAttr(
	lState *lockState, ref BlockRef, attr attrChange, realEntry *DirEntry,
	doCreate bool) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	fbo.setCachedAttrLocked(lState, ref, attr, realEntry, doCreate)
}

// UpdateCachedEntryAttributes updates any cached entry for the given
// path according to the given op. The node for the path is returned
// if there is one.
func (fbo *folderBlockOps) UpdateCachedEntryAttributes(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	dir path, op *setAttrOp) (Node, error) {
	childPath := dir.ChildPathNoPtr(op.Name)

	// find the node for the actual change; requires looking up
	// the child entry to get the BlockPointer, unfortunately.
	de, err := fbo.GetDirtyEntry(ctx, lState, kmd, childPath)
	if err != nil {
		return nil, err
	}

	childNode := fbo.nodeCache.Get(de.Ref())
	if childNode == nil {
		// Nothing to do, since the cache entry won't be
		// accessible from any node.
		return nil, nil
	}

	childPath = dir.ChildPath(op.Name, de.BlockPointer)

	// If there's a cache entry, we need to update it, so try and
	// fetch the undirtied entry.
	cleanEntry, err := fbo.getUndirtiedEntry(ctx, lState, kmd, childPath)
	if err != nil {
		return nil, err
	}

	if cleanEntry != nil {
		fbo.setCachedAttr(lState, de.Ref(), op.Attr, cleanEntry, false)
	}

	return childNode, nil
}

// UpdateCachedEntryAttributesOnRemovedFile updates any cached entry
// for the given path of an unlinked file, according to the given op,
// and it makes a new dirty cache entry if one doesn't exist yet.  We
// assume Sync will be called eventually on the corresponding open
// file handle, which will clear out the entry.
func (fbo *folderBlockOps) UpdateCachedEntryAttributesOnRemovedFile(
	ctx context.Context, lState *lockState, op *setAttrOp, de DirEntry) {
	fbo.setCachedAttr(lState, de.Ref(), op.Attr, &de, true)
}

// getDeferredWriteCountForTest returns the number of currently
// deferred writes.  Test helper only.
func (fbo *folderBlockOps) getDeferredWriteCountForTest(
	lState *lockState) int {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return len(fbo.deferredWrites)
}

// updatePointer remaps oldPtr to newPtr in the node cache, and (if
// requested, and the block ID actually changed) kicks off a prefetch
// of the block under its new pointer.
func (fbo *folderBlockOps) updatePointer(kmd KeyMetadata, oldPtr BlockPointer,
	newPtr BlockPointer, shouldPrefetch bool) {
	updated := fbo.nodeCache.UpdatePointer(oldPtr.Ref(), newPtr)
	if !updated {
		return
	}

	// Only prefetch if the updated pointer is a new block ID.
	if oldPtr.ID != newPtr.ID {
		// TODO: Remove this comment when we're done debugging because
		// it'll be everywhere.
		fbo.log.CDebugf(context.TODO(),
			"Updated reference for pointer %s to %s.", oldPtr.ID, newPtr.ID)
		if shouldPrefetch {
			// Prefetch the new ref, but only if the old ref already exists in
			// the block cache.  Ideally we'd always prefetch it, but we need
			// the type of the block so that we can call `NewEmpty`.
			// TODO KBFS-1850: Eventually we should use the codec library's
			// ability to decode into a nil interface to no longer need to
			// pre-initialize the correct type.
			block, _, _, err := fbo.config.BlockCache().GetWithPrefetch(oldPtr)
			if err != nil {
				return
			}
			fbo.config.BlockOps().Prefetcher().PrefetchBlock(
				block.NewEmpty(), newPtr, kmd, updatePointerPrefetchPriority,
			)
		}
	}
}

// UpdatePointers updates all the pointers in the node cache
// atomically.  If `afterUpdateFn` is non-nil, it's called under the
// same block lock under which the pointers were updated.
func (fbo *folderBlockOps) UpdatePointers(kmd KeyMetadata, lState *lockState,
	op op, shouldPrefetch bool, afterUpdateFn func() error) error {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	for _, update := range op.allUpdates() {
		fbo.updatePointer(kmd, update.Unref, update.Ref, shouldPrefetch)
	}

	if afterUpdateFn == nil {
		return nil
	}

	return afterUpdateFn()
}

// unlinkDuringFastForwardLocked unlinks, from the node cache, a node
// whose entry could not be found in the new TLF revision during a
// fast-forward.  blockLock must be held for writing.
func (fbo *folderBlockOps) unlinkDuringFastForwardLocked(ctx context.Context,
	lState *lockState, ref BlockRef) {
	fbo.blockLock.AssertLocked(lState)
	oldNode := fbo.nodeCache.Get(ref)
	if oldNode == nil {
		return
	}
	oldPath := fbo.nodeCache.PathFromNode(oldNode)
	fbo.log.CDebugf(ctx, "Unlinking missing node %s/%v during "+
		"fast-forward", oldPath, ref)
	fbo.nodeCache.Unlink(ref, oldPath)
}

// fastForwardDirAndChildrenLocked recursively updates the node-cache
// pointers for currDir and every cached descendant listed in
// `children` (a map from path-prefix string to the set of cached
// pathNodes directly under that prefix), unlinking any child that no
// longer exists in the new revision.  It consumes (deletes) the
// `children` entry for currDir's prefix as it goes.  blockLock must
// be held for writing.
func (fbo *folderBlockOps) fastForwardDirAndChildrenLocked(ctx context.Context,
	lState *lockState, currDir path, children map[string]map[pathNode]bool,
	kmd KeyMetadata) ([]NodeChange, error) {
	fbo.blockLock.AssertLocked(lState)
	dirBlock, err := fbo.getDirLocked(ctx, lState, kmd, currDir, blockRead)
	if err != nil {
		return nil, err
	}

	prefix := currDir.String()

	// TODO: parallelize me?
	var changes []NodeChange
	for child := range children[prefix] {
		entry, ok := dirBlock.Children[child.Name]
		if !ok {
			// The child is gone in the new revision; drop it from
			// the node cache.
			fbo.unlinkDuringFastForwardLocked(
				ctx, lState, child.BlockPointer.Ref())
			continue
		}

		fbo.log.CDebugf(ctx, "Fast-forwarding %v -> %v",
			child.BlockPointer, entry.BlockPointer)
		fbo.updatePointer(kmd, child.BlockPointer,
			entry.BlockPointer, true)
		node := fbo.nodeCache.Get(entry.BlockPointer.Ref())
		newPath := fbo.nodeCache.PathFromNode(node)
		if entry.Type == Dir {
			if node != nil {
				change := NodeChange{Node: node}
				for subchild := range children[newPath.String()] {
					change.DirUpdated =
						append(change.DirUpdated, subchild.Name)
				}
				changes = append(changes, change)
			}

			// Recurse into the subdirectory even if it has no cached
			// node itself, since deeper descendants may be cached.
			childChanges, err := fbo.fastForwardDirAndChildrenLocked(
				ctx, lState, newPath, children, kmd)
			if err != nil {
				return nil, err
			}
			changes = append(changes, childChanges...)
		} else if node != nil {
			// File -- invalidate the entire file contents.
			changes = append(changes, NodeChange{
				Node:        node,
				FileUpdated: []WriteRange{{Len: 0, Off: 0}},
			})
		}
	}
	// Mark this prefix as handled; whatever remains in `children`
	// afterwards is unreachable and will be unlinked by the caller.
	delete(children, prefix)
	return changes, nil
}

// FastForwardAllNodes attempts to update the block pointers
// associated with nodes in the cache by searching for their paths in
// the current version of the TLF.  If it can't find a corresponding
// node, it assumes it's been deleted and unlinks it.  Returns the set
// of node changes that resulted.  If there are no nodes, it returns a
// nil error because there's nothing to be done.
func (fbo *folderBlockOps) FastForwardAllNodes(ctx context.Context,
	lState *lockState, md ReadOnlyRootMetadata) (
	changes []NodeChange, err error) {
	// Take a hard lock through this whole process.  TODO: is there
	// any way to relax this?  It could lead to file system operation
	// timeouts, even on reads, if we hold it too long.
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)

	nodes := fbo.nodeCache.AllNodes()
	if len(nodes) == 0 {
		// Nothing needs to be done!
		return nil, nil
	}
	fbo.log.CDebugf(ctx, "Fast-forwarding %d nodes", len(nodes))
	defer func() { fbo.log.CDebugf(ctx, "Fast-forward complete: %v", err) }()

	// Build a "tree" representation for each interesting path prefix:
	// for every cached node's path, record each pathNode under the
	// string prefix of its parent.
	children := make(map[string]map[pathNode]bool)
	var rootPath path
	for _, n := range nodes {
		p := fbo.nodeCache.PathFromNode(n)
		if len(p.path) == 1 {
			rootPath = p
		}
		prevPath := ""
		for _, pn := range p.path {
			if prevPath != "" {
				childPNs := children[prevPath]
				if childPNs == nil {
					childPNs = make(map[pathNode]bool)
					children[prevPath] = childPNs
				}
				childPNs[pn] = true
			}
			prevPath = filepath.Join(prevPath, pn.Name)
		}
	}

	if !rootPath.isValid() {
		return nil, errors.New("Couldn't find the root path")
	}

	fbo.log.CDebugf(ctx, "Fast-forwarding root %v -> %v",
		rootPath.path[0].BlockPointer, md.data.Dir.BlockPointer)
	fbo.updatePointer(md, rootPath.path[0].BlockPointer,
		md.data.Dir.BlockPointer, false)
	rootPath.path[0].BlockPointer = md.data.Dir.BlockPointer
	rootNode := fbo.nodeCache.Get(md.data.Dir.BlockPointer.Ref())
	if rootNode != nil {
		change := NodeChange{Node: rootNode}
		for child := range children[rootPath.String()] {
			change.DirUpdated = append(change.DirUpdated, child.Name)
		}
		changes = append(changes, change)
	}

	childChanges, err := fbo.fastForwardDirAndChildrenLocked(
		ctx, lState, rootPath, children, md)
	if err != nil {
		return nil, err
	}
	changes = append(changes, childChanges...)

	// Unlink any children that remain (i.e., whose prefixes were
	// never reached during the recursion above).
	for _, childPNs := range children {
		for child := range childPNs {
			fbo.unlinkDuringFastForwardLocked(
				ctx, lState, child.BlockPointer.Ref())
		}
	}

	return changes, nil
}

// chainsPathPopulator is the interface through which conflict
// resolution asks for op paths to be filled in.
type chainsPathPopulator interface {
	populateChainPaths(context.Context, logger.Logger, *crChains, bool) error
}

// populateChainPaths updates all the paths in all the ops tracked by
// `chains`, using the main nodeCache.
func (fbo *folderBlockOps) populateChainPaths(ctx context.Context,
	log logger.Logger, chains *crChains, includeCreates bool) error {
	_, err := chains.getPaths(ctx, fbo, log, fbo.nodeCache, includeCreates)
	return err
}

// Compile-time check that folderBlockOps satisfies chainsPathPopulator.
var _ chainsPathPopulator = (*folderBlockOps)(nil)

// NOTE(review): the following lines look like commit-message text
// accidentally embedded between two file fragments; confirm and
// remove them from the source.
folder_block_ops: reset `doDeferWrite` after recoverable error

This was leading to test flakes by leaving around dirty blocks in the
test cache, even after the test did its final sync.

Issue: KBFS-2093

// Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.

package libkbfs

import (
	"errors"
	"fmt"
	"path/filepath"
	"time"

	"github.com/keybase/client/go/logger"
	"github.com/keybase/client/go/protocol/keybase1"
	"github.com/keybase/kbfs/kbfsblock"
	"github.com/keybase/kbfs/kbfscodec"
	"github.com/keybase/kbfs/tlf"
	"golang.org/x/net/context"
	"golang.org/x/sync/errgroup"
)

// overallBlockState describes whether this TLF currently has any
// unsynced local modifications.
type overallBlockState int

const (
	// cleanState: no outstanding local writes.
	cleanState overallBlockState = iota
	// dirtyState: there are outstanding local writes that haven't yet been
	// synced.
	dirtyState
)

// blockReqType indicates whether an operation makes block
// modifications or not.
type blockReqType int

const (
	// A block read request.
	blockRead blockReqType = iota
	// A block write request.
	blockWrite
	// A block read request that is happening from a different
	// goroutine than the blockLock rlock holder, using the same lState.
	blockReadParallel
	// We are looking up a block for the purposes of creating a new
	// node in the node cache for it; avoid any unlocks as part of the
	// lookup process.
	blockLookup
)

// mdToCleanIfUnused pairs a metadata revision with the block put
// state that should be cleaned up if that revision turns out to be
// unused.
type mdToCleanIfUnused struct {
	md  ReadOnlyRootMetadata
	bps *blockPutState
}

// syncInfo tracks the in-progress sync state for a single dirty file.
type syncInfo struct {
	// oldInfo is the block info for the file's top block before the sync.
	oldInfo BlockInfo
	// op is the syncOp being built up for this sync.
	op *syncOp
	// unrefs holds the to-be-unreferenced block infos.
	unrefs []BlockInfo
	// bps accumulates the blocks to be put for this sync.
	bps *blockPutState
	// refBytes/unrefBytes track byte totals for MD usage accounting.
	refBytes        uint64
	unrefBytes      uint64
	toCleanIfUnused []mdToCleanIfUnused
}

// DeepCopy returns a full copy of si, deep-copying the op (via the
// codec), the block put states, and the to-clean list.
func (si *syncInfo) DeepCopy(codec kbfscodec.Codec) (*syncInfo, error) {
	newSi := &syncInfo{
		oldInfo:    si.oldInfo,
		refBytes:   si.refBytes,
		unrefBytes: si.unrefBytes,
	}
	newSi.unrefs = make([]BlockInfo, len(si.unrefs))
	copy(newSi.unrefs, si.unrefs)
	if si.bps != nil {
		newSi.bps = si.bps.DeepCopy()
	}
	if si.op != nil {
		err := kbfscodec.Update(codec, &newSi.op, si.op)
		if err != nil {
			return nil, err
		}
	}
	newSi.toCleanIfUnused = make([]mdToCleanIfUnused, len(si.toCleanIfUnused))
	for i, toClean := range si.toCleanIfUnused {
		// It might be overkill to deep-copy these MDs and bpses,
		// which are probably immutable, but for now let's do the safe
		// thing.
		copyMd, err := toClean.md.deepCopy(codec)
		if err != nil {
			return nil, err
		}
		newSi.toCleanIfUnused[i].md = copyMd.ReadOnly()
		newSi.toCleanIfUnused[i].bps = toClean.bps.DeepCopy()
	}
	return newSi, nil
}

// removeReplacedBlock drops ptr from the op's ref list (it has been
// replaced), while keeping its unref entry around for size
// accounting.
func (si *syncInfo) removeReplacedBlock(ctx context.Context,
	log logger.Logger, ptr BlockPointer) {
	for i, ref := range si.op.RefBlocks {
		if ref == ptr {
			log.CDebugf(ctx, "Replacing old ref %v", ptr)
			si.op.RefBlocks = append(si.op.RefBlocks[:i],
				si.op.RefBlocks[i+1:]...)
			for j, unref := range si.unrefs {
				if unref.BlockPointer == ptr {
					// Don't completely remove the unref,
					// since it contains size info that we
					// need to incorporate into the MD
					// usage calculations.
					si.unrefs[j].BlockPointer = zeroPtr
				}
			}
			break
		}
	}
}

// mergeUnrefCache folds this sync's unref block infos into md's
// unref list.
func (si *syncInfo) mergeUnrefCache(md *RootMetadata) {
	for _, info := range si.unrefs {
		// it's ok if we push the same ptr.ID/RefNonce multiple times,
		// because the subsequent ones should have a QuotaSize of 0.
		md.AddUnrefBlock(info)
	}
}

// deCacheEntry tracks the dirty directory-entry state for a single
// directory block.
type deCacheEntry struct {
	// dirEntry is the dirty directory entry corresponding to the
	// BlockPointer that maps to this struct.
	dirEntry DirEntry
	// adds is a map of the pointers for new entry names that have
	// been added to the DirBlock for the BlockPointer that maps to
	// this struct.
	adds map[string]BlockPointer
	// dels is a set of the names that have been removed from the
	// DirBlock for the BlockPointer that maps to this struct.
	dels map[string]bool
}

// folderBlockOps contains all the fields that must be synchronized by
// blockLock. It will eventually also contain all the methods that
// must be synchronized by blockLock, so that folderBranchOps will
// have no knowledge of blockLock.
//
// -- And now, a primer on tracking dirty bytes --
//
// The DirtyBlockCache tracks the number of bytes that are dirtied
// system-wide, as the number of bytes that haven't yet been synced
// ("unsynced"), and a number of bytes that haven't yet been resolved
// yet because the overall file Sync hasn't finished yet ("total").
// This data helps us decide when we need to block incoming Writes, in
// order to keep memory usage from exploding.
//
// It's the responsibility of folderBlockOps (and its helper struct
// dirtyFile) to update these totals in DirtyBlockCache for the
// individual files within this TLF.  This is complicated by a few things:
//   * New writes to a file are "deferred" while a Sync is happening, and
//     are replayed after the Sync finishes.
//   * Syncs can be canceled or error out halfway through syncing the blocks,
//     leaving the file in a dirty state until the next Sync.
//   * Syncs can fail with a /recoverable/ error, in which case they get
//     retried automatically by folderBranchOps.  In that case, the retried
//     Sync also sucks in any outstanding deferred writes.
//
// With all that in mind, here is the rough breakdown of how this
// bytes-tracking is implemented:
//   * On a Write/Truncate to a block, folderBranchOps counts all the
//     newly-dirtied bytes in a file as "unsynced".  That is, if the block was
//     already in the dirty cache (and not already being synced), only
//     extensions to the block count as "unsynced" bytes.
//   * When a Sync starts, dirtyFile remembers the total of bytes being synced,
//     and the size of each block being synced.
//   * When each block put finishes successfully, dirtyFile subtracts the size
//     of that block from "unsynced".
//   * When a Sync finishes successfully, the total sum of bytes in that sync
//     are subtracted from the "total" dirty bytes outstanding.
//   * If a Sync fails, but some blocks were put successfully, those blocks
//     are "re-dirtied", which means they count as unsynced bytes again.
//     dirtyFile handles this.
//   * When a Write/Truncate is deferred due to an ongoing Sync, its bytes
//     still count towards the "unsynced" total.  In fact, this essentially
//     creates a new copy of those blocks, and the whole size of that block
//     (not just the newly-dirtied bytes) count for the total.  However,
//     when the write gets replayed, folderBlockOps first subtracts those bytes
//     from the system-wide numbers, since they are about to be replayed.
//   * When a Sync is retried after a recoverable failure, dirtyFile adds
//     the newly-dirtied deferred bytes to the system-wide numbers, since they
//     are now being assimilated into this Sync.
//   * dirtyFile also exposes a concept of "orphaned" blocks.  These are child
//     blocks being synced that are now referenced via a new, permanent block
//     ID from the parent indirect block.  This matters for when hard failures
//     occur during a Sync -- the blocks will no longer be accessible under
//     their previous old pointers, and so dirtyFile needs to know their old
//     bytes can be cleaned up now.
type folderBlockOps struct {
	config       Config
	log          logger.Logger
	folderBranch FolderBranch
	observers    *observerList

	// forceSyncChan can be sent on to trigger an immediate
	// Sync().  It is a blocking channel.
	forceSyncChan chan<- struct{}

	// protects access to blocks in this folder and all fields
	// below.
	blockLock blockLock

	// Which files are currently dirty and have dirty blocks that are either
	// currently syncing, or waiting to be sync'd.
	dirtyFiles map[BlockPointer]*dirtyFile

	// For writes and truncates, track the unsynced to-be-unref'd
	// block infos, per-path.
	unrefCache map[BlockRef]*syncInfo
	// For writes and truncates, track the modified (but not yet
	// committed) directory entries.  Maps the entry BlockRef to a
	// modified entry.
	deCache map[BlockRef]deCacheEntry

	// Writes and truncates for blocks that were being sync'd, and
	// need to be replayed after the sync finishes on top of the new
	// versions of the blocks.
	deferredWrites []func(context.Context, *lockState, KeyMetadata, path) error
	// Blocks that need to be deleted from the dirty cache before any
	// deferred writes are replayed.
	deferredDirtyDeletes []BlockPointer
	deferredWaitBytes    int64

	// set to true if this write or truncate should be deferred
	doDeferWrite bool

	// nodeCache itself is goroutine-safe, but write/truncate must
	// call PathFromNode() only under blockLock (see nodeCache
	// comments in folder_branch_ops.go).
	nodeCache NodeCache
}

// Only exported methods of folderBlockOps should be used outside of this
// file.
//
// Although, temporarily, folderBranchOps is allowed to reach in and
// manipulate folderBlockOps fields and methods directly.

// id returns the TLF ID of this folder.
func (fbo *folderBlockOps) id() tlf.ID {
	return fbo.folderBranch.Tlf
}

// branch returns the branch name of this folder.
func (fbo *folderBlockOps) branch() BranchName {
	return fbo.folderBranch.Branch
}

// GetState returns the overall block state of this TLF.
func (fbo *folderBlockOps) GetState(lState *lockState) overallBlockState {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	// Any uncommitted directory-entry modification means the TLF is dirty.
	if len(fbo.deCache) == 0 {
		return cleanState
	}
	return dirtyState
}

// getCleanEncodedBlockSizeLocked retrieves the encoded size of the
// clean block pointed to by ptr, which must be valid, either from the
// cache or from the server.  If `rtype` is `blockReadParallel`, it's
// assumed that some coordinating goroutine is holding the correct
// locks, and in that case `lState` must be `nil`.
// (Comment previously named a non-existent
// `getCleanEncodedBlockHelperLocked`; fixed to match the function.)
func (fbo *folderBlockOps) getCleanEncodedBlockSizeLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer, branch BranchName,
	rtype blockReqType) (uint32, error) {
	if rtype != blockReadParallel {
		if rtype == blockWrite {
			panic("Cannot get the size of a block for writing")
		}
		fbo.blockLock.AssertAnyLocked(lState)
	} else if lState != nil {
		panic("Non-nil lState passed to getCleanEncodedBlockSizeLocked " +
			"with blockReadParallel")
	}

	if !ptr.IsValid() {
		return 0, InvalidBlockRefError{ptr.Ref()}
	}

	// Fast path: the clean block cache already knows the size.
	if block, err := fbo.config.BlockCache().Get(ptr); err == nil {
		return block.GetEncodedSize(), nil
	}

	if err := checkDataVersion(fbo.config, path{}, ptr); err != nil {
		return 0, err
	}

	// Unlock the blockLock while we wait for the network, only if
	// it's locked for reading by a single goroutine.  If it's locked
	// for writing, that indicates we are performing an atomic write
	// operation, and we need to ensure that nothing else comes in and
	// modifies the blocks, so don't unlock.
	//
	// If there may be multiple goroutines fetching blocks under the
	// same lState, we can't safely unlock since some of the other
	// goroutines may be operating on the data assuming they have the
	// lock.
	bops := fbo.config.BlockOps()
	var size uint32
	var err error
	if rtype != blockReadParallel && rtype != blockLookup {
		fbo.blockLock.DoRUnlockedIfPossible(lState, func(*lockState) {
			size, err = bops.GetEncodedSize(ctx, kmd, ptr)
		})
	} else {
		size, err = bops.GetEncodedSize(ctx, kmd, ptr)
	}
	if err != nil {
		return 0, err
	}

	return size, nil
}

// getBlockHelperLocked retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server.  If
// notifyPath is valid and the block isn't cached, trigger a read
// notification.  If `rtype` is `blockReadParallel`, it's assumed that
// some coordinating goroutine is holding the correct locks, and
// in that case `lState` must be `nil`.
//
// This must be called only by get{File,Dir}BlockHelperLocked().
func (fbo *folderBlockOps) getBlockHelperLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer, branch BranchName,
	newBlock makeNewBlock, lifetime BlockCacheLifetime, notifyPath path,
	rtype blockReqType) (Block, error) {
	if rtype != blockReadParallel {
		fbo.blockLock.AssertAnyLocked(lState)
	} else if lState != nil {
		panic("Non-nil lState passed to getBlockHelperLocked " +
			"with blockReadParallel")
	}

	if !ptr.IsValid() {
		return nil, InvalidBlockRefError{ptr.Ref()}
	}

	// Dirty blocks take precedence over the clean cache.
	if block, err := fbo.config.DirtyBlockCache().Get(
		fbo.id(), ptr, branch); err == nil {
		return block, nil
	}

	if block, hasPrefetched, lifetime, err :=
		fbo.config.BlockCache().GetWithPrefetch(ptr); err == nil {
		// If the block was cached in the past, we need to handle it as if it's
		// an on-demand request so that its downstream prefetches are triggered
		// correctly according to the new on-demand fetch priority.
		fbo.config.BlockOps().BlockRetriever().CacheAndPrefetch(ctx, ptr,
			block, kmd, defaultOnDemandRequestPriority, lifetime,
			hasPrefetched)
		return block, nil
	}

	if err := checkDataVersion(fbo.config, notifyPath, ptr); err != nil {
		return nil, err
	}

	if notifyPath.isValidForNotification() {
		fbo.config.Reporter().Notify(ctx, readNotification(notifyPath, false))
		defer fbo.config.Reporter().Notify(ctx,
			readNotification(notifyPath, true))
	}

	// Unlock the blockLock while we wait for the network, only if
	// it's locked for reading by a single goroutine.  If it's locked
	// for writing, that indicates we are performing an atomic write
	// operation, and we need to ensure that nothing else comes in and
	// modifies the blocks, so don't unlock.
	//
	// If there may be multiple goroutines fetching blocks under the
	// same lState, we can't safely unlock since some of the other
	// goroutines may be operating on the data assuming they have the
	// lock.
	//
	// fetch the block, and add to cache
	block := newBlock()
	bops := fbo.config.BlockOps()
	var err error
	if rtype != blockReadParallel && rtype != blockLookup {
		fbo.blockLock.DoRUnlockedIfPossible(lState, func(*lockState) {
			err = bops.Get(ctx, kmd, ptr, block, lifetime)
		})
	} else {
		err = bops.Get(ctx, kmd, ptr, block, lifetime)
	}
	if err != nil {
		return nil, err
	}

	return block, nil
}

// getFileBlockHelperLocked retrieves the block pointed to by ptr,
// which must be valid, either from an internal cache, the block
// cache, or from the server.  An error is returned if the retrieved
// block is not a file block.  If `rtype` is `blockReadParallel`, it's
// assumed that some coordinating goroutine is holding the correct
// locks, and in that case `lState` must be `nil`.
//
// This must be called only by GetFileBlockForReading(),
// getFileBlockLocked(), and getFileLocked().
//
// p is used only when reporting errors and sending read
// notifications, and can be empty.
func (fbo *folderBlockOps) getFileBlockHelperLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer,
	branch BranchName, p path, rtype blockReqType) (
	*FileBlock, error) {
	if rtype != blockReadParallel {
		fbo.blockLock.AssertAnyLocked(lState)
	} else if lState != nil {
		panic("Non-nil lState passed to getFileBlockHelperLocked " +
			"with blockReadParallel")
	}

	block, err := fbo.getBlockHelperLocked(
		ctx, lState, kmd, ptr, branch, NewFileBlock, TransientEntry, p, rtype)
	if err != nil {
		return nil, err
	}

	// The pointer may legitimately resolve to a non-file block; treat
	// that as a typed error rather than a panic.
	fblock, ok := block.(*FileBlock)
	if !ok {
		return nil, NotFileBlockError{ptr, branch, p}
	}

	return fblock, nil
}

// GetBlockForReading retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server.  The
// returned block may have a generic type (not DirBlock or FileBlock).
//
// This should be called for "internal" operations, like conflict
// resolution and state checking, which don't know what kind of block
// the pointer refers to.  The block will not be cached, if it wasn't
// in the cache already.
func (fbo *folderBlockOps) GetBlockForReading(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer, branch BranchName) (
	Block, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getBlockHelperLocked(ctx, lState, kmd, ptr, branch,
		NewCommonBlock, NoCacheEntry, path{}, blockRead)
}

// GetCleanEncodedBlocksSizeSum retrieves the sum of the encoded sizes
// of the blocks pointed to by ptrs, all of which must be valid,
// either from the cache or from the server.
//
// The caller can specify a set of pointers using
// `ignoreRecoverableForRemovalErrors` for which "recoverable" fetch
// errors are tolerated.  In that case, the returned sum will not
// include the size for any pointers in the
// `ignoreRecoverableForRemovalErrors` set that hit such an error.
//
// This should be called for "internal" operations, like conflict
// resolution and state checking, which don't know what kind of block
// the pointers refer to.  Any downloaded blocks will not be cached,
// if they weren't in the cache already.
func (fbo *folderBlockOps) GetCleanEncodedBlocksSizeSum(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptrs []BlockPointer,
	ignoreRecoverableForRemovalErrors map[BlockPointer]bool,
	branch BranchName) (uint64, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)

	// Fetch all the sizes in parallel; the buffered channel can hold
	// one result per pointer, so sends never block.
	sumCh := make(chan uint32, len(ptrs))
	eg, groupCtx := errgroup.WithContext(ctx)
	for _, ptr := range ptrs {
		ptr := ptr // capture range variable
		eg.Go(func() error {
			// lState must be nil for blockReadParallel -- this
			// goroutine is not the official rlock holder.
			size, err := fbo.getCleanEncodedBlockSizeLocked(groupCtx, nil,
				kmd, ptr, branch, blockReadParallel)
			// TODO: we might be able to recover the size of the
			// top-most block of a removed file using the merged
			// directory entry, the same way we do in
			// `folderBranchOps.unrefEntry`.
			if isRecoverableBlockErrorForRemoval(err) &&
				ignoreRecoverableForRemovalErrors[ptr] {
				fbo.log.CDebugf(groupCtx, "Hit an ignorable, recoverable "+
					"error for block %v: %v", ptr, err)
				return nil
			}

			if err != nil {
				return err
			}
			sumCh <- size
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return 0, err
	}
	close(sumCh)

	var sum uint64
	for size := range sumCh {
		sum += uint64(size)
	}
	return sum, nil
}

// getDirBlockHelperLocked retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server.  An error
// is returned if the retrieved block is not a dir block.
//
// This must be called only by GetDirBlockForReading() and
// getDirLocked().
//
// p is used only when reporting errors, and can be empty.
func (fbo *folderBlockOps) getDirBlockHelperLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer,
	branch BranchName, p path, rtype blockReqType) (*DirBlock, error) {
	if rtype != blockReadParallel {
		fbo.blockLock.AssertAnyLocked(lState)
	}

	// Pass in an empty notify path because notifications should only
	// trigger for file reads.
	block, err := fbo.getBlockHelperLocked(
		ctx, lState, kmd, ptr, branch, NewDirBlock, TransientEntry,
		path{}, rtype)
	if err != nil {
		return nil, err
	}

	// The pointer may legitimately resolve to a non-dir block; treat
	// that as a typed error rather than a panic.
	dblock, ok := block.(*DirBlock)
	if !ok {
		return nil, NotDirBlockError{ptr, branch, p}
	}

	return dblock, nil
}

// GetFileBlockForReading retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server.  An error
// is returned if the retrieved block is not a file block.
//
// This should be called for "internal" operations, like conflict
// resolution and state checking.  "Real" operations should use
// getFileBlockLocked() and getFileLocked() instead.
//
// p is used only when reporting errors, and can be empty.
func (fbo *folderBlockOps) GetFileBlockForReading(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer,
	branch BranchName, p path) (*FileBlock, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getFileBlockHelperLocked(
		ctx, lState, kmd, ptr, branch, p, blockRead)
}

// GetDirBlockForReading retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server.  An error
// is returned if the retrieved block is not a dir block.
//
// This should be called for "internal" operations, like conflict
// resolution and state checking.  "Real" operations should use
// getDirLocked() instead.
//
// p is used only when reporting errors, and can be empty.
func (fbo *folderBlockOps) GetDirBlockForReading(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer,
	branch BranchName, p path) (*DirBlock, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getDirBlockHelperLocked(
		ctx, lState, kmd, ptr, branch, p, blockRead)
}

// getFileBlockLocked retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server.  An error
// is returned if the retrieved block is not a file block.
//
// The given path must be valid, and the given pointer must be its
// tail pointer or an indirect pointer from it.  A read notification is
// triggered for the given path only if the block isn't in the cache.
//
// This shouldn't be called for "internal" operations, like conflict
// resolution and state checking -- use GetFileBlockForReading() for
// those instead.
//
// When rtype == blockWrite and the cached version of the block is
// currently clean, or the block is currently being synced, this
// method makes a copy of the file block and returns it.  If this
// method might be called again for the same block within a single
// operation, it is the caller's responsibility to write that block
// back to the cache as dirty.
//
// Note that blockLock must be locked exactly when rtype ==
// blockWrite, and must be r-locked when rtype == blockRead.  (This
// differs from getDirLocked.)  This is because a write operation
// (like write, truncate and sync which lock blockLock) fetching a
// file block will almost always need to modify that block, and so
// will pass in blockWrite.  If rtype == blockReadParallel, it's
// assumed that some coordinating goroutine is holding the correct
// locks, and in that case `lState` must be `nil`.
//
// file is used only when reporting errors and sending read
// notifications, and can be empty except that file.Branch must be set
// correctly.
//
// This method also returns whether the block was already dirty.
func (fbo *folderBlockOps) getFileBlockLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, ptr BlockPointer,
	file path, rtype blockReqType) (
	fblock *FileBlock, wasDirty bool, err error) {
	// Validate the lock discipline described above for each request type.
	switch rtype {
	case blockRead:
		fbo.blockLock.AssertRLocked(lState)
	case blockWrite:
		fbo.blockLock.AssertLocked(lState)
	case blockReadParallel:
		// This goroutine might not be the official lock holder, so
		// don't make any assertions.
		if lState != nil {
			panic("Non-nil lState passed to getFileBlockLocked " +
				"with blockReadParallel")
		}
	case blockLookup:
		panic("blockLookup should only be used for directory blocks")
	default:
		panic(fmt.Sprintf("Unknown block req type: %d", rtype))
	}

	fblock, err = fbo.getFileBlockHelperLocked(
		ctx, lState, kmd, ptr, file.Branch, file, rtype)
	if err != nil {
		return nil, false, err
	}

	wasDirty = fbo.config.DirtyBlockCache().IsDirty(fbo.id(), ptr, file.Branch)
	if rtype == blockWrite {
		// Copy the block if it's for writing, and either the
		// block is not yet dirty or the block is currently
		// being sync'd and needs a copy even though it's
		// already dirty.
		df := fbo.dirtyFiles[file.tailPointer()]
		if !wasDirty || (df != nil && df.blockNeedsCopy(ptr)) {
			fblock = fblock.DeepCopy()
		}
	}
	return fblock, wasDirty, nil
}

// getFileLocked is getFileBlockLocked called with file.tailPointer().
func (fbo *folderBlockOps) getFileLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, file path,
	rtype blockReqType) (*FileBlock, error) {
	// Callers should have already done this check, but it doesn't
	// hurt to do it again.
	if !file.isValid() {
		return nil, InvalidPathError{file}
	}
	fblock, _, err := fbo.getFileBlockLocked(
		ctx, lState, kmd, file.tailPointer(), file, rtype)
	return fblock, err
}

// GetIndirectFileBlockInfos returns a list of BlockInfos for all
// indirect blocks of the given file.
// If the returned error is a
// recoverable one (as determined by
// isRecoverableBlockErrorForRemoval), the returned list may still be
// non-empty, and holds all the BlockInfos for all found indirect
// blocks.
func (fbo *folderBlockOps) GetIndirectFileBlockInfos(ctx context.Context,
	lState *lockState, kmd KeyMetadata, file path) ([]BlockInfo, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	var uid keybase1.UID // Data reads don't depend on the uid.
	fd := fbo.newFileData(lState, file, uid, kmd)
	return fd.getIndirectFileBlockInfos(ctx)
}

// GetIndirectFileBlockInfosWithTopBlock returns a list of BlockInfos
// for all indirect blocks of the given file, starting from the given
// top-most block.  If the returned error is a recoverable one (as
// determined by isRecoverableBlockErrorForRemoval), the returned list
// may still be non-empty, and holds all the BlockInfos for all found
// indirect blocks.  (This will be relevant when we handle multiple
// levels of indirection.)
func (fbo *folderBlockOps) GetIndirectFileBlockInfosWithTopBlock(
	ctx context.Context, lState *lockState, kmd KeyMetadata, file path,
	topBlock *FileBlock) (
	[]BlockInfo, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	var uid keybase1.UID // Data reads don't depend on the uid.
	fd := fbo.newFileData(lState, file, uid, kmd)
	return fd.getIndirectFileBlockInfosWithTopBlock(ctx, topBlock)
}

// DeepCopyFile makes a complete copy of the given file, deduping leaf
// blocks and making new random BlockPointers for all indirect blocks.
// It returns the new top pointer of the copy, and all the new child
// pointers in the copy.  It takes a custom DirtyBlockCache, which
// directs where the resulting block copies are stored.
func (fbo *folderBlockOps) DeepCopyFile( ctx context.Context, lState *lockState, kmd KeyMetadata, file path, dirtyBcache DirtyBlockCache, dataVer DataVer) ( newTopPtr BlockPointer, allChildPtrs []BlockPointer, err error) { // Deep copying doesn't alter any data in use, it only makes copy, // so only a read lock is needed. fbo.blockLock.RLock(lState) defer fbo.blockLock.RUnlock(lState) session, err := fbo.config.KBPKI().GetCurrentSession(ctx) if err != nil { return BlockPointer{}, nil, err } fd := fbo.newFileDataWithCache( lState, file, session.UID, kmd, dirtyBcache) return fd.deepCopy(ctx, dataVer) } func (fbo *folderBlockOps) UndupChildrenInCopy(ctx context.Context, lState *lockState, kmd KeyMetadata, file path, bps *blockPutState, dirtyBcache DirtyBlockCache, topBlock *FileBlock) ([]BlockInfo, error) { fbo.blockLock.Lock(lState) defer fbo.blockLock.Unlock(lState) session, err := fbo.config.KBPKI().GetCurrentSession(ctx) if err != nil { return nil, err } fd := fbo.newFileDataWithCache( lState, file, session.UID, kmd, dirtyBcache) return fd.undupChildrenInCopy(ctx, fbo.config.BlockCache(), fbo.config.BlockOps(), bps, topBlock) } func (fbo *folderBlockOps) ReadyNonLeafBlocksInCopy(ctx context.Context, lState *lockState, kmd KeyMetadata, file path, bps *blockPutState, dirtyBcache DirtyBlockCache, topBlock *FileBlock) ([]BlockInfo, error) { fbo.blockLock.RLock(lState) defer fbo.blockLock.RUnlock(lState) session, err := fbo.config.KBPKI().GetCurrentSession(ctx) if err != nil { return nil, err } fd := fbo.newFileDataWithCache( lState, file, session.UID, kmd, dirtyBcache) return fd.readyNonLeafBlocksInCopy(ctx, fbo.config.BlockCache(), fbo.config.BlockOps(), bps, topBlock) } // getDirLocked retrieves the block pointed to by the tail pointer of // the given path, which must be valid, either from the cache or from // the server. An error is returned if the retrieved block is not a // dir block. 
//
// This shouldn't be called for "internal" operations, like conflict
// resolution and state checking -- use GetDirBlockForReading() for
// those instead.
//
// When rtype == blockWrite and the cached version of the block is
// currently clean, this method makes a copy of the directory block
// and returns it.  If this method might be called again for the same
// block within a single operation, it is the caller's responsibility
// to write that block back to the cache as dirty.
//
// Note that blockLock must be either r-locked or locked, but
// independently of rtype.  (This differs from getFileLocked and
// getFileBlockLocked.)  File write operations (which lock blockLock)
// don't need a copy of parent dir blocks, and non-file write
// operations do need to copy dir blocks for modifications.
func (fbo *folderBlockOps) getDirLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, dir path, rtype blockReqType) (
	*DirBlock, error) {
	fbo.blockLock.AssertAnyLocked(lState)

	// Callers should have already done this check, but it doesn't
	// hurt to do it again.
	if !dir.isValid() {
		return nil, InvalidPathError{dir}
	}

	// Get the block for the last element in the path.
	dblock, err := fbo.getDirBlockHelperLocked(
		ctx, lState, kmd, dir.tailPointer(), dir.Branch, dir, rtype)
	if err != nil {
		return nil, err
	}

	if rtype == blockWrite && !fbo.config.DirtyBlockCache().IsDirty(
		fbo.id(), dir.tailPointer(), dir.Branch) {
		// Copy the block if it's for writing and the block is
		// not yet dirty.
		dblock = dblock.DeepCopy()
	}
	return dblock, nil
}

// GetDir retrieves the block pointed to by the tail pointer of the
// given path, which must be valid, either from the cache or from the
// server.  An error is returned if the retrieved block is not a dir
// block.
//
// This shouldn't be called for "internal" operations, like conflict
// resolution and state checking -- use GetDirBlockForReading() for
// those instead.
//
// When rtype == blockWrite and the cached version of the block is
// currently clean, this method makes a copy of the directory block
// and returns it.  If this method might be called again for the same
// block within a single operation, it is the caller's responsibility
// to write that block back to the cache as dirty.
func (fbo *folderBlockOps) GetDir(
	ctx context.Context, lState *lockState, kmd KeyMetadata, dir path,
	rtype blockReqType) (*DirBlock, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getDirLocked(ctx, lState, kmd, dir, rtype)
}

// addDirEntryInCacheLocked records, in the deCache entry for the
// given directory, that newName was added with the given dir entry.
// blockLock must be held for writing.
func (fbo *folderBlockOps) addDirEntryInCacheLocked(lState *lockState,
	dir path, newName string, newDe DirEntry) {
	fbo.blockLock.AssertLocked(lState)
	cacheEntry := fbo.deCache[dir.tailPointer().Ref()]
	if cacheEntry.adds == nil {
		cacheEntry.adds = make(map[string]BlockPointer)
	}
	cacheEntry.adds[newName] = newDe.BlockPointer
	// In case it was removed in the cache but not flushed yet.
	delete(cacheEntry.dels, newName)
	// deCache holds values, not pointers, so write the entry back.
	fbo.deCache[dir.tailPointer().Ref()] = cacheEntry
}

// AddDirEntryInCache adds a brand new entry to the given directory in
// the cache, which will get applied to the dirty block on subsequent
// fetches for the directory.  The new entry must not yet have a cache
// entry itself.
func (fbo *folderBlockOps) AddDirEntryInCache(lState *lockState, dir path,
	newName string, newDe DirEntry) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	fbo.addDirEntryInCacheLocked(lState, dir, newName, newDe)

	// Add target dir entry as well.
	if newDe.IsInitialized() {
		cacheEntry, ok := fbo.deCache[newDe.Ref()]
		if ok {
			// Contract of AddDirEntryInCache: the new entry must not
			// already have a cache entry of its own.
			panic("New entry shouldn't already exist")
		}
		cacheEntry.dirEntry = newDe
		fbo.deCache[newDe.Ref()] = cacheEntry
	}
}

// removeDirEntryInCacheLocked records, in the deCache entry for the
// given directory, that oldName was removed.  blockLock must be held
// for writing.
func (fbo *folderBlockOps) removeDirEntryInCacheLocked(lState *lockState,
	dir path, oldName string) {
	fbo.blockLock.AssertLocked(lState)
	cacheEntry := fbo.deCache[dir.tailPointer().Ref()]
	if cacheEntry.dels == nil {
		cacheEntry.dels = make(map[string]bool)
	}
	cacheEntry.dels[oldName] = true
	// In case it was added in the cache but not flushed yet.
	delete(cacheEntry.adds, oldName)
	// deCache holds values, not pointers, so write the entry back.
	fbo.deCache[dir.tailPointer().Ref()] = cacheEntry
}

// RemoveDirEntryInCache removes an entry from the given directory in
// the cache, which will get applied to the dirty block on subsequent
// fetches for the directory.
func (fbo *folderBlockOps) RemoveDirEntryInCache(lState *lockState,
	dir path, oldName string) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	fbo.removeDirEntryInCacheLocked(lState, dir, oldName)
}

// RenameDirEntryInCache updates the entries of both the old and new
// parent dirs for the given target dir atomically (with respect to
// blockLock).  It also updates the cache entry for the target, which
// would have its Ctime changed.  The updates will get applied to the
// dirty blocks on subsequent fetches.
//
// The returned bool indicates whether or not the caller should clean
// up the target cache entry when the effects of the operation are no
// longer needed.
func (fbo *folderBlockOps) RenameDirEntryInCache(lState *lockState,
	oldParent path, oldName string, newParent path, newName string,
	newDe DirEntry) (deleteTargetDirEntry bool) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	fbo.addDirEntryInCacheLocked(lState, newParent, newName, newDe)
	fbo.removeDirEntryInCacheLocked(lState, oldParent, oldName)

	// If there's already an entry for the target, only update the
	// Ctime on a rename.
	cacheEntry, ok := fbo.deCache[newDe.Ref()]
	if ok && cacheEntry.dirEntry.IsInitialized() {
		cacheEntry.dirEntry.Ctime = newDe.Ctime
	} else {
		// No existing target entry; install one and tell the caller
		// to clean it up later.
		cacheEntry.dirEntry = newDe
		deleteTargetDirEntry = true
	}
	fbo.deCache[newDe.Ref()] = cacheEntry
	return deleteTargetDirEntry
}

// setCachedAttrLocked applies the given attribute change (taken from
// realEntry) to the deCache entry for ref.  If no entry exists and
// doCreate is true, one is seeded from realEntry first; otherwise the
// call is a no-op.  blockLock must be held for writing.
func (fbo *folderBlockOps) setCachedAttrLocked(
	lState *lockState, ref BlockRef, attr attrChange, realEntry *DirEntry,
	doCreate bool) {
	fbo.blockLock.AssertLocked(lState)
	fileEntry, ok := fbo.deCache[ref]
	if !ok {
		if !doCreate {
			return
		}
		fileEntry.dirEntry = *realEntry
	}
	switch attr {
	case exAttr:
		fileEntry.dirEntry.Type = realEntry.Type
	case mtimeAttr:
		fileEntry.dirEntry.Mtime = realEntry.Mtime
	}
	// Ctime is always updated along with the changed attribute.
	fileEntry.dirEntry.Ctime = realEntry.Ctime
	fbo.deCache[ref] = fileEntry
}

// SetAttrInDirEntryInCache updates the attributes of the given entry
// in the cache, which will get applied to the dirty block on
// subsequent fetches for the directory.
//
// The returned bool indicates whether or not the caller should clean
// up the cache entry when the effects of the operation are no longer
// needed.
func (fbo *folderBlockOps) SetAttrInDirEntryInCache(lState *lockState,
	newDe DirEntry, attr attrChange) (deleteTargetDirEntry bool) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)

	// If there was no pre-existing entry for the target, the caller
	// is responsible for cleaning up the one we create below.
	_, ok := fbo.deCache[newDe.Ref()]
	if !ok {
		deleteTargetDirEntry = true
	}
	fbo.setCachedAttrLocked(
		lState, newDe.Ref(), attr, &newDe,
		true /* create the deCache entry if it doesn't exist yet */)
	return deleteTargetDirEntry
}

// ClearCachedAddsAndRemoves clears out any cached directory entry
// adds and removes for the given dir.
func (fbo *folderBlockOps) ClearCachedAddsAndRemoves(
	lState *lockState, dir path) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	cacheEntry, ok := fbo.deCache[dir.tailPointer().Ref()]
	if !ok {
		return
	}

	// If there's no dirEntry, we can just delete the whole thing.
if !cacheEntry.dirEntry.IsInitialized() { delete(fbo.deCache, dir.tailPointer().Ref()) return } // Otherwise just nil out the adds and dels. cacheEntry.adds = nil cacheEntry.dels = nil fbo.deCache[dir.tailPointer().Ref()] = cacheEntry } // updateWithDirtyEntriesLocked checks if the given DirBlock has any // entries that are in deCache (i.e., entries pointing to dirty // files). If so, it makes a copy with all such entries replaced with // the ones in deCache and returns it. If not, it just returns the // given one. func (fbo *folderBlockOps) updateWithDirtyEntriesLocked(ctx context.Context, lState *lockState, dir path, dblock *DirBlock) (*DirBlock, error) { fbo.blockLock.AssertAnyLocked(lState) // see if this directory has any outstanding writes/truncates that // require an updated DirEntry // Save some time for the common case of having no dirty // files. if len(fbo.deCache) == 0 { return dblock, nil } var dblockCopy *DirBlock dirCacheEntry := fbo.deCache[dir.tailPointer().Ref()] // TODO: We should get rid of deCache completely and use only // DirtyBlockCache to store the dirtied version of the DirBlock. // We can't do that yet, because there might be multiple // outstanding dirty files in one directory, and the KBFSOps API // allows for syncing one at a time, so keeping a single dirtied // DirBlock would accidentally sync the DirEntry of file A when a // sync of file B is requested. // // Soon a sync will sync everything that's dirty at once, and so // we can remove deCache at that point. Until then, we must // incrementally build it up each time. // Add cached additions to the copy. for k, ptr := range dirCacheEntry.adds { de, ok := fbo.deCache[ptr.Ref()] if !ok { return nil, fmt.Errorf("No cached dir entry found for new entry "+ "%s in dir %s (%v)", k, dir, dir.tailPointer()) } if dblockCopy == nil { dblockCopy = dblock.DeepCopy() } dblockCopy.Children[k] = de.dirEntry } // Remove cached removals from the copy. 
for k := range dirCacheEntry.adds { _, ok := dblock.Children[k] if !ok { continue } if dblockCopy == nil { dblockCopy = dblock.DeepCopy() } delete(dblockCopy.Children, k) } // Update dir entries for any modified files. for k, v := range dblock.Children { de, ok := fbo.deCache[v.Ref()] if !ok { continue } if dblockCopy == nil { dblockCopy = dblock.DeepCopy() } dblockCopy.Children[k] = de.dirEntry } if dblockCopy == nil { return dblock, nil } return dblockCopy, nil } // getDirtyDirLocked composes getDirLocked and // updatedWithDirtyEntriesLocked. Note that a dirty dir means that it // has entries possibly pointing to dirty files, not that it's dirty // itself. func (fbo *folderBlockOps) getDirtyDirLocked(ctx context.Context, lState *lockState, kmd KeyMetadata, dir path, rtype blockReqType) ( *DirBlock, error) { fbo.blockLock.AssertAnyLocked(lState) dblock, err := fbo.getDirLocked(ctx, lState, kmd, dir, rtype) if err != nil { return nil, err } return fbo.updateWithDirtyEntriesLocked(ctx, lState, dir, dblock) } // GetDirtyDirChildren returns a map of EntryInfos for the (possibly // dirty) children entries of the given directory. func (fbo *folderBlockOps) GetDirtyDirChildren( ctx context.Context, lState *lockState, kmd KeyMetadata, dir path) ( map[string]EntryInfo, error) { dblock, err := func() (*DirBlock, error) { fbo.blockLock.RLock(lState) defer fbo.blockLock.RUnlock(lState) return fbo.getDirtyDirLocked(ctx, lState, kmd, dir, blockRead) }() if err != nil { return nil, err } children := make(map[string]EntryInfo) for k, de := range dblock.Children { children[k] = de.EntryInfo } return children, nil } // file must have a valid parent. 
// getDirtyParentAndEntryLocked returns the possibly-dirty parent
// DirBlock of the given file and its DirEntry within that block.
// file must have a valid parent.
func (fbo *folderBlockOps) getDirtyParentAndEntryLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, file path, rtype blockReqType) (
	*DirBlock, DirEntry, error) {
	fbo.blockLock.AssertAnyLocked(lState)

	if !file.hasValidParent() {
		return nil, DirEntry{}, InvalidParentPathError{file}
	}

	parentPath := file.parentPath()
	dblock, err := fbo.getDirtyDirLocked(
		ctx, lState, kmd, *parentPath, rtype)
	if err != nil {
		return nil, DirEntry{}, err
	}

	// make sure it exists
	name := file.tailName()
	de, ok := dblock.Children[name]
	if !ok {
		return nil, DirEntry{}, NoSuchNameError{name}
	}

	return dblock, de, err
}

// GetDirtyParentAndEntry returns a copy of the parent DirBlock
// (suitable for modification) of the given file, which may contain
// entries pointing to other dirty files, and its possibly-dirty
// DirEntry in that directory.  file must have a valid parent.  Use
// GetDirtyEntry() if you only need the DirEntry.
func (fbo *folderBlockOps) GetDirtyParentAndEntry(
	ctx context.Context, lState *lockState, kmd KeyMetadata, file path) (
	*DirBlock, DirEntry, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getDirtyParentAndEntryLocked(
		ctx, lState, kmd, file, blockWrite)
}

// file must have a valid parent.
func (fbo *folderBlockOps) getDirtyEntryLocked(ctx context.Context,
	lState *lockState, kmd KeyMetadata, file path) (DirEntry, error) {
	// TODO: Since we only need a single DirEntry, avoid having to
	// look up every entry in the DirBlock.
	_, de, err := fbo.getDirtyParentAndEntryLocked(
		ctx, lState, kmd, file, blockLookup)
	return de, err
}

// GetDirtyEntry returns the possibly-dirty DirEntry of the given file
// in its parent DirBlock.  file must have a valid parent.
func (fbo *folderBlockOps) GetDirtyEntry(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file path) (DirEntry, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getDirtyEntryLocked(ctx, lState, kmd, file)
}

// Lookup returns the possibly-dirty DirEntry of the given file in its
// parent DirBlock, and a Node for the file if it exists.  It has to
// do all of this under the block lock to avoid races with
// UpdatePointers.
func (fbo *folderBlockOps) Lookup(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	dir Node, name string) (Node, DirEntry, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)

	dirPath := fbo.nodeCache.PathFromNode(dir)
	if !dirPath.isValid() {
		return nil, DirEntry{}, InvalidPathError{dirPath}
	}

	childPath := dirPath.ChildPathNoPtr(name)
	de, err := fbo.getDirtyEntryLocked(ctx, lState, kmd, childPath)
	if err != nil {
		return nil, DirEntry{}, err
	}

	// Symlinks have no node of their own; return just the entry.
	if de.Type == Sym {
		return nil, de, nil
	}

	err = checkDataVersion(fbo.config, childPath, de.BlockPointer)
	if err != nil {
		return nil, DirEntry{}, err
	}

	node, err := fbo.nodeCache.GetOrCreate(de.BlockPointer, name, dir)
	if err != nil {
		return nil, DirEntry{}, err
	}

	return node, de, nil
}

// getOrCreateDirtyFileLocked returns the dirtyFile tracker for the
// given file, creating and registering one if it doesn't yet exist.
// blockLock must be held for writing.
func (fbo *folderBlockOps) getOrCreateDirtyFileLocked(lState *lockState,
	file path) *dirtyFile {
	fbo.blockLock.AssertLocked(lState)
	ptr := file.tailPointer()
	df := fbo.dirtyFiles[ptr]
	if df == nil {
		df = newDirtyFile(file, fbo.config.DirtyBlockCache())
		fbo.dirtyFiles[ptr] = df
	}
	return df
}

// cacheBlockIfNotYetDirtyLocked puts a block into the cache, but only
// does so if the block isn't already marked as dirty in the cache.
// This is useful when operating on a dirty copy of a block that may
// already be in the cache.
func (fbo *folderBlockOps) cacheBlockIfNotYetDirtyLocked(
	lState *lockState, ptr BlockPointer, file path, block Block) error {
	fbo.blockLock.AssertLocked(lState)
	df := fbo.getOrCreateDirtyFileLocked(lState, file)
	needsCaching, isSyncing := df.setBlockDirty(ptr)

	if needsCaching {
		err := fbo.config.DirtyBlockCache().Put(fbo.id(), ptr, file.Branch,
			block)
		if err != nil {
			return err
		}
	}

	if isSyncing {
		// A sync is touching this block; later writes must be
		// deferred until the sync completes.
		fbo.doDeferWrite = true
	}
	return nil
}

// getOrCreateSyncInfoLocked returns the syncInfo for the given entry,
// creating one (with a fresh syncOp) if none exists yet.  blockLock
// must be held for writing.
func (fbo *folderBlockOps) getOrCreateSyncInfoLocked(
	lState *lockState, de DirEntry) (*syncInfo, error) {
	fbo.blockLock.AssertLocked(lState)
	ref := de.Ref()
	si, ok := fbo.unrefCache[ref]
	if !ok {
		so, err := newSyncOp(de.BlockPointer)
		if err != nil {
			return nil, err
		}
		si = &syncInfo{
			oldInfo: de.BlockInfo,
			op:      so,
		}
		fbo.unrefCache[ref] = si
	}
	return si, nil
}

// GetDirtyRefs returns a list of references of all known dirty
// blocks.
func (fbo *folderBlockOps) GetDirtyRefs(lState *lockState) []BlockRef {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	var dirtyRefs []BlockRef
	for ref := range fbo.deCache {
		dirtyRefs = append(dirtyRefs, ref)
	}
	return dirtyRefs
}

// fixChildBlocksAfterRecoverableErrorLocked should be called when a sync
// failed with a recoverable block error on a multi-block file.  It
// makes sure that any outstanding dirty versions of the file are
// fixed up to reflect the fact that some of the indirect pointers now
// need to change.
func (fbo *folderBlockOps) fixChildBlocksAfterRecoverableErrorLocked(
	ctx context.Context, lState *lockState, file path, kmd KeyMetadata,
	redirtyOnRecoverableError map[BlockPointer]BlockPointer) {
	fbo.blockLock.AssertLocked(lState)

	defer func() {
		// Below, this function can end up writing dirty blocks back
		// to the cache, which will set `doDeferWrite` to `true`.
		// This leads to future writes being unnecessarily deferred
		// when a Sync is not happening, and can lead to dirty data
		// being synced twice and sticking around for longer than
		// needed.  So just reset `doDeferWrite` once we're
		// done.  We're under `blockLock`, so this is safe.
		fbo.doDeferWrite = false
	}()

	df := fbo.dirtyFiles[file.tailPointer()]
	if df != nil {
		// Un-orphan old blocks, since we are reverting back to the
		// previous state.
		for _, oldPtr := range redirtyOnRecoverableError {
			fbo.log.CDebugf(ctx, "Un-orphaning %v", oldPtr)
			df.setBlockOrphaned(oldPtr, false)
		}
	}
	dirtyBcache := fbo.config.DirtyBlockCache()
	topBlock, err := dirtyBcache.Get(fbo.id(), file.tailPointer(),
		fbo.branch())
	fblock, ok := topBlock.(*FileBlock)
	if err != nil || !ok {
		fbo.log.CWarningf(ctx, "Couldn't find dirtied "+
			"top-block for %v: %v", file.tailPointer(), err)
		return
	}

	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		fbo.log.CWarningf(ctx, "Couldn't find uid during recovery: %v", err)
		return
	}
	fd := fbo.newFileData(lState, file, session.UID, kmd)

	// If a copy of the top indirect block was made, we need to
	// redirty all the sync'd blocks under their new IDs, so that
	// future syncs will know they failed.
	newPtrs := make(map[BlockPointer]bool, len(redirtyOnRecoverableError))
	for newPtr := range redirtyOnRecoverableError {
		newPtrs[newPtr] = true
	}
	found, err := fd.findIPtrsAndClearSize(ctx, fblock, newPtrs)
	if err != nil {
		fbo.log.CWarningf(
			ctx, "Couldn't find and clear iptrs during recovery: %v", err)
		return
	}
	for newPtr, oldPtr := range redirtyOnRecoverableError {
		if !found[newPtr] {
			continue
		}
		fbo.log.CDebugf(ctx, "Re-dirtying %v (and deleting dirty block %v)",
			newPtr, oldPtr)
		// These blocks would have been permanent, so they're
		// definitely still in the cache.
		b, err := fbo.config.BlockCache().Get(newPtr)
		if err != nil {
			fbo.log.CWarningf(ctx, "Couldn't re-dirty %v: %v", newPtr, err)
			continue
		}
		if err = fbo.cacheBlockIfNotYetDirtyLocked(
			lState, newPtr, file, b); err != nil {
			fbo.log.CWarningf(ctx, "Couldn't re-dirty %v: %v", newPtr, err)
		}
		fbo.log.CDebugf(ctx, "Deleting dirty ptr %v after recoverable error",
			oldPtr)
		err = dirtyBcache.Delete(fbo.id(), oldPtr, fbo.branch())
		if err != nil {
			fbo.log.CDebugf(ctx, "Couldn't del-dirty %v: %v", oldPtr, err)
		}
	}
}

// nowUnixNano returns the current time from the config's clock, in
// Unix nanoseconds.
func (fbo *folderBlockOps) nowUnixNano() int64 {
	return fbo.config.Clock().Now().UnixNano()
}

// PrepRename prepares the given rename operation.  It returns copies
// of the old and new parent block (which may be the same), what is to
// be the new DirEntry, and a local block cache.  It also modifies md,
// which must be a copy.
func (fbo *folderBlockOps) PrepRename(
	ctx context.Context, lState *lockState, md *RootMetadata,
	oldParent path, oldName string, newParent path, newName string) (
	oldPBlock, newPBlock *DirBlock, newDe DirEntry, lbc localBcache,
	err error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)

	// look up in the old path
	oldPBlock, err = fbo.getDirLocked(
		ctx, lState, md, oldParent, blockWrite)
	if err != nil {
		return nil, nil, DirEntry{}, nil, err
	}
	newDe, ok := oldPBlock.Children[oldName]
	// does the name exist?
	if !ok {
		return nil, nil, DirEntry{}, nil, NoSuchNameError{oldName}
	}

	ro, err := newRenameOp(oldName, oldParent.tailPointer(), newName,
		newParent.tailPointer(), newDe.BlockPointer, newDe.Type)
	if err != nil {
		return nil, nil, DirEntry{}, nil, err
	}
	// A renameOp doesn't have a single path to represent it, so we
	// can't call setFinalPath here unfortunately.  That means any
	// rename may force a manual paths population at other layers
	// (e.g., for journal statuses).  TODO: allow a way to set more
	// than one final path for renameOps?
	md.AddOp(ro)

	lbc = make(localBcache)
	// TODO: Write a SameBlock() function that can deal properly with
	// dedup'd blocks that share an ID but can be updated separately.
	if oldParent.tailPointer().ID == newParent.tailPointer().ID {
		newPBlock = oldPBlock
	} else {
		newPBlock, err = fbo.getDirLocked(
			ctx, lState, md, newParent, blockWrite)
		if err != nil {
			return nil, nil, DirEntry{}, nil, err
		}
		now := fbo.nowUnixNano()

		oldGrandparent := *oldParent.parentPath()
		if len(oldGrandparent.path) > 0 {
			// Update the old parent's mtime/ctime, unless the
			// oldGrandparent is the same as newParent (in which
			// case, the syncBlockAndCheckEmbedLocked call by the
			// caller will take care of it).
			if oldGrandparent.tailPointer().ID != newParent.tailPointer().ID {
				b, err := fbo.getDirLocked(ctx, lState, md, oldGrandparent,
					blockWrite)
				if err != nil {
					return nil, nil, DirEntry{}, nil, err
				}
				if de, ok := b.Children[oldParent.tailName()]; ok {
					de.Ctime = now
					de.Mtime = now
					b.Children[oldParent.tailName()] = de
					// Put this block back into the local cache as dirty
					lbc[oldGrandparent.tailPointer()] = b
				}
			}
		} else {
			// The old parent is the TLF root; update times directly
			// on the metadata.
			md.data.Dir.Ctime = now
			md.data.Dir.Mtime = now
		}
	}

	return oldPBlock, newPBlock, newDe, lbc, nil
}

// newFileData constructs a fileData for the given file, backed by the
// FBO's block-fetching and dirty-caching callbacks.  blockLock must
// be held (r- or w-) by the caller.
func (fbo *folderBlockOps) newFileData(lState *lockState,
	file path, uid keybase1.UID, kmd KeyMetadata) *fileData {
	fbo.blockLock.AssertAnyLocked(lState)
	return newFileData(file, uid, fbo.config.Crypto(),
		fbo.config.BlockSplitter(), kmd,
		func(ctx context.Context, kmd KeyMetadata, ptr BlockPointer,
			file path, rtype blockReqType) (*FileBlock, bool, error) {
			lState := lState
			if rtype == blockReadParallel {
				// Parallel reads happen without the lock held, so
				// don't pass the lock state down.
				lState = nil
			}
			return fbo.getFileBlockLocked(
				ctx, lState, kmd, ptr, file, rtype)
		},
		func(ptr BlockPointer, block Block) error {
			return fbo.cacheBlockIfNotYetDirtyLocked(
				lState, ptr, file, block)
		}, fbo.log)
}

// newFileDataWithCache is like newFileData, but reads and writes go
// through the given custom DirtyBlockCache first.
func (fbo *folderBlockOps) newFileDataWithCache(lState *lockState,
	file path, uid keybase1.UID, kmd KeyMetadata,
	dirtyBcache DirtyBlockCache) *fileData {
	fbo.blockLock.AssertAnyLocked(lState)
	return newFileData(file, uid, fbo.config.Crypto(),
		fbo.config.BlockSplitter(), kmd,
		func(ctx context.Context, kmd KeyMetadata, ptr BlockPointer,
			file path, rtype blockReqType) (*FileBlock, bool, error) {
			// Prefer the custom dirty cache; fall back to the normal
			// fetch path on a miss.
			block, err := dirtyBcache.Get(file.Tlf, ptr, file.Branch)
			if fblock, ok := block.(*FileBlock); ok && err == nil {
				return fblock, true, nil
			}
			lState := lState
			if rtype == blockReadParallel {
				lState = nil
			}
			return fbo.getFileBlockLocked(
				ctx, lState, kmd, ptr, file, rtype)
		},
		func(ptr BlockPointer, block Block) error {
			return dirtyBcache.Put(file.Tlf, ptr, file.Branch, block)
		}, fbo.log)
}

// Read reads from the given file into the given buffer at the given
// offset.  It returns the number of bytes read and nil, or 0 and the
// error if there was one.
func (fbo *folderBlockOps) Read(
	ctx context.Context, lState *lockState, kmd KeyMetadata, file path,
	dest []byte, off int64) (int64, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)

	fbo.log.CDebugf(ctx, "Reading from %v", file.tailPointer())

	var uid keybase1.UID // Data reads don't depend on the uid.
	fd := fbo.newFileData(lState, file, uid, kmd)
	return fd.read(ctx, dest, off)
}

// maybeWaitOnDeferredWrites blocks until the dirty buffer has room
// for the pending write (signaled on c), surfacing any sync errors
// reported for the file in the meantime.
func (fbo *folderBlockOps) maybeWaitOnDeferredWrites(
	ctx context.Context, lState *lockState, file Node,
	c DirtyPermChan) error {
	var errListener chan error
	err := func() error {
		fbo.blockLock.Lock(lState)
		defer fbo.blockLock.Unlock(lState)
		filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
		if err != nil {
			return err
		}
		df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
		errListener = make(chan error, 1)
		df.addErrListener(errListener)
		return nil
	}()
	if err != nil {
		return err
	}

	logTimer := time.After(100 * time.Millisecond)
	doLogUnblocked := false
	for {
		select {
		case <-c:
			if doLogUnblocked {
				fbo.log.CDebugf(ctx, "Write unblocked")
			}
			// Make sure there aren't any queued errors.
			select {
			case err := <-errListener:
				return err
			default:
			}
			return nil
		case <-logTimer:
			// Print a log message once if it's taking too long.
			fbo.log.CDebugf(ctx,
				"Blocking a write because of a full dirty buffer")
			doLogUnblocked = true
		case err := <-errListener:
			// XXX: should we ignore non-fatal errors (like
			// context.Canceled), or errors that are specific only to
			// some other file being sync'd (e.g., "recoverable" block
			// errors from which we couldn't recover)?
			return err
		}
	}
}

// pathFromNodeForBlockWriteLocked resolves the node to its current
// path, returning an error if the node is no longer valid.  blockLock
// must be held for writing.
func (fbo *folderBlockOps) pathFromNodeForBlockWriteLocked(
	lState *lockState, n Node) (path, error) {
	fbo.blockLock.AssertLocked(lState)
	p := fbo.nodeCache.PathFromNode(n)
	if !p.isValid() {
		return path{}, InvalidPathError{p}
	}
	return p, nil
}

// writeGetFileLocked checks write permissions explicitly for
// writeDataLocked, truncateLocked etc and returns
func (fbo *folderBlockOps) writeGetFileLocked(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file path) (*FileBlock, keybase1.UID, error) {
	fbo.blockLock.AssertLocked(lState)

	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return nil, "", err
	}
	if !kmd.GetTlfHandle().IsWriter(session.UID) {
		return nil, "", NewWriteAccessError(kmd.GetTlfHandle(),
			session.Name, file.String())
	}
	fblock, err := fbo.getFileLocked(ctx, lState, kmd, file, blockWrite)
	if err != nil {
		return nil, "", err
	}
	return fblock, session.UID, nil
}

// Returns the set of blocks dirtied during this write that might need
// to be cleaned up if the write is deferred.
func (fbo *folderBlockOps) writeDataLocked(
	ctx context.Context, lState *lockState, kmd KeyMetadata, file path,
	data []byte, off int64) (latestWrite WriteRange,
	dirtyPtrs []BlockPointer, newlyDirtiedChildBytes int64, err error) {
	// Bracket the write as a dirty op for the journal, if one exists.
	if jServer, err := GetJournalServer(fbo.config); err == nil {
		jServer.dirtyOpStart(fbo.id())
		defer jServer.dirtyOpEnd(fbo.id())
	}

	fbo.blockLock.AssertLocked(lState)
	fbo.log.CDebugf(ctx, "writeDataLocked on file pointer %v",
		file.tailPointer())
	defer func() {
		fbo.log.CDebugf(ctx, "writeDataLocked done: %v", err)
	}()

	fblock, uid, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
	if err != nil {
		return WriteRange{}, nil, 0, err
	}

	fd := fbo.newFileData(lState, file, uid, kmd)

	dirtyBcache := fbo.config.DirtyBlockCache()
	df := fbo.getOrCreateDirtyFileLocked(lState, file)
	defer func() {
		// Always update unsynced bytes and potentially force a sync,
		// even on an error, since the previously-dirty bytes stay in
		// the cache.
		df.updateNotYetSyncingBytes(newlyDirtiedChildBytes)
		if dirtyBcache.ShouldForceSync(fbo.id()) {
			select {
			// If we can't send on the channel, that means a sync is
			// already in progress.
			case fbo.forceSyncChan <- struct{}{}:
				fbo.log.CDebugf(ctx, "Forcing a sync due to full buffer")
			default:
			}
		}
	}()

	de, err := fbo.getDirtyEntryLocked(ctx, lState, kmd, file)
	if err != nil {
		return WriteRange{}, nil, 0, err
	}
	if de.BlockPointer != file.tailPointer() {
		fbo.log.CDebugf(ctx, "DirEntry and file tail pointer don't match: "+
			"%v vs %v", de.BlockPointer, file.tailPointer())
	}

	si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
	if err != nil {
		return WriteRange{}, nil, 0, err
	}

	newDe, dirtyPtrs, unrefs, newlyDirtiedChildBytes, bytesExtended, err :=
		fd.write(ctx, data, off, fblock, de, df)
	// Record the unrefs before checking the error so we remember the
	// state of newly dirtied blocks.
	si.unrefs = append(si.unrefs, unrefs...)
	if err != nil {
		return WriteRange{}, nil, newlyDirtiedChildBytes, err
	}

	// Put it in the `deCache` even if the size didn't change, since
	// the `deCache` is used to determine whether there are any dirty
	// files.  TODO: combine `deCache` with `dirtyFiles` and
	// `unrefCache`.
	cacheEntry := fbo.deCache[file.tailPointer().Ref()]
	cacheEntry.dirEntry = newDe
	fbo.deCache[file.tailPointer().Ref()] = cacheEntry

	if fbo.doDeferWrite {
		df.addDeferredNewBytes(bytesExtended)
	}

	latestWrite = si.op.addWrite(uint64(off), uint64(len(data)))

	return latestWrite, dirtyPtrs, newlyDirtiedChildBytes, nil
}

// Write writes the given data to the given file.  May block if there
// is too much unflushed data; in that case, it will be unblocked by a
// future sync.
func (fbo *folderBlockOps) Write(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file Node, data []byte, off int64) error {
	// If there is too much unflushed data, we should wait until some
	// of it gets flush so our memory usage doesn't grow without
	// bound.
	c, err := fbo.config.DirtyBlockCache().RequestPermissionToDirty(ctx,
		fbo.id(), int64(len(data)))
	if err != nil {
		return err
	}
	defer fbo.config.DirtyBlockCache().UpdateUnsyncedBytes(fbo.id(),
		-int64(len(data)), false)
	err = fbo.maybeWaitOnDeferredWrites(ctx, lState, file, c)
	if err != nil {
		return err
	}

	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)

	filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
	if err != nil {
		return err
	}

	// doDeferWrite is only valid for the duration of this write; the
	// helper below may set it.
	defer func() {
		fbo.doDeferWrite = false
	}()

	latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err := fbo.writeDataLocked(
		ctx, lState, kmd, filePath, data, off)
	if err != nil {
		return err
	}

	fbo.observers.localChange(ctx, file, latestWrite)

	if fbo.doDeferWrite {
		// There's an ongoing sync, and this write altered dirty
		// blocks that are in the process of syncing.  So, we have to
		// redo this write once the sync is complete, using the new
		// file path.
		//
		// There is probably a less terrible way of doing this that
		// doesn't involve so much copying and rewriting, but this is
		// the most obviously correct way.
		dataCopy := make([]byte, len(data))
		copy(dataCopy, data)
		fbo.log.CDebugf(ctx, "Deferring a write to file %v off=%d len=%d",
			filePath.tailPointer(), off, len(data))
		fbo.deferredDirtyDeletes = append(fbo.deferredDirtyDeletes,
			dirtyPtrs...)
		fbo.deferredWrites = append(fbo.deferredWrites,
			func(ctx context.Context, lState *lockState, kmd KeyMetadata,
				f path) error {
				// We are about to re-dirty these bytes, so mark that
				// they will no longer be synced via the old file.
				df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
				df.updateNotYetSyncingBytes(-newlyDirtiedChildBytes)

				// Write the data again.  We know this won't be
				// deferred, so no need to check the new ptrs.
				_, _, _, err = fbo.writeDataLocked(
					ctx, lState, kmd, f, dataCopy, off)
				return err
			})
		fbo.deferredWaitBytes += newlyDirtiedChildBytes
	}

	return nil
}

// truncateExtendLocked is called by truncateLocked to extend a file and
// creates a hole.
func (fbo *folderBlockOps) truncateExtendLocked(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file path, size uint64, parentBlocks []parentBlockAndChildIndex) (
	WriteRange, []BlockPointer, error) {
	fblock, uid, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
	if err != nil {
		return WriteRange{}, nil, err
	}

	fd := fbo.newFileData(lState, file, uid, kmd)

	de, err := fbo.getDirtyEntryLocked(ctx, lState, kmd, file)
	if err != nil {
		return WriteRange{}, nil, err
	}
	df := fbo.getOrCreateDirtyFileLocked(lState, file)
	newDe, dirtyPtrs, err := fd.truncateExtend(
		ctx, size, fblock, parentBlocks, de, df)
	if err != nil {
		return WriteRange{}, nil, err
	}

	// Record the updated entry in deCache so subsequent dir fetches
	// see the new size.
	cacheEntry := fbo.deCache[file.tailPointer().Ref()]
	cacheEntry.dirEntry = newDe
	fbo.deCache[file.tailPointer().Ref()] = cacheEntry

	si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
	if err != nil {
		return WriteRange{}, nil, err
	}
	latestWrite := si.op.addTruncate(size)

	if fbo.config.DirtyBlockCache().ShouldForceSync(fbo.id()) {
		select {
		// If we can't send on the channel, that means a sync is
		// already in progress
		case fbo.forceSyncChan <- struct{}{}:
			fbo.log.CDebugf(ctx, "Forcing a sync due to full buffer")
		default:
		}
	}

	fbo.log.CDebugf(ctx, "truncateExtendLocked: done")
	return latestWrite, dirtyPtrs, nil
}

// truncateExtendCutoffPoint is the amount of data in extending
// truncate that will trigger the extending with a hole algorithm.
const truncateExtendCutoffPoint = 128 * 1024

// Returns the set of newly-ID'd blocks created during this truncate
// that might need to be cleaned up if the truncate is deferred.
func (fbo *folderBlockOps) truncateLocked( ctx context.Context, lState *lockState, kmd KeyMetadata, file path, size uint64) (*WriteRange, []BlockPointer, int64, error) { if jServer, err := GetJournalServer(fbo.config); err == nil { jServer.dirtyOpStart(fbo.id()) defer jServer.dirtyOpEnd(fbo.id()) } fblock, uid, err := fbo.writeGetFileLocked(ctx, lState, kmd, file) if err != nil { return &WriteRange{}, nil, 0, err } fd := fbo.newFileData(lState, file, uid, kmd) // find the block where the file should now end iSize := int64(size) // TODO: deal with overflow _, parentBlocks, block, nextBlockOff, startOff, _, err := fd.getFileBlockAtOffset(ctx, fblock, iSize, blockWrite) if err != nil { return &WriteRange{}, nil, 0, err } currLen := int64(startOff) + int64(len(block.Contents)) if currLen+truncateExtendCutoffPoint < iSize { latestWrite, dirtyPtrs, err := fbo.truncateExtendLocked( ctx, lState, kmd, file, uint64(iSize), parentBlocks) if err != nil { return &latestWrite, dirtyPtrs, 0, err } return &latestWrite, dirtyPtrs, 0, err } else if currLen < iSize { moreNeeded := iSize - currLen latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err := fbo.writeDataLocked(ctx, lState, kmd, file, make([]byte, moreNeeded, moreNeeded), currLen) if err != nil { return &latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err } return &latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err } else if currLen == iSize && nextBlockOff < 0 { // same size! return nil, nil, 0, nil } // update the local entry size de, err := fbo.getDirtyEntryLocked(ctx, lState, kmd, file) if err != nil { return nil, nil, 0, err } si, err := fbo.getOrCreateSyncInfoLocked(lState, de) if err != nil { return nil, nil, 0, err } newDe, dirtyPtrs, unrefs, newlyDirtiedChildBytes, err := fd.truncateShrink( ctx, size, fblock, de) // Record the unrefs before checking the error so we remember the // state of newly dirtied blocks. si.unrefs = append(si.unrefs, unrefs...) 
if err != nil { return nil, nil, newlyDirtiedChildBytes, err } // Update dirtied bytes and unrefs regardless of error. df := fbo.getOrCreateDirtyFileLocked(lState, file) df.updateNotYetSyncingBytes(newlyDirtiedChildBytes) latestWrite := si.op.addTruncate(size) cacheEntry := fbo.deCache[file.tailPointer().Ref()] cacheEntry.dirEntry = newDe fbo.deCache[file.tailPointer().Ref()] = cacheEntry return &latestWrite, dirtyPtrs, newlyDirtiedChildBytes, nil } // Truncate truncates or extends the given file to the given size. // May block if there is too much unflushed data; in that case, it // will be unblocked by a future sync. func (fbo *folderBlockOps) Truncate( ctx context.Context, lState *lockState, kmd KeyMetadata, file Node, size uint64) error { // If there is too much unflushed data, we should wait until some // of it gets flush so our memory usage doesn't grow without // bound. // // Assume the whole remaining file will be dirty after this // truncate. TODO: try to figure out how many bytes actually will // be dirtied ahead of time? c, err := fbo.config.DirtyBlockCache().RequestPermissionToDirty(ctx, fbo.id(), int64(size)) if err != nil { return err } defer fbo.config.DirtyBlockCache().UpdateUnsyncedBytes(fbo.id(), -int64(size), false) err = fbo.maybeWaitOnDeferredWrites(ctx, lState, file, c) if err != nil { return err } fbo.blockLock.Lock(lState) defer fbo.blockLock.Unlock(lState) filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file) if err != nil { return err } defer func() { fbo.doDeferWrite = false }() latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err := fbo.truncateLocked( ctx, lState, kmd, filePath, size) if err != nil { return err } if latestWrite != nil { fbo.observers.localChange(ctx, file, *latestWrite) } if fbo.doDeferWrite { // There's an ongoing sync, and this truncate altered // dirty blocks that are in the process of syncing. So, // we have to redo this truncate once the sync is complete, // using the new file path. 
fbo.log.CDebugf(ctx, "Deferring a truncate to file %v", filePath.tailPointer()) fbo.deferredDirtyDeletes = append(fbo.deferredDirtyDeletes, dirtyPtrs...) fbo.deferredWrites = append(fbo.deferredWrites, func(ctx context.Context, lState *lockState, kmd KeyMetadata, f path) error { // We are about to re-dirty these bytes, so mark that // they will no longer be synced via the old file. df := fbo.getOrCreateDirtyFileLocked(lState, filePath) df.updateNotYetSyncingBytes(-newlyDirtiedChildBytes) // Truncate the file again. We know this won't be // deferred, so no need to check the new ptrs. _, _, _, err := fbo.truncateLocked( ctx, lState, kmd, f, size) return err }) fbo.deferredWaitBytes += newlyDirtiedChildBytes } return nil } // IsDirty returns whether the given file is dirty; if false is // returned, then the file doesn't need to be synced. func (fbo *folderBlockOps) IsDirty(lState *lockState, file path) bool { fbo.blockLock.RLock(lState) defer fbo.blockLock.RUnlock(lState) // Definitely dirty if a block is dirty. if fbo.config.DirtyBlockCache().IsDirty( fbo.id(), file.tailPointer(), file.Branch) { return true } // The deCache entry could still be dirty, if a file had an // attribute set (like mtime or exec) after the file was removed. // Still count the file as dirty in that case; most likely, the // caller will next call `ClearCacheInfo` to remove this entry. // (See comments in `folderBranchOps.syncLocked`.) _, ok := fbo.deCache[file.tailPointer().Ref()] return ok } func (fbo *folderBlockOps) clearCacheInfoLocked(lState *lockState, file path) error { fbo.blockLock.AssertLocked(lState) ref := file.tailPointer().Ref() delete(fbo.deCache, ref) delete(fbo.unrefCache, ref) df := fbo.dirtyFiles[file.tailPointer()] if df != nil { err := df.finishSync() if err != nil { return err } delete(fbo.dirtyFiles, file.tailPointer()) } return nil } // ClearCacheInfo removes any cached info for the the given file. 
func (fbo *folderBlockOps) ClearCacheInfo(lState *lockState, file path) error {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	return fbo.clearCacheInfoLocked(lState, file)
}

// revertSyncInfoAfterRecoverableError updates the saved sync info to
// include all the blocks from before the error, except for those that
// have encountered recoverable block errors themselves.
func (fbo *folderBlockOps) revertSyncInfoAfterRecoverableError(
	blocksToRemove []BlockPointer, result fileSyncState) {
	si := result.si
	savedSi := result.savedSi

	// Save the blocks we need to clean up on the next attempt.
	toClean := si.toCleanIfUnused

	// Build a set of the indirect blocks newly created by the failed
	// sync, so their unrefs can be dropped below (those blocks never
	// existed on the server and so don't need unreferencing).
	newIndirect := make(map[BlockPointer]bool)
	for _, ptr := range result.newIndirectFileBlockPtrs {
		newIndirect[ptr] = true
	}

	// Propagate all unrefs forward, except those that belong to new
	// blocks that were created during the sync.
	unrefs := make([]BlockInfo, 0, len(si.unrefs))
	for _, unref := range si.unrefs {
		if newIndirect[unref.BlockPointer] {
			// NOTE(review): a nil ctx is passed to CDebugf here; this
			// assumes the logger tolerates nil contexts — confirm, or
			// plumb a real ctx through.
			fbo.log.CDebugf(nil, "Dropping unref %v", unref)
			continue
		}
		unrefs = append(unrefs, unref)
	}

	// This sync will be retried and needs new blocks, so
	// reset everything in the sync info.
	*si = *savedSi
	si.toCleanIfUnused = toClean
	si.unrefs = unrefs
	if si.bps == nil {
		return
	}
	// Rebuild the block-state list from the saved copy, skipping bad
	// pointers.
	si.bps.blockStates = nil

	// Mark any bad pointers so they get skipped next time.
	blocksToRemoveSet := make(map[BlockPointer]bool)
	for _, ptr := range blocksToRemove {
		blocksToRemoveSet[ptr] = true
	}

	for _, bs := range savedSi.bps.blockStates {
		// Only save the good pointers
		if !blocksToRemoveSet[bs.blockPtr] {
			si.bps.blockStates = append(si.bps.blockStates, bs)
		}
	}
}

// ReadyBlock is a thin wrapper around BlockOps.Ready() that handles
// checking for duplicates.
func ReadyBlock(ctx context.Context, bcache BlockCache, bops BlockOps, crypto cryptoPure, kmd KeyMetadata, block Block, uid keybase1.UID, bType keybase1.BlockType) ( info BlockInfo, plainSize int, readyBlockData ReadyBlockData, err error) { var ptr BlockPointer directType := IndirectBlock if fBlock, ok := block.(*FileBlock); ok && !fBlock.IsInd { directType = DirectBlock // first see if we are duplicating any known blocks in this folder ptr, err = bcache.CheckForKnownPtr(kmd.TlfID(), fBlock) if err != nil { return } } else if dBlock, ok := block.(*DirBlock); ok { if dBlock.IsInd { panic("Indirect directory blocks aren't supported yet") } // TODO: support indirect directory blocks. directType = DirectBlock } // Ready the block, even in the case where we can reuse an // existing block, just so that we know what the size of the // encrypted data will be. id, plainSize, readyBlockData, err := bops.Ready(ctx, kmd, block) if err != nil { return } if ptr.IsInitialized() { ptr.RefNonce, err = crypto.MakeBlockRefNonce() if err != nil { return } ptr.SetWriter(uid) // In case we're deduping an old pointer with an unknown block type. ptr.DirectType = directType } else { ptr = BlockPointer{ ID: id, KeyGen: kmd.LatestKeyGeneration(), DataVer: block.DataVersion(), DirectType: directType, Context: kbfsblock.MakeFirstContext(uid, bType), } } info = BlockInfo{ BlockPointer: ptr, EncodedSize: uint32(readyBlockData.GetEncodedSize()), } return } // fileSyncState holds state for a sync operation for a single // file. type fileSyncState struct { // If fblock is non-nil, the (dirty, indirect, cached) block // it points to will be set to savedFblock on a recoverable // error. fblock, savedFblock *FileBlock // redirtyOnRecoverableError, which is non-nil only when fblock is // non-nil, contains pointers that need to be re-dirtied if the // top block gets copied during the sync, and a recoverable error // happens. 
Maps to the old block pointer for the block, which // would need a DirtyBlockCache.Delete. redirtyOnRecoverableError map[BlockPointer]BlockPointer // If si is non-nil, its updated state will be reset on // error. Also, if the error is recoverable, it will be // reverted to savedSi. // // TODO: Working with si in this way is racy, since si is a // member of unrefCache. si, savedSi *syncInfo // oldFileBlockPtrs is a list of transient entries in the // block cache for the file, which should be removed when the // sync finishes. oldFileBlockPtrs []BlockPointer // newIndirectFileBlockPtrs is a list of permanent entries // added to the block cache for the file, which should be // removed after the blocks have been sent to the server. // They are not removed on an error, because in that case the // file is still dirty locally and may get another chance to // be sync'd. // // TODO: This can be a list of IDs instead. newIndirectFileBlockPtrs []BlockPointer } // startSyncWrite contains the portion of StartSync() that's done // while write-locking blockLock. If there is no dirty de cache // entry, dirtyDe will be nil. func (fbo *folderBlockOps) startSyncWrite(ctx context.Context, lState *lockState, md *RootMetadata, uid keybase1.UID, file path) ( fblock *FileBlock, bps *blockPutState, syncState fileSyncState, dirtyDe *DirEntry, err error) { fbo.blockLock.Lock(lState) defer fbo.blockLock.Unlock(lState) // update the parent directories, and write all the new blocks out // to disk fblock, err = fbo.getFileLocked(ctx, lState, md.ReadOnly(), file, blockWrite) if err != nil { return nil, nil, syncState, nil, err } fileRef := file.tailPointer().Ref() si, ok := fbo.unrefCache[fileRef] if !ok { return nil, nil, syncState, nil, fmt.Errorf("No syncOp found for file ref %v", fileRef) } // Collapse the write range to reduce the size of the sync op. 
si.op.Writes = si.op.collapseWriteRange(nil) // If this function returns a success, we need to make sure the op // in `md` is not the same variable as the op in `unrefCache`, // because the latter could get updated still by local writes // before `md` is flushed to the server. We don't copy it here // because code below still needs to modify it (and by extension, // the one stored in `syncState.si`). si.op.setFinalPath(file) md.AddOp(si.op) // Fill in syncState. if fblock.IsInd { fblockCopy := fblock.DeepCopy() syncState.fblock = fblock syncState.savedFblock = fblockCopy syncState.redirtyOnRecoverableError = make(map[BlockPointer]BlockPointer) } syncState.si = si syncState.savedSi, err = si.DeepCopy(fbo.config.Codec()) if err != nil { return nil, nil, syncState, nil, err } if si.bps == nil { si.bps = newBlockPutState(1) } else { // reinstate byte accounting from the previous Sync md.SetRefBytes(si.refBytes) md.AddDiskUsage(si.refBytes) md.SetUnrefBytes(si.unrefBytes) md.SetMDRefBytes(0) // this will be calculated anew md.SetDiskUsage(md.DiskUsage() - si.unrefBytes) syncState.newIndirectFileBlockPtrs = append( syncState.newIndirectFileBlockPtrs, si.op.Refs()...) } defer func() { si.refBytes = md.RefBytes() si.unrefBytes = md.UnrefBytes() }() dirtyBcache := fbo.config.DirtyBlockCache() df := fbo.getOrCreateDirtyFileLocked(lState, file) fd := fbo.newFileData(lState, file, uid, md.ReadOnly()) // Note: below we add possibly updated file blocks as "unref" and // "ref" blocks. This is fine, since conflict resolution or // notifications will never happen within a file. // If needed, split the children blocks up along new boundaries // (e.g., if using a fingerprint-based block splitter). unrefs, err := fd.split(ctx, fbo.id(), dirtyBcache, fblock, df) // Preserve any unrefs before checking the error. for _, unref := range unrefs { md.AddUnrefBlock(unref) } if err != nil { return nil, nil, syncState, nil, err } // Ready all children blocks, if any. 
oldPtrs, err := fd.ready(ctx, fbo.id(), fbo.config.BlockCache(), fbo.config.DirtyBlockCache(), fbo.config.BlockOps(), si.bps, fblock, df) if err != nil { return nil, nil, syncState, nil, err } for newInfo, oldPtr := range oldPtrs { syncState.newIndirectFileBlockPtrs = append( syncState.newIndirectFileBlockPtrs, newInfo.BlockPointer) df.setBlockOrphaned(oldPtr, true) // Defer the DirtyBlockCache.Delete until after the new path // is ready, in case anyone tries to read the dirty file in // the meantime. syncState.oldFileBlockPtrs = append(syncState.oldFileBlockPtrs, oldPtr) md.AddRefBlock(newInfo) // If this block is replacing a block from a previous, failed // Sync, we need to take that block out of the refs list, and // avoid unrefing it as well. si.removeReplacedBlock(ctx, fbo.log, oldPtr) err = df.setBlockSyncing(oldPtr) if err != nil { return nil, nil, syncState, nil, err } syncState.redirtyOnRecoverableError[newInfo.BlockPointer] = oldPtr } err = df.setBlockSyncing(file.tailPointer()) if err != nil { return nil, nil, syncState, nil, err } syncState.oldFileBlockPtrs = append( syncState.oldFileBlockPtrs, file.tailPointer()) // Capture the current de before we release the block lock, so // other deferred writes don't slip in. if de, ok := fbo.deCache[fileRef]; ok { dirtyDe = &de.dirEntry } // Leave a copy of the syncOp in `unrefCache`, since it may be // modified by future local writes while the syncOp in `md` should // only be modified by the rest of this sync process. var syncOpCopy *syncOp err = kbfscodec.Update(fbo.config.Codec(), &syncOpCopy, si.op) if err != nil { return nil, nil, syncState, nil, err } fbo.unrefCache[fileRef].op = syncOpCopy // If there are any deferred bytes, it must be because this is // a retried sync and some blocks snuck in between sync. Those // blocks will get transferred now, but they are also on the // deferred list and will be retried on the next sync as well. 
df.assimilateDeferredNewBytes() // TODO: Returning si.bps in this way is racy, since si is a // member of unrefCache. return fblock, si.bps, syncState, dirtyDe, nil } func (fbo *folderBlockOps) makeLocalBcache(ctx context.Context, lState *lockState, md *RootMetadata, file path, si *syncInfo, dirtyDe *DirEntry) (lbc localBcache, err error) { fbo.blockLock.RLock(lState) defer fbo.blockLock.RUnlock(lState) parentPath := file.parentPath() dblock, err := fbo.getDirLocked( ctx, lState, md.ReadOnly(), *parentPath, blockWrite) if err != nil { return nil, err } // Add in the cached unref'd blocks. si.mergeUnrefCache(md) lbc = make(localBcache) // Update the file's directory entry to the cached copy. if dirtyDe != nil { dirtyDe.EncodedSize = si.oldInfo.EncodedSize dblock.Children[file.tailName()] = *dirtyDe lbc[parentPath.tailPointer()] = dblock } return lbc, nil } // StartSync starts a sync for the given file. It returns the new // FileBlock which has the readied top-level block which includes all // writes since the last sync. Must be used with CleanupSyncState() // and UpdatePointers/FinishSyncLocked() like so: // // fblock, bps, lbc, syncState, err := // ...fbo.StartSync(ctx, lState, md, uid, file) // defer func() { // ...fbo.CleanupSyncState( // ctx, lState, md, file, ..., syncState, err) // }() // if err != nil { // ... // } // ... // // // ... 
= fbo.UpdatePointers(..., func() error { // ...fbo.FinishSyncLocked(ctx, lState, file, ..., syncState) // }) func (fbo *folderBlockOps) StartSync(ctx context.Context, lState *lockState, md *RootMetadata, uid keybase1.UID, file path) ( fblock *FileBlock, bps *blockPutState, lbc localBcache, syncState fileSyncState, err error) { if jServer, err := GetJournalServer(fbo.config); err == nil { jServer.dirtyOpStart(fbo.id()) } fblock, bps, syncState, dirtyDe, err := fbo.startSyncWrite( ctx, lState, md, uid, file) if err != nil { return nil, nil, nil, syncState, err } lbc, err = fbo.makeLocalBcache(ctx, lState, md, file, syncState.savedSi, dirtyDe) if err != nil { return nil, nil, nil, syncState, err } return fblock, bps, lbc, syncState, err } // Does any clean-up for a sync of the given file, given an error // (which may be nil) that happens during or after StartSync() and // before FinishSync(). blocksToRemove may be nil. func (fbo *folderBlockOps) CleanupSyncState( ctx context.Context, lState *lockState, md ReadOnlyRootMetadata, file path, blocksToRemove []BlockPointer, result fileSyncState, err error) { if jServer, err := GetJournalServer(fbo.config); err == nil { defer jServer.dirtyOpEnd(fbo.id()) } if err == nil { return } fbo.blockLock.Lock(lState) defer fbo.blockLock.Unlock(lState) // Notify error listeners before we reset the dirty blocks and // permissions to be granted. fbo.notifyErrListenersLocked(lState, file.tailPointer(), err) // If there was an error, we need to back out any changes that // might have been filled into the sync op, because it could // get reused again in a later Sync call. if result.si != nil { result.si.op.resetUpdateState() // Save this MD for later, so we can clean up its // newly-referenced block pointers if necessary. 
result.si.toCleanIfUnused = append(result.si.toCleanIfUnused, mdToCleanIfUnused{md, result.si.bps.DeepCopy()}) } if isRecoverableBlockError(err) { if result.si != nil { fbo.revertSyncInfoAfterRecoverableError(blocksToRemove, result) } if result.fblock != nil { result.fblock.Set(result.savedFblock) fbo.fixChildBlocksAfterRecoverableErrorLocked( ctx, lState, file, md, result.redirtyOnRecoverableError) } } else { // Since the sync has errored out unrecoverably, the deferred // bytes are already accounted for. if df := fbo.dirtyFiles[file.tailPointer()]; df != nil { df.updateNotYetSyncingBytes(-fbo.deferredWaitBytes) // Some blocks that were dirty are now clean under their // readied block ID, and now live in the bps rather than // the dirty bcache, so we can delete them from the dirty // bcache. dirtyBcache := fbo.config.DirtyBlockCache() for _, ptr := range result.oldFileBlockPtrs { if df.isBlockOrphaned(ptr) { fbo.log.CDebugf(ctx, "Deleting dirty orphan: %v", ptr) if err := dirtyBcache.Delete(fbo.id(), ptr, fbo.branch()); err != nil { fbo.log.CDebugf(ctx, "Couldn't delete %v", ptr) } } } } // On an unrecoverable error, the deferred writes aren't // needed anymore since they're already part of the // (still-)dirty blocks. fbo.deferredDirtyDeletes = nil fbo.deferredWrites = nil fbo.deferredWaitBytes = 0 } // The sync is over, due to an error, so reset the map so that we // don't defer any subsequent writes. // Old syncing blocks are now just dirty if df := fbo.dirtyFiles[file.tailPointer()]; df != nil { df.resetSyncingBlocksToDirty() } } // cleanUpUnusedBlocks cleans up the blocks from any previous failed // sync attempts. func (fbo *folderBlockOps) cleanUpUnusedBlocks(ctx context.Context, md ReadOnlyRootMetadata, syncState fileSyncState, fbm *folderBlockManager) error { numToClean := len(syncState.si.toCleanIfUnused) if numToClean == 0 { return nil } // What blocks are referenced in the successful MD? 
refs := make(map[BlockPointer]bool) for _, op := range md.data.Changes.Ops { for _, ptr := range op.Refs() { if ptr == zeroPtr { panic("Unexpected zero ref ptr in a sync MD revision") } refs[ptr] = true } for _, update := range op.allUpdates() { if update.Ref == zeroPtr { panic("Unexpected zero update ref ptr in a sync MD revision") } refs[update.Ref] = true } } // For each MD to clean, clean up the old failed blocks // immediately if the merge status matches the successful put, if // they didn't get referenced in the successful put. If the merge // status is different (e.g., we ended up on a conflict branch), // clean it up only if the original revision failed. If the same // block appears more than once, the one with a different merged // status takes precedence (which will always come earlier in the // list of MDs). blocksSeen := make(map[BlockPointer]bool) for _, oldMD := range syncState.si.toCleanIfUnused { bdType := blockDeleteAlways if oldMD.md.MergedStatus() != md.MergedStatus() { bdType = blockDeleteOnMDFail } failedBps := newBlockPutState(len(oldMD.bps.blockStates)) for _, bs := range oldMD.bps.blockStates { if bs.blockPtr == zeroPtr { panic("Unexpected zero block ptr in an old sync MD revision") } if blocksSeen[bs.blockPtr] { continue } blocksSeen[bs.blockPtr] = true if refs[bs.blockPtr] && bdType == blockDeleteAlways { continue } failedBps.blockStates = append(failedBps.blockStates, blockState{blockPtr: bs.blockPtr}) fbo.log.CDebugf(ctx, "Cleaning up block %v from a previous "+ "failed revision %d (oldMD is %s, bdType=%d)", bs.blockPtr, oldMD.md.Revision(), oldMD.md.MergedStatus(), bdType) } if len(failedBps.blockStates) > 0 { fbm.cleanUpBlockState(oldMD.md, failedBps, bdType) } } return nil } func (fbo *folderBlockOps) doDeferredWritesLocked(ctx context.Context, lState *lockState, kmd KeyMetadata, newPath path) ( stillDirty bool, err error) { fbo.blockLock.AssertLocked(lState) // Redo any writes or truncates that happened to our file while // the sync 
was happening. deletes := fbo.deferredDirtyDeletes writes := fbo.deferredWrites stillDirty = len(fbo.deferredWrites) != 0 fbo.deferredDirtyDeletes = nil fbo.deferredWrites = nil fbo.deferredWaitBytes = 0 // Clear any dirty blocks that resulted from a write/truncate // happening during the sync, since we're redoing them below. dirtyBcache := fbo.config.DirtyBlockCache() for _, ptr := range deletes { fbo.log.CDebugf(ctx, "Deleting deferred dirty ptr %v", ptr) if err := dirtyBcache.Delete(fbo.id(), ptr, fbo.branch()); err != nil { return true, err } } for _, f := range writes { err = f(ctx, lState, kmd, newPath) if err != nil { // It's a little weird to return an error from a deferred // write here. Hopefully that will never happen. return true, err } } return stillDirty, nil } // FinishSyncLocked finishes the sync process for a file, given the // state from StartSync. Specifically, it re-applies any writes that // happened since the call to StartSync. func (fbo *folderBlockOps) FinishSyncLocked( ctx context.Context, lState *lockState, oldPath, newPath path, md ReadOnlyRootMetadata, syncState fileSyncState, fbm *folderBlockManager) ( stillDirty bool, err error) { fbo.blockLock.AssertLocked(lState) dirtyBcache := fbo.config.DirtyBlockCache() for _, ptr := range syncState.oldFileBlockPtrs { fbo.log.CDebugf(ctx, "Deleting dirty ptr %v", ptr) if err := dirtyBcache.Delete(fbo.id(), ptr, fbo.branch()); err != nil { return true, err } } bcache := fbo.config.BlockCache() for _, ptr := range syncState.newIndirectFileBlockPtrs { err := bcache.DeletePermanent(ptr.ID) if err != nil { fbo.log.CWarningf(ctx, "Error when deleting %v from cache: %v", ptr.ID, err) } } stillDirty, err = fbo.doDeferredWritesLocked(ctx, lState, md, newPath) if err != nil { return true, err } // Clear cached info for the old path. 
We are guaranteed that any // concurrent write to this file was deferred, even if it was to a // block that wasn't currently being sync'd, since the top-most // block is always in dirtyFiles and is always dirtied during a // write/truncate. // // Also, we can get rid of all the sync state that might have // happened during the sync, since we will replay the writes // below anyway. if err := fbo.clearCacheInfoLocked(lState, oldPath); err != nil { return true, err } if err := fbo.cleanUpUnusedBlocks(ctx, md, syncState, fbm); err != nil { return true, err } return stillDirty, nil } // notifyErrListeners notifies any write operations that are blocked // on a file so that they can learn about unrecoverable sync errors. func (fbo *folderBlockOps) notifyErrListenersLocked(lState *lockState, ptr BlockPointer, err error) { fbo.blockLock.AssertLocked(lState) if isRecoverableBlockError(err) { // Don't bother any listeners with this error, since the sync // will be retried. Unless the sync has reached its retry // limit, but in that case the listeners will just proceed as // normal once the dirty block cache bytes are freed, and // that's ok since this error isn't fatal. return } df := fbo.dirtyFiles[ptr] if df != nil { df.notifyErrListeners(err) } } type searchWithOutOfDateCacheError struct { } func (e searchWithOutOfDateCacheError) Error() string { return fmt.Sprintf("Search is using an out-of-date node cache; " + "try again with a clean cache.") } // searchForNodesInDirLocked recursively tries to find a path, and // ultimately a node, to ptr, given the set of pointers that were // updated in a particular operation. The keys in nodeMap make up the // set of BlockPointers that are being searched for, and nodeMap is // updated in place to include the corresponding discovered nodes. // // Returns the number of nodes found by this invocation. If the error // it returns is searchWithOutOfDateCache, the search should be // retried by the caller with a clean cache. 
func (fbo *folderBlockOps) searchForNodesInDirLocked(ctx context.Context, lState *lockState, cache NodeCache, newPtrs map[BlockPointer]bool, kmd KeyMetadata, rootNode Node, currDir path, nodeMap map[BlockPointer]Node, numNodesFoundSoFar int) (int, error) { fbo.blockLock.AssertAnyLocked(lState) dirBlock, err := fbo.getDirLocked( ctx, lState, kmd, currDir, blockRead) if err != nil { return 0, err } // getDirLocked may have unlocked blockLock, which means the cache // could have changed out from under us. Verify that didn't // happen, so we can avoid messing it up with nodes from an old MD // version. If it did happen, return a special error that lets // the caller know they should retry with a fresh cache. if currDir.path[0].BlockPointer != cache.PathFromNode(rootNode).tailPointer() { return 0, searchWithOutOfDateCacheError{} } if numNodesFoundSoFar >= len(nodeMap) { return 0, nil } numNodesFound := 0 for name, de := range dirBlock.Children { if _, ok := nodeMap[de.BlockPointer]; ok { childPath := currDir.ChildPath(name, de.BlockPointer) // make a node for every pathnode n := rootNode for i, pn := range childPath.path[1:] { if !pn.BlockPointer.IsValid() { // Temporary debugging output for KBFS-1764 -- the // GetOrCreate call below will panic. 
fbo.log.CDebugf(ctx, "Invalid block pointer, path=%s, "+ "path.path=%v (index %d), name=%s, de=%#v, "+ "nodeMap=%v, newPtrs=%v, kmd=%#v", childPath, childPath.path, i, name, de, nodeMap, newPtrs, kmd) } n, err = cache.GetOrCreate(pn.BlockPointer, pn.Name, n) if err != nil { return 0, err } } nodeMap[de.BlockPointer] = n numNodesFound++ if numNodesFoundSoFar+numNodesFound >= len(nodeMap) { return numNodesFound, nil } } // otherwise, recurse if this represents an updated block if _, ok := newPtrs[de.BlockPointer]; de.Type == Dir && ok { childPath := currDir.ChildPath(name, de.BlockPointer) n, err := fbo.searchForNodesInDirLocked(ctx, lState, cache, newPtrs, kmd, rootNode, childPath, nodeMap, numNodesFoundSoFar+numNodesFound) if err != nil { return 0, err } numNodesFound += n if numNodesFoundSoFar+numNodesFound >= len(nodeMap) { return numNodesFound, nil } } } return numNodesFound, nil } func (fbo *folderBlockOps) trySearchWithCacheLocked(ctx context.Context, lState *lockState, cache NodeCache, ptrs []BlockPointer, newPtrs map[BlockPointer]bool, kmd KeyMetadata, rootPtr BlockPointer) ( map[BlockPointer]Node, error) { fbo.blockLock.AssertAnyLocked(lState) nodeMap := make(map[BlockPointer]Node) for _, ptr := range ptrs { nodeMap[ptr] = nil } if len(ptrs) == 0 { return nodeMap, nil } var node Node // The node cache used by the main part of KBFS is // fbo.nodeCache. This basically maps from BlockPointers to // Nodes. Nodes are used by the callers of the library, but // internally we need to know the series of BlockPointers and // file/dir names that make up the path of the corresponding // file/dir. fbo.nodeCache is long-lived and never invalidated. // // As folderBranchOps gets informed of new local or remote MD // updates, which change the BlockPointers of some subset of the // nodes in this TLF, it calls nodeCache.UpdatePointer for each // change. 
Then, when a caller passes some old Node they have // lying around into an FBO call, we can translate it to its // current path using fbo.nodeCache. Note that on every TLF // modification, we are guaranteed that the BlockPointer of the // root directory will change (because of the merkle-ish tree of // content hashes we use to assign BlockPointers). // // fbo.nodeCache needs to maintain the absolute latest mappings // for the TLF, or else FBO calls won't see up-to-date data. The // tension in search comes from the fact that we are trying to // discover the BlockPointers of certain files at a specific point // in the MD history, which is not necessarily the same as the // most-recently-seen MD update. Specifically, some callers // process a specific range of MDs, but folderBranchOps may have // heard about a newer one before, or during, when the caller // started processing. That means fbo.nodeCache may have been // updated to reflect the newest BlockPointers, and is no longer // correct as a cache for our search for the data at the old point // in time. if cache == fbo.nodeCache { // Root node should already exist if we have an up-to-date md. node = cache.Get(rootPtr.Ref()) if node == nil { return nil, searchWithOutOfDateCacheError{} } } else { // Root node may or may not exist. var err error node, err = cache.GetOrCreate(rootPtr, string(kmd.GetTlfHandle().GetCanonicalName()), nil) if err != nil { return nil, err } } if node == nil { return nil, fmt.Errorf("Cannot find root node corresponding to %v", rootPtr) } // are they looking for the root directory? 
numNodesFound := 0 if _, ok := nodeMap[rootPtr]; ok { nodeMap[rootPtr] = node numNodesFound++ if numNodesFound >= len(nodeMap) { return nodeMap, nil } } rootPath := cache.PathFromNode(node) if len(rootPath.path) != 1 { return nil, fmt.Errorf("Invalid root path for %v: %s", rootPtr, rootPath) } _, err := fbo.searchForNodesInDirLocked(ctx, lState, cache, newPtrs, kmd, node, rootPath, nodeMap, numNodesFound) if err != nil { return nil, err } if rootPtr != cache.PathFromNode(node).tailPointer() { return nil, searchWithOutOfDateCacheError{} } return nodeMap, nil } func (fbo *folderBlockOps) searchForNodesLocked(ctx context.Context, lState *lockState, cache NodeCache, ptrs []BlockPointer, newPtrs map[BlockPointer]bool, kmd KeyMetadata, rootPtr BlockPointer) ( map[BlockPointer]Node, NodeCache, error) { fbo.blockLock.AssertAnyLocked(lState) // First try the passed-in cache. If it doesn't work because the // cache is out of date, try again with a clean cache. nodeMap, err := fbo.trySearchWithCacheLocked(ctx, lState, cache, ptrs, newPtrs, kmd, rootPtr) if _, ok := err.(searchWithOutOfDateCacheError); ok { // The md is out-of-date, so use a throwaway cache so we // don't pollute the real node cache with stale nodes. fbo.log.CDebugf(ctx, "Root node %v doesn't exist in the node "+ "cache; using a throwaway node cache instead", rootPtr) cache = newNodeCacheStandard(fbo.folderBranch) nodeMap, err = fbo.trySearchWithCacheLocked(ctx, lState, cache, ptrs, newPtrs, kmd, rootPtr) } if err != nil { return nil, nil, err } // Return the whole map even if some nodes weren't found. return nodeMap, cache, nil } // SearchForNodes tries to resolve all the given pointers to a Node // object, using only the updated pointers specified in newPtrs. // Returns an error if any subset of the pointer paths do not exist; // it is the caller's responsibility to decide to error on particular // unresolved nodes. 
// It also returns the cache that ultimately
// contains the nodes -- this might differ from the passed-in cache if
// another goroutine updated that cache and it no longer contains the
// root pointer specified in md.
func (fbo *folderBlockOps) SearchForNodes(ctx context.Context,
	cache NodeCache, ptrs []BlockPointer, newPtrs map[BlockPointer]bool,
	kmd KeyMetadata, rootPtr BlockPointer) (
	map[BlockPointer]Node, NodeCache, error) {
	lState := makeFBOLockState()
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.searchForNodesLocked(
		ctx, lState, cache, ptrs, newPtrs, kmd, rootPtr)
}

// SearchForPaths is like SearchForNodes, except it returns a
// consistent view of all the paths of the searched-for pointers.
func (fbo *folderBlockOps) SearchForPaths(ctx context.Context,
	cache NodeCache, ptrs []BlockPointer, newPtrs map[BlockPointer]bool,
	kmd KeyMetadata, rootPtr BlockPointer) (map[BlockPointer]path, error) {
	lState := makeFBOLockState()
	// Hold the lock while processing the paths so they can't be changed.
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	nodeMap, cache, err := fbo.searchForNodesLocked(
		ctx, lState, cache, ptrs, newPtrs, kmd, rootPtr)
	if err != nil {
		return nil, err
	}

	paths := make(map[BlockPointer]path)
	for ptr, n := range nodeMap {
		if n == nil {
			// Unresolved pointer: report an empty path rather than
			// failing the whole call.
			paths[ptr] = path{}
			continue
		}

		p := cache.PathFromNode(n)
		if p.tailPointer() != ptr {
			return nil, NodeNotFoundError{ptr}
		}
		paths[ptr] = p
	}

	return paths, nil
}

// getUndirtiedEntry returns the clean entry for the given path
// corresponding to a cached dirty entry. If there is no dirty or
// clean entry, nil is returned.
func (fbo *folderBlockOps) getUndirtiedEntry(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	file path) (*DirEntry, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)

	// No dirty cache entry means there is nothing to undirty.
	_, ok := fbo.deCache[file.tailPointer().Ref()]
	if !ok {
		return nil, nil
	}

	// Get the undirtied dir block.
	dblock, err := fbo.getDirLocked(
		ctx, lState, kmd, *file.parentPath(), blockRead)
	if err != nil {
		return nil, err
	}

	undirtiedEntry, ok := dblock.Children[file.tailName()]
	if !ok {
		return nil, nil
	}

	return &undirtiedEntry, nil
}

// setCachedAttr takes blockLock for writing and delegates to
// setCachedAttrLocked.
func (fbo *folderBlockOps) setCachedAttr(
	lState *lockState, ref BlockRef, attr attrChange, realEntry *DirEntry,
	doCreate bool) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	fbo.setCachedAttrLocked(lState, ref, attr, realEntry, doCreate)
}

// UpdateCachedEntryAttributes updates any cached entry for the given
// path according to the given op. The node for the path is returned
// if there is one.
func (fbo *folderBlockOps) UpdateCachedEntryAttributes(
	ctx context.Context, lState *lockState, kmd KeyMetadata,
	dir path, op *setAttrOp) (Node, error) {
	childPath := dir.ChildPathNoPtr(op.Name)

	// find the node for the actual change; requires looking up
	// the child entry to get the BlockPointer, unfortunately.
	de, err := fbo.GetDirtyEntry(ctx, lState, kmd, childPath)
	if err != nil {
		return nil, err
	}

	childNode := fbo.nodeCache.Get(de.Ref())
	if childNode == nil {
		// Nothing to do, since the cache entry won't be
		// accessible from any node.
		return nil, nil
	}

	childPath = dir.ChildPath(op.Name, de.BlockPointer)

	// If there's a cache entry, we need to update it, so try and
	// fetch the undirtied entry.
	cleanEntry, err := fbo.getUndirtiedEntry(ctx, lState, kmd, childPath)
	if err != nil {
		return nil, err
	}
	if cleanEntry != nil {
		fbo.setCachedAttr(lState, de.Ref(), op.Attr, cleanEntry, false)
	}

	return childNode, nil
}

// UpdateCachedEntryAttributesOnRemovedFile updates any cached entry
// for the given path of an unlinked file, according to the given op,
// and it makes a new dirty cache entry if one doesn't exist yet. We
// assume Sync will be called eventually on the corresponding open
// file handle, which will clear out the entry.
func (fbo *folderBlockOps) UpdateCachedEntryAttributesOnRemovedFile(
	ctx context.Context, lState *lockState, op *setAttrOp, de DirEntry) {
	// doCreate=true: make a dirty entry if one doesn't already exist.
	fbo.setCachedAttr(lState, de.Ref(), op.Attr, &de, true)
}

// getDeferredWriteCountForTest reports how many writes are currently
// deferred; test-only helper.
func (fbo *folderBlockOps) getDeferredWriteCountForTest(
	lState *lockState) int {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return len(fbo.deferredWrites)
}

// updatePointer remaps oldPtr to newPtr in the node cache and, when
// the block ID actually changed and shouldPrefetch is set, kicks off a
// prefetch of the new block.
func (fbo *folderBlockOps) updatePointer(kmd KeyMetadata,
	oldPtr BlockPointer, newPtr BlockPointer, shouldPrefetch bool) {
	updated := fbo.nodeCache.UpdatePointer(oldPtr.Ref(), newPtr)
	if !updated {
		// Nothing in the cache referenced oldPtr; nothing to do.
		return
	}

	// Only prefetch if the updated pointer is a new block ID.
	if oldPtr.ID != newPtr.ID {
		// TODO: Remove this comment when we're done debugging because it'll be everywhere.
		fbo.log.CDebugf(context.TODO(),
			"Updated reference for pointer %s to %s.", oldPtr.ID, newPtr.ID)
		if shouldPrefetch {
			// Prefetch the new ref, but only if the old ref already exists in
			// the block cache. Ideally we'd always prefetch it, but we need
			// the type of the block so that we can call `NewEmpty`.
			// TODO KBFS-1850: Eventually we should use the codec library's
			// ability to decode into a nil interface to no longer need to
			// pre-initialize the correct type.
			block, _, _, err := fbo.config.BlockCache().GetWithPrefetch(oldPtr)
			if err != nil {
				return
			}
			fbo.config.BlockOps().Prefetcher().PrefetchBlock(
				block.NewEmpty(), newPtr, kmd, updatePointerPrefetchPriority,
			)
		}
	}
}

// UpdatePointers updates all the pointers in the node cache
// atomically. If `afterUpdateFn` is non-nil, it's called under the
// same block lock under which the pointers were updated.
func (fbo *folderBlockOps) UpdatePointers(kmd KeyMetadata,
	lState *lockState, op op, shouldPrefetch bool,
	afterUpdateFn func() error) error {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	for _, update := range op.allUpdates() {
		fbo.updatePointer(kmd, update.Unref, update.Ref, shouldPrefetch)
	}
	if afterUpdateFn == nil {
		return nil
	}
	return afterUpdateFn()
}

// unlinkDuringFastForwardLocked unlinks the cached node for ref (if
// any) from its current path. blockLock must be held for writing.
func (fbo *folderBlockOps) unlinkDuringFastForwardLocked(
	ctx context.Context, lState *lockState, ref BlockRef) {
	fbo.blockLock.AssertLocked(lState)
	oldNode := fbo.nodeCache.Get(ref)
	if oldNode == nil {
		return
	}
	oldPath := fbo.nodeCache.PathFromNode(oldNode)
	fbo.log.CDebugf(ctx, "Unlinking missing node %s/%v during "+
		"fast-forward", oldPath, ref)
	fbo.nodeCache.Unlink(ref, oldPath)
}

// fastForwardDirAndChildrenLocked recursively fast-forwards all cached
// children of currDir to the pointers in the current version of the
// directory block, unlinking children that no longer exist.
// blockLock must be held for writing.
func (fbo *folderBlockOps) fastForwardDirAndChildrenLocked(
	ctx context.Context, lState *lockState, currDir path,
	children map[string]map[pathNode]bool,
	kmd KeyMetadata) ([]NodeChange, error) {
	fbo.blockLock.AssertLocked(lState)
	dirBlock, err := fbo.getDirLocked(ctx, lState, kmd, currDir, blockRead)
	if err != nil {
		return nil, err
	}

	prefix := currDir.String()

	// TODO: parallelize me?
	var changes []NodeChange
	for child := range children[prefix] {
		entry, ok := dirBlock.Children[child.Name]
		if !ok {
			// Child vanished from the current version; unlink it.
			fbo.unlinkDuringFastForwardLocked(
				ctx, lState, child.BlockPointer.Ref())
			continue
		}

		fbo.log.CDebugf(ctx, "Fast-forwarding %v -> %v",
			child.BlockPointer, entry.BlockPointer)
		fbo.updatePointer(kmd, child.BlockPointer,
			entry.BlockPointer, true)
		node := fbo.nodeCache.Get(entry.BlockPointer.Ref())
		newPath := fbo.nodeCache.PathFromNode(node)
		if entry.Type == Dir {
			if node != nil {
				change := NodeChange{Node: node}
				for subchild := range children[newPath.String()] {
					change.DirUpdated =
						append(change.DirUpdated, subchild.Name)
				}
				changes = append(changes, change)
			}

			childChanges, err := fbo.fastForwardDirAndChildrenLocked(
				ctx, lState, newPath, children, kmd)
			if err != nil {
				return nil, err
			}
			changes = append(changes, childChanges...)
		} else if node != nil {
			// File -- invalidate the entire file contents.
			changes = append(changes, NodeChange{
				Node:        node,
				FileUpdated: []WriteRange{{Len: 0, Off: 0}},
			})
		}
	}
	// This level has been fully processed; the caller treats whatever
	// is left in `children` as deleted.
	delete(children, prefix)
	return changes, nil
}

// FastForwardAllNodes attempts to update the block pointers
// associated with nodes in the cache by searching for their paths in
// the current version of the TLF. If it can't find a corresponding
// node, it assumes it's been deleted and unlinks it. Returns the set
// of node changes that resulted. If there are no nodes, it returns a
// nil error because there's nothing to be done.
func (fbo *folderBlockOps) FastForwardAllNodes(ctx context.Context,
	lState *lockState, md ReadOnlyRootMetadata) (
	changes []NodeChange, err error) {
	// Take a hard lock through this whole process. TODO: is there
	// any way to relax this? It could lead to file system operation
	// timeouts, even on reads, if we hold it too long.
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)

	nodes := fbo.nodeCache.AllNodes()
	if len(nodes) == 0 {
		// Nothing needs to be done!
		return nil, nil
	}
	fbo.log.CDebugf(ctx, "Fast-forwarding %d nodes", len(nodes))
	defer func() { fbo.log.CDebugf(ctx, "Fast-forward complete: %v", err) }()

	// Build a "tree" representation for each interesting path prefix.
	children := make(map[string]map[pathNode]bool)
	var rootPath path
	for _, n := range nodes {
		p := fbo.nodeCache.PathFromNode(n)
		if len(p.path) == 1 {
			rootPath = p
		}
		// Record each path component under its parent-prefix key.
		prevPath := ""
		for _, pn := range p.path {
			if prevPath != "" {
				childPNs := children[prevPath]
				if childPNs == nil {
					childPNs = make(map[pathNode]bool)
					children[prevPath] = childPNs
				}
				childPNs[pn] = true
			}
			prevPath = filepath.Join(prevPath, pn.Name)
		}
	}

	if !rootPath.isValid() {
		return nil, errors.New("Couldn't find the root path")
	}

	fbo.log.CDebugf(ctx, "Fast-forwarding root %v -> %v",
		rootPath.path[0].BlockPointer, md.data.Dir.BlockPointer)
	fbo.updatePointer(md, rootPath.path[0].BlockPointer,
		md.data.Dir.BlockPointer, false)
	rootPath.path[0].BlockPointer = md.data.Dir.BlockPointer
	rootNode := fbo.nodeCache.Get(md.data.Dir.BlockPointer.Ref())
	if rootNode != nil {
		change := NodeChange{Node: rootNode}
		for child := range children[rootPath.String()] {
			change.DirUpdated = append(change.DirUpdated, child.Name)
		}
		changes = append(changes, change)
	}

	childChanges, err := fbo.fastForwardDirAndChildrenLocked(
		ctx, lState, rootPath, children, md)
	if err != nil {
		return nil, err
	}
	changes = append(changes, childChanges...)

	// Unlink any children that remain.
	for _, childPNs := range children {
		for child := range childPNs {
			fbo.unlinkDuringFastForwardLocked(
				ctx, lState, child.BlockPointer.Ref())
		}
	}

	return changes, nil
}

type chainsPathPopulator interface {
	populateChainPaths(context.Context, logger.Logger, *crChains, bool) error
}

// populateChainPaths updates all the paths in all the ops tracked by
// `chains`, using the main nodeCache.
func (fbo *folderBlockOps) populateChainPaths(ctx context.Context,
	log logger.Logger, chains *crChains, includeCreates bool) error {
	_, err := chains.getPaths(ctx, fbo, log, fbo.nodeCache, includeCreates)
	return err
}

// Compile-time check that folderBlockOps implements chainsPathPopulator.
var _ chainsPathPopulator = (*folderBlockOps)(nil)
package authtoken

import (
	"fmt"
	"io/ioutil"
	"log"
	"testing"

	. "github.com/smartystreets/goconvey/convey"

	"bytes"
	"os"
	"path/filepath"
	"time"
)

// tempDir creates a fresh temp directory for a test, panicking on
// failure; callers are responsible for removing it.
func tempDir() string {
	dir, err := ioutil.TempDir("", "skydb.auth.test")
	if err != nil {
		panic(err)
	}
	return dir
}

func TestNewToken(t *testing.T) {
	token := New("com.oursky.skygear", "46709394", time.Time{})

	if token.AppName != "com.oursky.skygear" {
		t.Fatalf("got token.AppName = %v, want com.oursky.skygear", token.AppName)
	}
	if token.UserInfoID != "46709394" {
		t.Fatalf("got token.UserInfoID = %v, want 46709394", token.UserInfoID)
	}
	if token.AccessToken == "" {
		t.Fatal("got empty token, want non-empty AccessToken value")
	}
	// A zero expiry passed to New should still produce a non-zero
	// ExpiredAt on the returned token.
	if token.ExpiredAt.IsZero() {
		t.Fatalf("got token = %v, want non-zero ExpiredAt value", token)
	}
}

func TestNewTokenWithExpiry(t *testing.T) {
	expiredAt := time.Unix(0, 1)
	token := New("com.oursky.skygear", "46709394", expiredAt)

	if !token.ExpiredAt.Equal(expiredAt) {
		t.Fatalf("got token.ExpiredAt = %v, want %v", token.ExpiredAt, expiredAt)
	}
}

func TestTokenIsExpired(t *testing.T) {
	now := time.Now()
	token := Token{}

	token.ExpiredAt = now.Add(1 * time.Second)
	if token.IsExpired() {
		t.Fatalf("got expired token = %v, now = %v, want it not expired", token, now)
	}

	token.ExpiredAt = now.Add(-1 * time.Second)
	if !token.IsExpired() {
		t.Fatalf("got non-expired token = %v, now = %v, want it expired", token, now)
	}
}

func TestEmptyTokenIsExpired(t *testing.T) {
	token := Token{}
	if !token.IsExpired() {
		t.Fatalf("got non-expired empty token = %v, want it expired", token)
	}
}

func TestFileStorePut(t *testing.T) {
	// Expected on-disk JSON, including the trailing newline.
	const savedFileContent = `{"accessToken":"sometoken","expiredAt":1000000001,"appName":"com.oursky.skygear","userInfoID":"someuserinfoid"}
`
	token := Token{
		AccessToken: "sometoken",
		ExpiredAt:   time.Unix(1, 1).UTC(),
		AppName:     "com.oursky.skygear",
		UserInfoID:  "someuserinfoid",
	}

	dir := tempDir()
	defer os.RemoveAll(dir)

	store := FileStore(dir)
	if err := store.Put(&token); err != nil {
		t.Fatalf("got err = %v, want nil", err)
	}

	filePath := filepath.Join(dir, "sometoken")
	file, err := os.Open(filePath)
	if err != nil {
		panic(err)
	}
	fileBytes, err := ioutil.ReadAll(file)
	if err != nil {
		panic(err)
	}

	if !bytes.Equal(fileBytes, []byte(savedFileContent)) {
		t.Fatalf("got file content = %#v, want %#v", string(fileBytes), savedFileContent)
	}
}

func TestFileStoreGet(t *testing.T) {
	Convey("FileStore", t, func() {
		dir := tempDir()
		defer os.RemoveAll(dir)

		store := FileStore(dir)
		token := Token{}

		Convey("gets an non-expired file token", func() {
			tomorrow := time.Now().AddDate(0, 0, 1)
			So(store.Put(&Token{
				AccessToken: "sometoken",
				ExpiredAt:   tomorrow,
				AppName:     "com.oursky.skygear",
				UserInfoID:  "someuserinfoid",
			}), ShouldBeNil)

			err := store.Get("sometoken", &token)
			So(err, ShouldBeNil)
			So(token, ShouldResemble, Token{
				AccessToken: "sometoken",
				ExpiredAt:   tomorrow,
				AppName:     "com.oursky.skygear",
				UserInfoID:  "someuserinfoid",
			})
		})

		Convey("returns an NotFoundError when the token to get is expired", func() {
			// Write an already-expired token file directly, bypassing Put.
			yesterday := time.Now().AddDate(0, 0, -1)
			tokenString := fmt.Sprintf(`
{
	"accessToken": "sometoken",
	"expiredAt": %v,
	"appName": "com.oursky.skygear",
	"userInfoID": "someuserinfoid"
}
`, yesterday.UnixNano())
			err := ioutil.WriteFile(filepath.Join(dir, "sometoken"), []byte(tokenString), 0644)
			So(err, ShouldBeNil)

			err = store.Get("sometoken", &token)
			So(err, ShouldHaveSameTypeAs, &NotFoundError{})

			Convey("and deletes the token file", func() {
				_, err := os.Stat(filepath.Join(dir, "sometoken"))
				So(os.IsNotExist(err), ShouldBeTrue)
			})
		})

		Convey("returns a NotFoundError when the token to get does not existed", func() {
			err := store.Get("notexisttoken", &token)
			So(err, ShouldHaveSameTypeAs, &NotFoundError{})
		})
	})
}

// TestFileStoreEscape verifies that path-traversal token names cannot
// escape the store directory.
func TestFileStoreEscape(t *testing.T) {
	Convey("FileStore", t, func() {
		tDir := tempDir()
		defer os.RemoveAll(tDir)

		dir := filepath.Join(tDir, "inner")
		mdErr := os.Mkdir(dir, 0755)
		So(mdErr, ShouldBeNil)

		store := FileStore(dir)
		token := Token{}

		Convey("Get not escaping dir",
func() {
			outterFilepath := filepath.Join(tDir, "outerfile")
			err := ioutil.WriteFile(outterFilepath, []byte(`{}`), 0644)
			So(err, ShouldBeNil)

			err = store.Get("../outerfile", &token)
			So(err.Error(), ShouldEqual, `get "../outerfile": invalid access token`)
		})

		Convey("Put not escaping dir", func() {
			token := Token{
				AccessToken: "../outerfile",
				ExpiredAt:   time.Unix(1, 1).UTC(),
				AppName:     "com.oursky.skygear",
				UserInfoID:  "someuserinfoid",
			}

			err := store.Put(&token)
			So(err.Error(), ShouldEqual, `get "../outerfile": invalid access token`)
		})

		Convey("Delete not escaping dir", func() {
			outterFilepath := filepath.Join(tDir, "outerfile")
			err := ioutil.WriteFile(outterFilepath, []byte(`{}`), 0644)
			So(err, ShouldBeNil)

			err = store.Delete("../outerfile")
			So(err.Error(), ShouldEqual, `get "../outerfile": invalid access token`)
		})
	})
}

// TestFileStoreDelete verifies Delete for existing, missing, and empty
// token names.
func TestFileStoreDelete(t *testing.T) {
	Convey("FileStore", t, func() {
		dir := tempDir()
		// Fix: this cleanup was commented out, leaking a temp dir on
		// every run.
		defer os.RemoveAll(dir)

		store := FileStore(dir)

		Convey("delete an existing token", func() {
			accessTokenPath := filepath.Join(dir, "accesstoken")
			log.Println(accessTokenPath)
			So(ioutil.WriteFile(accessTokenPath, []byte(`{}`), 0644), ShouldBeNil)
			So(exists(accessTokenPath), ShouldBeTrue)

			err := store.Delete("accesstoken")
			So(err, ShouldBeNil)
			So(exists(accessTokenPath), ShouldBeFalse)
		})

		Convey("delete an not existing token", func() {
			err := store.Delete("notexistaccesstoken")
			So(err, ShouldBeNil)
		})

		Convey("delete an empty token", func() {
			err := store.Delete("")
			So(err, ShouldHaveSameTypeAs, &NotFoundError{})
			So(err.Error(), ShouldEqual, `get "": invalid access token`)
		})
	})
}

// exists reports whether a stat of path does not fail with "not
// exist". NOTE(review): other stat errors also report true — acceptable
// for these tests.
func exists(path string) bool {
	_, err := os.Stat(path)
	return !os.IsNotExist(err)
}

// tempRedisStore returns a RedisStore pointed at $REDISTEST,
// defaulting to local DB 15.
func tempRedisStore() *RedisStore {
	defaultTo := func(envvar string, value string) {
		if os.Getenv(envvar) == "" {
			os.Setenv(envvar, value)
		}
	}
	// 15 is the default max DB number of redis
	defaultTo("REDISTEST", "redis://127.0.0.1:6379/15")
	return NewRedisStore(os.Getenv("REDISTEST"))
}

// clearRedisStore flushes the test redis DB.
func (r *RedisStore) clearRedisStore() {
	c := r.pool.Get()
	defer c.Close()
	c.Do("FLUSHDB")
}

func TestRedisStoreGet(t *testing.T) {
	Convey("RedisStore", t, func() {
		r := tempRedisStore()
		defer r.clearRedisStore()

		Convey("Get Non-Expired Token", func() {
			tokenName := "someToken"
			tomorrow := time.Now().AddDate(0, 0, 1).UTC()
			token := Token{
				AccessToken: tokenName,
				ExpiredAt:   tomorrow,
				AppName:     "com.oursky.skygear",
				UserInfoID:  "someuserinfoid",
			}
			err := r.Put(&token)
			So(err, ShouldBeNil)

			result := Token{}
			err = r.Get(tokenName, &result)
			So(err, ShouldBeNil)
			So(result, ShouldResemble, token)
		})

		Convey("Get Expired Token", func() {
			tokenName := "expiredToken"
			yesterday := time.Now().AddDate(0, 0, -1).UTC()
			token := Token{
				AccessToken: tokenName,
				ExpiredAt:   yesterday,
				AppName:     "com.oursky.skygear",
				UserInfoID:  "someuserinfoid",
			}
			err := r.Put(&token)
			So(err, ShouldBeNil)

			result := Token{}
			err = r.Get(tokenName, &result)
			So(err, ShouldHaveSameTypeAs, &NotFoundError{})
		})

		Convey("Get Updated Token", func() {
			tokenName := "updatedToken"
			tomorrow := time.Now().AddDate(0, 0, 1).UTC()
			token := Token{
				AccessToken: tokenName,
				ExpiredAt:   tomorrow,
				AppName:     "com.oursky.skygear",
				UserInfoID:  "someuserinfoid",
			}
			err := r.Put(&token)
			So(err, ShouldBeNil)

			result := Token{}
			err = r.Get(tokenName, &result)
			So(err, ShouldBeNil)
			So(result, ShouldResemble, token)

			Convey("update to future", func() {
				future := time.Now().AddDate(0, 0, 10).UTC()
				token := Token{
					AccessToken: tokenName,
					ExpiredAt:   future,
					AppName:     "com.oursky.skygear",
					UserInfoID:  "someuserinfoid",
				}
				err := r.Put(&token)
				So(err, ShouldBeNil)

				result := Token{}
				err = r.Get(tokenName, &result)
				So(err, ShouldBeNil)
				So(result, ShouldResemble, token)
			})

			Convey("update to the past", func() {
				past := time.Now().AddDate(0, 0, -10).UTC()
				token := Token{
					AccessToken: tokenName,
					ExpiredAt:   past,
					AppName:     "com.oursky.skygear",
					UserInfoID:  "someuserinfoid",
				}
				err := r.Put(&token)
				So(err, ShouldBeNil)

				result := Token{}
				err = r.Get(tokenName,
&result)
				So(err, ShouldHaveSameTypeAs, &NotFoundError{})
			})
		})

		Convey("Get Nonexistent Token", func() {
			tokenName := "nonexistentToken"
			result := Token{}
			err := r.Get(tokenName, &result)
			So(err, ShouldHaveSameTypeAs, &NotFoundError{})
		})
	})
}

// TestRedisStorePut exercises Put with an empty access-token name.
func TestRedisStorePut(t *testing.T) {
	Convey("RedisStore", t, func() {
		tokenName := ""
		r := tempRedisStore()
		defer r.clearRedisStore()

		tomorrow := time.Now().AddDate(0, 0, 1).UTC()
		token := Token{
			AccessToken: tokenName,
			ExpiredAt:   tomorrow,
			AppName:     "com.oursky.skygear",
			UserInfoID:  "someuserinfoid",
		}
		err := r.Put(&token)
		So(err, ShouldBeNil)
	})
}

func TestRedisStoreDelete(t *testing.T) {
	Convey("RedisStore", t, func() {
		r := tempRedisStore()
		defer r.clearRedisStore()

		Convey("Delete existing token", func() {
			tokenName := "someToken"
			tomorrow := time.Now().AddDate(0, 0, 1).UTC()
			token := Token{
				AccessToken: tokenName,
				ExpiredAt:   tomorrow,
				AppName:     "com.oursky.skygear",
				UserInfoID:  "someuserinfoid",
			}
			err := r.Put(&token)
			So(err, ShouldBeNil)

			err = r.Delete(tokenName)
			So(err, ShouldBeNil)

			result := Token{}
			err = r.Get(tokenName, &result)
			So(err, ShouldHaveSameTypeAs, &NotFoundError{})
		})

		Convey("Delete nonexistent token", func() {
			tokenName := "nonexistentToken"
			err := r.Delete(tokenName)
			So(err, ShouldBeNil)
		})
	})
}

Make AppName at authtoken test be alphanumeric or underscore

package authtoken

import (
	"fmt"
	"io/ioutil"
	"log"
	"testing"

	. "github.com/smartystreets/goconvey/convey"

	"bytes"
	"os"
	"path/filepath"
	"time"
)

// tempDir creates a fresh temp directory for a test, panicking on
// failure; callers are responsible for removing it.
func tempDir() string {
	dir, err := ioutil.TempDir("", "skydb.auth.test")
	if err != nil {
		panic(err)
	}
	return dir
}

func TestNewToken(t *testing.T) {
	token := New("com_oursky_skygear", "46709394", time.Time{})

	if token.AppName != "com_oursky_skygear" {
		t.Fatalf("got token.AppName = %v, want com_oursky_skygear", token.AppName)
	}
	if token.UserInfoID != "46709394" {
		t.Fatalf("got token.UserInfoID = %v, want 46709394", token.UserInfoID)
	}
	if token.AccessToken == "" {
		t.Fatal("got empty token, want non-empty AccessToken value")
	}
	if token.ExpiredAt.IsZero() {
		t.Fatalf("got token = %v, want non-zero ExpiredAt value", token)
	}
}

func TestNewTokenWithExpiry(t *testing.T) {
	expiredAt := time.Unix(0, 1)
	token := New("com_oursky_skygear", "46709394", expiredAt)

	if !token.ExpiredAt.Equal(expiredAt) {
		t.Fatalf("got token.ExpiredAt = %v, want %v", token.ExpiredAt, expiredAt)
	}
}

func TestTokenIsExpired(t *testing.T) {
	now := time.Now()
	token := Token{}

	token.ExpiredAt = now.Add(1 * time.Second)
	if token.IsExpired() {
		t.Fatalf("got expired token = %v, now = %v, want it not expired", token, now)
	}

	token.ExpiredAt = now.Add(-1 * time.Second)
	if !token.IsExpired() {
		t.Fatalf("got non-expired token = %v, now = %v, want it expired", token, now)
	}
}

func TestEmptyTokenIsExpired(t *testing.T) {
	token := Token{}
	if !token.IsExpired() {
		t.Fatalf("got non-expired empty token = %v, want it expired", token)
	}
}

func TestFileStorePut(t *testing.T) {
	// Expected on-disk JSON, including the trailing newline.
	const savedFileContent = `{"accessToken":"sometoken","expiredAt":1000000001,"appName":"com_oursky_skygear","userInfoID":"someuserinfoid"}
`
	token := Token{
		AccessToken: "sometoken",
		ExpiredAt:   time.Unix(1, 1).UTC(),
		AppName:     "com_oursky_skygear",
		UserInfoID:  "someuserinfoid",
	}

	dir := tempDir()
	defer os.RemoveAll(dir)

	store := FileStore(dir)
	if err := store.Put(&token); err != nil {
		t.Fatalf("got err = %v, want nil", err)
	}

	filePath := filepath.Join(dir, "sometoken")
file, err := os.Open(filePath)
	if err != nil {
		panic(err)
	}
	fileBytes, err := ioutil.ReadAll(file)
	if err != nil {
		panic(err)
	}

	if !bytes.Equal(fileBytes, []byte(savedFileContent)) {
		t.Fatalf("got file content = %#v, want %#v", string(fileBytes), savedFileContent)
	}
}

func TestFileStoreGet(t *testing.T) {
	Convey("FileStore", t, func() {
		dir := tempDir()
		defer os.RemoveAll(dir)

		store := FileStore(dir)
		token := Token{}

		Convey("gets an non-expired file token", func() {
			tomorrow := time.Now().AddDate(0, 0, 1)
			So(store.Put(&Token{
				AccessToken: "sometoken",
				ExpiredAt:   tomorrow,
				AppName:     "com_oursky_skygear",
				UserInfoID:  "someuserinfoid",
			}), ShouldBeNil)

			err := store.Get("sometoken", &token)
			So(err, ShouldBeNil)
			So(token, ShouldResemble, Token{
				AccessToken: "sometoken",
				ExpiredAt:   tomorrow,
				AppName:     "com_oursky_skygear",
				UserInfoID:  "someuserinfoid",
			})
		})

		Convey("returns an NotFoundError when the token to get is expired", func() {
			// Write an already-expired token file directly, bypassing Put.
			yesterday := time.Now().AddDate(0, 0, -1)
			tokenString := fmt.Sprintf(`
{
	"accessToken": "sometoken",
	"expiredAt": %v,
	"appName": "com_oursky_skygear",
	"userInfoID": "someuserinfoid"
}
`, yesterday.UnixNano())
			err := ioutil.WriteFile(filepath.Join(dir, "sometoken"), []byte(tokenString), 0644)
			So(err, ShouldBeNil)

			err = store.Get("sometoken", &token)
			So(err, ShouldHaveSameTypeAs, &NotFoundError{})

			Convey("and deletes the token file", func() {
				_, err := os.Stat(filepath.Join(dir, "sometoken"))
				So(os.IsNotExist(err), ShouldBeTrue)
			})
		})

		Convey("returns a NotFoundError when the token to get does not existed", func() {
			err := store.Get("notexisttoken", &token)
			So(err, ShouldHaveSameTypeAs, &NotFoundError{})
		})
	})
}

// TestFileStoreEscape verifies that path-traversal token names cannot
// escape the store directory.
func TestFileStoreEscape(t *testing.T) {
	Convey("FileStore", t, func() {
		tDir := tempDir()
		defer os.RemoveAll(tDir)

		dir := filepath.Join(tDir, "inner")
		mdErr := os.Mkdir(dir, 0755)
		So(mdErr, ShouldBeNil)

		store := FileStore(dir)
		token := Token{}

		Convey("Get not escaping dir", func() {
			outterFilepath := filepath.Join(tDir, "outerfile")
			err := ioutil.WriteFile(outterFilepath, []byte(`{}`), 0644)
			So(err, ShouldBeNil)

			err = store.Get("../outerfile", &token)
			So(err.Error(), ShouldEqual, `get "../outerfile": invalid access token`)
		})

		Convey("Put not escaping dir", func() {
			token := Token{
				AccessToken: "../outerfile",
				ExpiredAt:   time.Unix(1, 1).UTC(),
				AppName:     "com_oursky_skygear",
				UserInfoID:  "someuserinfoid",
			}

			err := store.Put(&token)
			So(err.Error(), ShouldEqual, `get "../outerfile": invalid access token`)
		})

		Convey("Delete not escaping dir", func() {
			outterFilepath := filepath.Join(tDir, "outerfile")
			err := ioutil.WriteFile(outterFilepath, []byte(`{}`), 0644)
			So(err, ShouldBeNil)

			err = store.Delete("../outerfile")
			So(err.Error(), ShouldEqual, `get "../outerfile": invalid access token`)
		})
	})
}

// TestFileStoreDelete verifies Delete for existing, missing, and empty
// token names.
func TestFileStoreDelete(t *testing.T) {
	Convey("FileStore", t, func() {
		dir := tempDir()
		// Fix: this cleanup was commented out, leaking a temp dir on
		// every run.
		defer os.RemoveAll(dir)

		store := FileStore(dir)

		Convey("delete an existing token", func() {
			accessTokenPath := filepath.Join(dir, "accesstoken")
			log.Println(accessTokenPath)
			So(ioutil.WriteFile(accessTokenPath, []byte(`{}`), 0644), ShouldBeNil)
			So(exists(accessTokenPath), ShouldBeTrue)

			err := store.Delete("accesstoken")
			So(err, ShouldBeNil)
			So(exists(accessTokenPath), ShouldBeFalse)
		})

		Convey("delete an not existing token", func() {
			err := store.Delete("notexistaccesstoken")
			So(err, ShouldBeNil)
		})

		Convey("delete an empty token", func() {
			err := store.Delete("")
			So(err, ShouldHaveSameTypeAs, &NotFoundError{})
			So(err.Error(), ShouldEqual, `get "": invalid access token`)
		})
	})
}

// exists reports whether a stat of path does not fail with "not
// exist". NOTE(review): other stat errors also report true — acceptable
// for these tests.
func exists(path string) bool {
	_, err := os.Stat(path)
	return !os.IsNotExist(err)
}

// tempRedisStore returns a RedisStore pointed at $REDISTEST,
// defaulting to local DB 15.
func tempRedisStore() *RedisStore {
	defaultTo := func(envvar string, value string) {
		if os.Getenv(envvar) == "" {
			os.Setenv(envvar, value)
		}
	}
	// 15 is the default max DB number of redis
	defaultTo("REDISTEST", "redis://127.0.0.1:6379/15")
	return NewRedisStore(os.Getenv("REDISTEST"))
}

// clearRedisStore flushes the test redis DB.
func (r *RedisStore) clearRedisStore() {
	c := r.pool.Get()
	defer c.Close()
	c.Do("FLUSHDB")
}

func TestRedisStoreGet(t *testing.T) {
	Convey("RedisStore", t, func() {
		r := tempRedisStore()
		defer r.clearRedisStore()

		Convey("Get Non-Expired Token", func() {
			tokenName := "someToken"
			tomorrow := time.Now().AddDate(0, 0, 1).UTC()
			token := Token{
				AccessToken: tokenName,
				ExpiredAt:   tomorrow,
				AppName:     "com_oursky_skygear",
				UserInfoID:  "someuserinfoid",
			}
			err := r.Put(&token)
			So(err, ShouldBeNil)

			result := Token{}
			err = r.Get(tokenName, &result)
			So(err, ShouldBeNil)
			So(result, ShouldResemble, token)
		})

		Convey("Get Expired Token", func() {
			tokenName := "expiredToken"
			yesterday := time.Now().AddDate(0, 0, -1).UTC()
			token := Token{
				AccessToken: tokenName,
				ExpiredAt:   yesterday,
				AppName:     "com_oursky_skygear",
				UserInfoID:  "someuserinfoid",
			}
			err := r.Put(&token)
			So(err, ShouldBeNil)

			result := Token{}
			err = r.Get(tokenName, &result)
			So(err, ShouldHaveSameTypeAs, &NotFoundError{})
		})

		Convey("Get Updated Token", func() {
			tokenName := "updatedToken"
			tomorrow := time.Now().AddDate(0, 0, 1).UTC()
			token := Token{
				AccessToken: tokenName,
				ExpiredAt:   tomorrow,
				AppName:     "com_oursky_skygear",
				UserInfoID:  "someuserinfoid",
			}
			err := r.Put(&token)
			So(err, ShouldBeNil)

			result := Token{}
			err = r.Get(tokenName, &result)
			So(err, ShouldBeNil)
			So(result, ShouldResemble, token)

			Convey("update to future", func() {
				future := time.Now().AddDate(0, 0, 10).UTC()
				token := Token{
					AccessToken: tokenName,
					ExpiredAt:   future,
					AppName:     "com_oursky_skygear",
					UserInfoID:  "someuserinfoid",
				}
				err := r.Put(&token)
				So(err, ShouldBeNil)

				result := Token{}
				err = r.Get(tokenName, &result)
				So(err, ShouldBeNil)
				So(result, ShouldResemble, token)
			})

			Convey("update to the past", func() {
				// Re-putting with a past expiry should make the token
				// unretrievable.
				past := time.Now().AddDate(0, 0, -10).UTC()
				token := Token{
					AccessToken: tokenName,
					ExpiredAt:   past,
					AppName:     "com_oursky_skygear",
					UserInfoID:  "someuserinfoid",
				}
				err := r.Put(&token)
				So(err, ShouldBeNil)

				result := Token{}
				err = r.Get(tokenName, &result)
				So(err, ShouldHaveSameTypeAs, &NotFoundError{})
			})
		})

		Convey("Get Nonexistent Token", func() {
			tokenName := "nonexistentToken"
			result := Token{}
			err := r.Get(tokenName, &result)
			So(err, ShouldHaveSameTypeAs, &NotFoundError{})
		})
	})
}

// TestRedisStorePut exercises Put with an empty access-token name.
func TestRedisStorePut(t *testing.T) {
	Convey("RedisStore", t, func() {
		tokenName := ""
		r := tempRedisStore()
		defer r.clearRedisStore()

		tomorrow := time.Now().AddDate(0, 0, 1).UTC()
		token := Token{
			AccessToken: tokenName,
			ExpiredAt:   tomorrow,
			AppName:     "com_oursky_skygear",
			UserInfoID:  "someuserinfoid",
		}
		err := r.Put(&token)
		So(err, ShouldBeNil)
	})
}

func TestRedisStoreDelete(t *testing.T) {
	Convey("RedisStore", t, func() {
		r := tempRedisStore()
		defer r.clearRedisStore()

		Convey("Delete existing token", func() {
			tokenName := "someToken"
			tomorrow := time.Now().AddDate(0, 0, 1).UTC()
			token := Token{
				AccessToken: tokenName,
				ExpiredAt:   tomorrow,
				AppName:     "com_oursky_skygear",
				UserInfoID:  "someuserinfoid",
			}
			err := r.Put(&token)
			So(err, ShouldBeNil)

			err = r.Delete(tokenName)
			So(err, ShouldBeNil)

			result := Token{}
			err = r.Get(tokenName, &result)
			So(err, ShouldHaveSameTypeAs, &NotFoundError{})
		})

		Convey("Delete nonexistent token", func() {
			tokenName := "nonexistentToken"
			err := r.Delete(tokenName)
			So(err, ShouldBeNil)
		})
	})
}
package hypervisor

import (
	"net"
	"time"

	"github.com/Symantec/Dominator/lib/tags"
)

const (
	// VM lifecycle states.
	StateStarting      = 0
	StateRunning       = 1
	StateFailedToStart = 2
	StateStopping      = 3
	StateStopped       = 4
	StateDestroying    = 5
	StateMigrating     = 6
	StateExporting     = 7

	// Volume image formats.
	VolumeFormatRaw   = 0
	VolumeFormatQCOW2 = 1
)

type AcknowledgeVmRequest struct {
	IpAddress net.IP
}

type AcknowledgeVmResponse struct {
	Error string
}

// Address is an IP/MAC pair used in the address pool.
type Address struct {
	IpAddress  net.IP `json:",omitempty"`
	MacAddress string
}

type BecomePrimaryVmOwnerRequest struct {
	IpAddress net.IP
}

type BecomePrimaryVmOwnerResponse struct {
	Error string
}

type ChangeAddressPoolRequest struct {
	AddressesToAdd       []Address       // Will be added to free pool.
	AddressesToRemove    []Address       // Will be removed from free pool.
	MaximumFreeAddresses map[string]uint // Key: subnet ID.
}

type ChangeAddressPoolResponse struct {
	Error string
}

type ChangeOwnersRequest struct {
	OwnerGroups []string `json:",omitempty"`
	OwnerUsers  []string `json:",omitempty"`
}

type ChangeOwnersResponse struct {
	Error string
}

type ChangeVmDestroyProtectionRequest struct {
	DestroyProtection bool
	IpAddress         net.IP
}

type ChangeVmDestroyProtectionResponse struct {
	Error string
}

type ChangeVmOwnerUsersRequest struct {
	IpAddress  net.IP
	OwnerUsers []string
}

type ChangeVmOwnerUsersResponse struct {
	Error string
}

type ChangeVmTagsRequest struct {
	IpAddress net.IP
	Tags      tags.Tags
}

type ChangeVmTagsResponse struct {
	Error string
}

type CommitImportedVmRequest struct {
	IpAddress net.IP
}

type CommitImportedVmResponse struct {
	Error string
}

// The ConnectToVmSerialPort RPC is fully streamed. After the request/response,
// the connection/client is hijacked and each side of the connection will send
// a stream of bytes.
type ConnectToVmSerialPortRequest struct {
	IpAddress  net.IP
	PortNumber uint
}

type ConnectToVmSerialPortResponse struct {
	Error string
}

type CopyVmRequest struct {
	AccessToken      []byte
	IpAddress        net.IP
	SourceHypervisor string
	VmInfo
}

type CopyVmResponse struct { // Multiple responses are sent.
	Error           string
	Final           bool // If true, this is the final response.
	IpAddress       net.IP
	ProgressMessage string
}

type CreateVmRequest struct {
	DhcpTimeout      time.Duration
	ImageDataSize    uint64
	ImageTimeout     time.Duration
	MinimumFreeBytes uint64
	RoundupPower     uint64
	SecondaryVolumes []Volume
	SkipBootloader   bool
	UserDataSize     uint64
	VmInfo
} // RAW image data (length=ImageDataSize) and user data (length=UserDataSize)
// are streamed afterwards.

type CreateVmResponse struct { // Multiple responses are sent.
	DhcpTimedOut    bool
	Final           bool // If true, this is the final response.
	IpAddress       net.IP
	ProgressMessage string
	Error           string
}

type DeleteVmVolumeRequest struct {
	AccessToken []byte
	IpAddress   net.IP
	VolumeIndex uint
}

type DeleteVmVolumeResponse struct {
	Error string
}

type DestroyVmRequest struct {
	AccessToken []byte
	IpAddress   net.IP
}

type DestroyVmResponse struct {
	Error string
}

type DiscardVmAccessTokenRequest struct {
	AccessToken []byte
	IpAddress   net.IP
}

type DiscardVmAccessTokenResponse struct {
	Error string
}

type DiscardVmOldImageRequest struct {
	IpAddress net.IP
}

type DiscardVmOldImageResponse struct {
	Error string
}

type DiscardVmOldUserDataRequest struct {
	IpAddress net.IP
}

type DiscardVmOldUserDataResponse struct {
	Error string
}

type DiscardVmSnapshotRequest struct {
	IpAddress net.IP
}

type DiscardVmSnapshotResponse struct {
	Error string
}

type ExportLocalVmRequest struct {
	IpAddress          net.IP
	VerificationCookie []byte `json:",omitempty"`
}

type ExportLocalVmResponse struct {
	Error  string
	VmInfo LocalVmInfo
}

// The GetUpdates() RPC is fully streamed.
// The client may or may not send GetUpdateRequest messages to the server.
// The server sends a stream of Update messages.
type GetUpdateRequest struct{}

// Update carries a snapshot of Hypervisor state. The Have* booleans
// presumably mark which optional sections are populated in this message —
// TODO(review): confirm against the GetUpdates server implementation.
type Update struct {
	HaveAddressPool  bool               `json:",omitempty"`
	AddressPool      []Address          `json:",omitempty"` // Used & free.
	NumFreeAddresses map[string]uint    `json:",omitempty"` // Key: subnet ID.
	HealthStatus     string             `json:",omitempty"`
	HaveSerialNumber bool               `json:",omitempty"`
	SerialNumber     string             `json:",omitempty"`
	HaveSubnets      bool               `json:",omitempty"`
	Subnets          []Subnet           `json:",omitempty"`
	HaveVMs          bool               `json:",omitempty"`
	VMs              map[string]*VmInfo `json:",omitempty"` // Key: IP address.
}

type GetVmAccessTokenRequest struct {
	IpAddress net.IP
	Lifetime  time.Duration
}

type GetVmAccessTokenResponse struct {
	Token []byte `json:",omitempty"`
	Error string
}

type GetVmInfoRequest struct {
	IpAddress net.IP
}

type GetVmInfoResponse struct {
	VmInfo VmInfo
	Error  string
}

type GetVmUserDataRequest struct {
	AccessToken []byte
	IpAddress   net.IP
}

type GetVmUserDataResponse struct {
	Error  string
	Length uint64
}

// Data (length=Length) are streamed afterwards.

// The GetVmVolume() RPC is followed by the proto/rsync.GetBlocks message.
type GetVmVolumeRequest struct {
	AccessToken []byte
	IpAddress   net.IP
	VolumeIndex uint
}

type GetVmVolumeResponse struct {
	Error string
}

type ImportLocalVmRequest struct {
	VerificationCookie []byte `json:",omitempty"`
	VmInfo
	VolumeFilenames []string
}

type ImportLocalVmResponse struct {
	Error string
}

type ListVMsRequest struct {
	OwnerUsers []string
	Sort       bool
}

type ListVMsResponse struct {
	IpAddresses []net.IP
}

type ListVolumeDirectoriesRequest struct{}

type ListVolumeDirectoriesResponse struct {
	Directories []string
	Error       string
}

// LocalVolume locates a volume on the local file-system.
type LocalVolume struct {
	DirectoryToCleanup string
	Filename           string
}

// NOTE(review): a later revision orders the embedded VmInfo before
// VolumeLocations; field order matters for unkeyed struct literals —
// confirm which ordering is current before constructing positionally.
type LocalVmInfo struct {
	VolumeLocations []LocalVolume
	VmInfo
}

type MigrateVmRequest struct {
	AccessToken      []byte
	DhcpTimeout      time.Duration
	IpAddress        net.IP
	SourceHypervisor string
}

type MigrateVmResponse struct { // Multiple responses are sent.
	Error           string
	Final           bool // If true, this is the final response.
	ProgressMessage string
	RequestCommit   bool
}

// MigrateVmResponseResponse is the client's reply to a MigrateVmResponse
// that set RequestCommit.
type MigrateVmResponseResponse struct {
	Commit bool
}

type NetbootMachineRequest struct {
	Address                      Address
	Files                        map[string][]byte
	FilesExpiration              time.Duration
	Hostname                     string
	NumAcknowledgementsToWaitFor uint
	OfferExpiration              time.Duration
	Subnet                       *Subnet
	WaitTimeout                  time.Duration
}

type NetbootMachineResponse struct {
	Error string
}

type PatchVmImageRequest struct {
	ImageName    string
	ImageTimeout time.Duration
	IpAddress    net.IP
}

type PatchVmImageResponse struct { // Multiple responses are sent.
	Final           bool // If true, this is the final response.
	ProgressMessage string
	Error           string
}

type PrepareVmForMigrationRequest struct {
	AccessToken []byte
	Enable      bool
	IpAddress   net.IP
}

type PrepareVmForMigrationResponse struct {
	Error string
}

type ProbeVmPortRequest struct {
	IpAddress  net.IP
	PortNumber uint
	Timeout    time.Duration
}

type ProbeVmPortResponse struct {
	PortIsOpen bool
	Error      string
}

// ReplaceVmImageRequest replaces a VM's root image, either by streamed data
// (ImageDataSize) or by name/URL.
type ReplaceVmImageRequest struct {
	DhcpTimeout      time.Duration
	ImageDataSize    uint64
	ImageName        string `json:",omitempty"`
	ImageTimeout     time.Duration
	ImageURL         string `json:",omitempty"`
	IpAddress        net.IP
	MinimumFreeBytes uint64
	RoundupPower     uint64
	SkipBootloader   bool
}

// RAW image data (length=ImageDataSize) is streamed afterwards.

type ReplaceVmImageResponse struct { // Multiple responses are sent.
	DhcpTimedOut    bool
	Final           bool // If true, this is the final response.
	ProgressMessage string
	Error           string
}

type ReplaceVmUserDataRequest struct {
	IpAddress net.IP
	Size      uint64
}

// User data (length=Size) are streamed afterwards.
type ReplaceVmUserDataResponse struct {
	Error string
}

type RestoreVmFromSnapshotRequest struct {
	IpAddress         net.IP
	ForceIfNotStopped bool
}

type RestoreVmFromSnapshotResponse struct {
	Error string
}

type RestoreVmImageRequest struct {
	IpAddress net.IP
}

type RestoreVmImageResponse struct {
	Error string
}

type RestoreVmUserDataRequest struct {
	IpAddress net.IP
}

type RestoreVmUserDataResponse struct {
	Error string
}

type SnapshotVmRequest struct {
	IpAddress         net.IP
	ForceIfNotStopped bool
	RootOnly          bool
}

type SnapshotVmResponse struct {
	Error string
}

type StartVmRequest struct {
	AccessToken []byte
	DhcpTimeout time.Duration
	IpAddress   net.IP
}

type StartVmResponse struct {
	DhcpTimedOut bool
	Error        string
}

type StopVmRequest struct {
	AccessToken []byte
	IpAddress   net.IP
}

type StopVmResponse struct {
	Error string
}

// State is the lifecycle state of a VM (one of the State* constants).
type State uint

// Subnet describes an IP subnet known to the Hypervisor.
type Subnet struct {
	Id                string
	IpGateway         net.IP
	IpMask            net.IP // net.IPMask can't be JSON {en,de}coded.
	DomainName        string `json:",omitempty"`
	DomainNameServers []net.IP
	Manage            bool     `json:",omitempty"`
	VlanId            uint     `json:",omitempty"`
	AllowedGroups     []string `json:",omitempty"`
	AllowedUsers      []string `json:",omitempty"`
}

type TraceVmMetadataRequest struct {
	IpAddress net.IP
}

type TraceVmMetadataResponse struct {
	Error string
}

// A stream of strings (trace paths) follow.
type UpdateSubnetsRequest struct { Add []Subnet Change []Subnet Delete []string } type UpdateSubnetsResponse struct { Error string } type VmInfo struct { Address Address DestroyProtection bool `json:",omitempty"` Hostname string `json:",omitempty"` ImageName string `json:",omitempty"` ImageURL string `json:",omitempty"` MemoryInMiB uint64 MilliCPUs uint OwnerGroups []string `json:",omitempty"` OwnerUsers []string `json:",omitempty"` SpreadVolumes bool `json:",omitempty"` State State Tags tags.Tags `json:",omitempty"` SecondaryAddresses []Address `json:",omitempty"` SecondarySubnetIDs []string `json:",omitempty"` SubnetId string `json:",omitempty"` Uncommitted bool `json:",omitempty"` Volumes []Volume `json:",omitempty"` } type Volume struct { Size uint64 Format VolumeFormat } type VolumeFormat uint Reorder proto/hypervisor.LocalVmInfo fields. package hypervisor import ( "net" "time" "github.com/Symantec/Dominator/lib/tags" ) const ( StateStarting = 0 StateRunning = 1 StateFailedToStart = 2 StateStopping = 3 StateStopped = 4 StateDestroying = 5 StateMigrating = 6 StateExporting = 7 VolumeFormatRaw = 0 VolumeFormatQCOW2 = 1 ) type AcknowledgeVmRequest struct { IpAddress net.IP } type AcknowledgeVmResponse struct { Error string } type Address struct { IpAddress net.IP `json:",omitempty"` MacAddress string } type BecomePrimaryVmOwnerRequest struct { IpAddress net.IP } type BecomePrimaryVmOwnerResponse struct { Error string } type ChangeAddressPoolRequest struct { AddressesToAdd []Address // Will be added to free pool. AddressesToRemove []Address // Will be removed from free pool. MaximumFreeAddresses map[string]uint // Key: subnet ID. 
} type ChangeAddressPoolResponse struct { Error string } type ChangeOwnersRequest struct { OwnerGroups []string `json:",omitempty"` OwnerUsers []string `json:",omitempty"` } type ChangeOwnersResponse struct { Error string } type ChangeVmDestroyProtectionRequest struct { DestroyProtection bool IpAddress net.IP } type ChangeVmDestroyProtectionResponse struct { Error string } type ChangeVmOwnerUsersRequest struct { IpAddress net.IP OwnerUsers []string } type ChangeVmOwnerUsersResponse struct { Error string } type ChangeVmTagsRequest struct { IpAddress net.IP Tags tags.Tags } type ChangeVmTagsResponse struct { Error string } type CommitImportedVmRequest struct { IpAddress net.IP } type CommitImportedVmResponse struct { Error string } // The ConnectToVmSerialPort RPC is fully streamed. After the request/response, // the connection/client is hijacked and each side of the connection will send // a stream of bytes. type ConnectToVmSerialPortRequest struct { IpAddress net.IP PortNumber uint } type ConnectToVmSerialPortResponse struct { Error string } type CopyVmRequest struct { AccessToken []byte IpAddress net.IP SourceHypervisor string VmInfo } type CopyVmResponse struct { // Multiple responses are sent. Error string Final bool // If true, this is the final response. IpAddress net.IP ProgressMessage string } type CreateVmRequest struct { DhcpTimeout time.Duration ImageDataSize uint64 ImageTimeout time.Duration MinimumFreeBytes uint64 RoundupPower uint64 SecondaryVolumes []Volume SkipBootloader bool UserDataSize uint64 VmInfo } // RAW image data (length=ImageDataSize) and user data (length=UserDataSize) // are streamed afterwards. type CreateVmResponse struct { // Multiple responses are sent. DhcpTimedOut bool Final bool // If true, this is the final response. 
IpAddress net.IP ProgressMessage string Error string } type DeleteVmVolumeRequest struct { AccessToken []byte IpAddress net.IP VolumeIndex uint } type DeleteVmVolumeResponse struct { Error string } type DestroyVmRequest struct { AccessToken []byte IpAddress net.IP } type DestroyVmResponse struct { Error string } type DiscardVmAccessTokenRequest struct { AccessToken []byte IpAddress net.IP } type DiscardVmAccessTokenResponse struct { Error string } type DiscardVmOldImageRequest struct { IpAddress net.IP } type DiscardVmOldImageResponse struct { Error string } type DiscardVmOldUserDataRequest struct { IpAddress net.IP } type DiscardVmOldUserDataResponse struct { Error string } type DiscardVmSnapshotRequest struct { IpAddress net.IP } type DiscardVmSnapshotResponse struct { Error string } type ExportLocalVmRequest struct { IpAddress net.IP VerificationCookie []byte `json:",omitempty"` } type ExportLocalVmResponse struct { Error string VmInfo LocalVmInfo } // The GetUpdates() RPC is fully streamed. // The client may or may not send GetUpdateRequest messages to the server. // The server sends a stream of Update messages. type GetUpdateRequest struct{} type Update struct { HaveAddressPool bool `json:",omitempty"` AddressPool []Address `json:",omitempty"` // Used & free. NumFreeAddresses map[string]uint `json:",omitempty"` // Key: subnet ID. HealthStatus string `json:",omitempty"` HaveSerialNumber bool `json:",omitempty"` SerialNumber string `json:",omitempty"` HaveSubnets bool `json:",omitempty"` Subnets []Subnet `json:",omitempty"` HaveVMs bool `json:",omitempty"` VMs map[string]*VmInfo `json:",omitempty"` // Key: IP address. 
} type GetVmAccessTokenRequest struct { IpAddress net.IP Lifetime time.Duration } type GetVmAccessTokenResponse struct { Token []byte `json:",omitempty"` Error string } type GetVmInfoRequest struct { IpAddress net.IP } type GetVmInfoResponse struct { VmInfo VmInfo Error string } type GetVmUserDataRequest struct { AccessToken []byte IpAddress net.IP } type GetVmUserDataResponse struct { Error string Length uint64 } // Data (length=Length) are streamed afterwards. // The GetVmVolume() RPC is followed by the proto/rsync.GetBlocks message. type GetVmVolumeRequest struct { AccessToken []byte IpAddress net.IP VolumeIndex uint } type GetVmVolumeResponse struct { Error string } type ImportLocalVmRequest struct { VerificationCookie []byte `json:",omitempty"` VmInfo VolumeFilenames []string } type ImportLocalVmResponse struct { Error string } type ListVMsRequest struct { OwnerUsers []string Sort bool } type ListVMsResponse struct { IpAddresses []net.IP } type ListVolumeDirectoriesRequest struct{} type ListVolumeDirectoriesResponse struct { Directories []string Error string } type LocalVolume struct { DirectoryToCleanup string Filename string } type LocalVmInfo struct { VmInfo VolumeLocations []LocalVolume } type MigrateVmRequest struct { AccessToken []byte DhcpTimeout time.Duration IpAddress net.IP SourceHypervisor string } type MigrateVmResponse struct { // Multiple responses are sent. Error string Final bool // If true, this is the final response. 
ProgressMessage string RequestCommit bool } type MigrateVmResponseResponse struct { Commit bool } type NetbootMachineRequest struct { Address Address Files map[string][]byte FilesExpiration time.Duration Hostname string NumAcknowledgementsToWaitFor uint OfferExpiration time.Duration Subnet *Subnet WaitTimeout time.Duration } type NetbootMachineResponse struct { Error string } type PatchVmImageRequest struct { ImageName string ImageTimeout time.Duration IpAddress net.IP } type PatchVmImageResponse struct { // Multiple responses are sent. Final bool // If true, this is the final response. ProgressMessage string Error string } type PrepareVmForMigrationRequest struct { AccessToken []byte Enable bool IpAddress net.IP } type PrepareVmForMigrationResponse struct { Error string } type ProbeVmPortRequest struct { IpAddress net.IP PortNumber uint Timeout time.Duration } type ProbeVmPortResponse struct { PortIsOpen bool Error string } type ReplaceVmImageRequest struct { DhcpTimeout time.Duration ImageDataSize uint64 ImageName string `json:",omitempty"` ImageTimeout time.Duration ImageURL string `json:",omitempty"` IpAddress net.IP MinimumFreeBytes uint64 RoundupPower uint64 SkipBootloader bool } // RAW image data (length=ImageDataSize) is streamed afterwards. type ReplaceVmImageResponse struct { // Multiple responses are sent. DhcpTimedOut bool Final bool // If true, this is the final response. ProgressMessage string Error string } type ReplaceVmUserDataRequest struct { IpAddress net.IP Size uint64 } // User data (length=Size) are streamed afterwards. 
type ReplaceVmUserDataResponse struct {
	Error string
}

type RestoreVmFromSnapshotRequest struct {
	IpAddress         net.IP
	ForceIfNotStopped bool
}

type RestoreVmFromSnapshotResponse struct {
	Error string
}

type RestoreVmImageRequest struct {
	IpAddress net.IP
}

type RestoreVmImageResponse struct {
	Error string
}

type RestoreVmUserDataRequest struct {
	IpAddress net.IP
}

type RestoreVmUserDataResponse struct {
	Error string
}

type SnapshotVmRequest struct {
	IpAddress         net.IP
	ForceIfNotStopped bool
	RootOnly          bool
}

type SnapshotVmResponse struct {
	Error string
}

type StartVmRequest struct {
	AccessToken []byte
	DhcpTimeout time.Duration
	IpAddress   net.IP
}

type StartVmResponse struct {
	DhcpTimedOut bool
	Error        string
}

type StopVmRequest struct {
	AccessToken []byte
	IpAddress   net.IP
}

type StopVmResponse struct {
	Error string
}

// State is the lifecycle state of a VM (one of the State* constants).
type State uint

// Subnet describes an IP subnet known to the Hypervisor.
type Subnet struct {
	Id                string
	IpGateway         net.IP
	IpMask            net.IP // net.IPMask can't be JSON {en,de}coded.
	DomainName        string `json:",omitempty"`
	DomainNameServers []net.IP
	Manage            bool     `json:",omitempty"`
	VlanId            uint     `json:",omitempty"`
	AllowedGroups     []string `json:",omitempty"`
	AllowedUsers      []string `json:",omitempty"`
}

type TraceVmMetadataRequest struct {
	IpAddress net.IP
}

type TraceVmMetadataResponse struct {
	Error string
}

// A stream of strings (trace paths) follow.
type UpdateSubnetsRequest struct {
	Add    []Subnet
	Change []Subnet
	Delete []string
}

type UpdateSubnetsResponse struct {
	Error string
}

// VmInfo describes the configuration and current state of a VM.
type VmInfo struct {
	Address            Address
	DestroyProtection  bool   `json:",omitempty"`
	Hostname           string `json:",omitempty"`
	ImageName          string `json:",omitempty"`
	ImageURL           string `json:",omitempty"`
	MemoryInMiB        uint64
	MilliCPUs          uint
	OwnerGroups        []string  `json:",omitempty"`
	OwnerUsers         []string  `json:",omitempty"`
	SpreadVolumes      bool      `json:",omitempty"`
	State              State
	Tags               tags.Tags `json:",omitempty"`
	SecondaryAddresses []Address `json:",omitempty"`
	SecondarySubnetIDs []string  `json:",omitempty"`
	SubnetId           string    `json:",omitempty"`
	Uncommitted        bool      `json:",omitempty"`
	Volumes            []Volume  `json:",omitempty"`
}

// Volume describes one block-storage volume of a VM.
type Volume struct {
	Size   uint64
	Format VolumeFormat
}

// VolumeFormat is the on-disk image format of a Volume (one of the
// VolumeFormat* constants).
type VolumeFormat uint
package watch import ( "io/ioutil" "os" "reflect" "testing" "time" ) func dum() { return } func TestWatch(t *testing.T) { tests := []struct { paths map[string]func() expWatched []string expWatchers []string }{ { map[string]func(){ "testdata/dummy.txt": dum, "testdata/test.txt": dum, }, []string{"testdata/dummy.txt", "testdata/test.txt"}, []string{"testdata/dummy.txt", "testdata/test.txt"}, }, { map[string]func(){ "testdata": dum, "testdata/dummy.txt": dum, "testdata/test.txt": dum, }, []string{"testdata", "testdata/dummy.txt", "testdata/test.txt"}, []string{"testdata"}, }, { map[string]func(){ "testdata/dummy.txt": dum, "testdata/test.txt": dum, "testdata": dum, }, []string{"testdata", "testdata/dummy.txt", "testdata/test.txt"}, []string{"testdata"}, }, // 2 path refer same file or dir but different(e.g abs path and relative path) } for i, test := range tests { for path, action := range test.paths { Watch(path, action) } if len(watched) != len(test.expWatched) { t.Errorf("Test %d: Expected len of watched %d, but got %d", i, len(test.expWatched), len(watched)) } for _, p := range test.expWatched { if _, exist := watched[p]; !exist { t.Errorf("Test %d: Expected %s exist in watched", i, p) } } if !reflect.DeepEqual(test.expWatchers, watchers) { t.Errorf("Test %d: Expected watchers %v, but got %v", i, test.expWatchers, watchers) } for _, path := range test.expWatchers { UnWatch(path) } reinit() } } func TestUnWatch(t *testing.T) { tests := []struct { watchs []string unWatchs []string expWatched []string expWatchers []string }{ { []string{"testdata/dummy.txt", "testdata/test.txt"}, []string{"testdata/dummy.txt"}, []string{"testdata/test.txt"}, []string{"testdata/test.txt"}, }, { []string{"testdata", "testdata/dummy.txt", "testdata/test.txt"}, []string{"testdata"}, []string{"testdata/dummy.txt", "testdata/test.txt"}, []string{"testdata/test.txt", "testdata/dummy.txt"}, }, } for i, test := range tests { for _, path := range test.watchs { Watch(path, dum) } for _, path := 
range test.unWatchs { UnWatch(path) } if len(watched) != len(test.expWatched) { t.Errorf("Test %d: Expected len of watched %d, but got %d", i, len(test.expWatched), len(watched)) } for _, p := range test.expWatched { if _, exist := watched[p]; !exist { t.Errorf("Test %d: Expected %s exist in watched", i, p) } } if !reflect.DeepEqual(test.expWatchers, watchers) { t.Errorf("Test %d: Expected watchers %v, but got %v", i, test.expWatchers, watchers) } for _, path := range test.expWatchers { UnWatch(path) } reinit() } } type dumView struct { Text string Name string } func (d *dumView) Reload() { d.Text = "Reloaded" } func (d *dumView) Rename() { d.Name = "Renamed" } func TestObserve(t *testing.T) { path := "testdata/test.txt" v := new(dumView) Watch(path, v.Reload) go Observe() if err := ioutil.WriteFile(path, []byte("test"), 0644); err != nil { t.Fatalf("WriteFile error: %s", err) } time.Sleep(time.Millisecond * 50) if v.Text != "Reloaded" { t.Errorf("Expected dumView Text %s, but got %s", "Reloaded", v.Text) } ioutil.WriteFile(path, []byte(""), 0644) UnWatch(path) reinit() } func TestObserveDirectory(t *testing.T) { dir := "testdata" path := "testdata/test.txt" v := new(dumView) Watch(path, v.Reload) Watch(dir, nil) go Observe() if !reflect.DeepEqual(watchers, []string{"testdata"}) { t.Errorf("Expected watchers be equal to %v, but got %v", []string{"testdata"}, watchers) } if err := ioutil.WriteFile(path, []byte("test"), 0644); err != nil { t.Fatalf("WriteFile error: %s", err) } time.Sleep(time.Millisecond * 50) if v.Text != "Reloaded" { t.Errorf("Expected dumView Text %s, but got %s", "Reloaded", v.Text) } ioutil.WriteFile(path, []byte(""), 0644) UnWatch(dir) reinit() } func TestObserveCreateEvent(t *testing.T) { path := "testdata/new.txt" v := new(dumView) Watch(path, v.Reload) go Observe() if !reflect.DeepEqual(watchers, []string{"testdata"}) { t.Errorf("Expected watchers be equal to %v, but got %v", []string{path}, watchers) } if err := ioutil.WriteFile(path, 
[]byte("test"), 0644); err != nil { t.Fatalf("WriteFile error: %s", err) } time.Sleep(time.Millisecond * 50) if v.Text != "Reloaded" { t.Errorf("Expected dumView Text %s, but got %s", "Reloaded", v.Text) } os.Remove(path) UnWatch(path) reinit() } func TestObserveDeleteEvent(t *testing.T) { path := "testdata/dummy.txt" v := new(dumView) Watch(path, v.Reload) go Observe() os.Remove(path) time.Sleep(time.Millisecond * 50) if v.Text != "Reloaded" { t.Errorf("Expected dumView Text %s, but got %s", "Reloaded", v.Text) } if !reflect.DeepEqual(watchers, []string{"testdata"}) { t.Errorf("Expected watchers be equal to %v, but got %v", []string{"testdata"}, watchers) } UnWatch("testdata") ioutil.WriteFile(path, []byte(""), 0644) reinit() } func TestObserveRenameEvent(t *testing.T) { path := "testdata/dummy.txt" v := new(dumView) Watch(path, v.Reload) go Observe() os.Rename(path, "testdata/rename.txt") time.Sleep(time.Millisecond * 50) if v.Text != "Reloaded" { t.Errorf("Expected dumView Text %s, but got %s", "Reloaded", v.Text) } os.Rename("testdata/rename.txt", path) UnWatch(path) reinit() } func TestExist(t *testing.T) { test := struct { array []string elms []string exps []bool }{ []string{"a", "b", "c", "d"}, []string{"a", "t", "A"}, []bool{true, false, false}, } for i, exp := range test.exps { if exist(test.array, test.elms[i]) != exp { t.Errorf("Expected in %v exist result of element %s be %v, but got %v", test.array, test.elms[i], exp, exist(test.array, test.elms[i])) } } } func TestRemove(t *testing.T) { tests := []struct { slice []string remove string exp []string }{ { []string{"a", "b", "c"}, "a", []string{"c", "b"}, }, { []string{"a", "b", "c"}, "k", []string{"a", "b", "c"}, }, } for i, test := range tests { if exp := remove(test.slice, test.remove); !reflect.DeepEqual(exp, test.exp) { t.Errorf("Test %d: Expected %v be equal to %v", i, exp, test.exp) } } } Changed tests to use watcher type package watch import ( "io/ioutil" "os" "reflect" "testing" "time" ) func 
dum() { return } func TestWatch(t *testing.T) { tests := []struct { paths map[string]func() expWatched []string expWatchers []string }{ { map[string]func(){ "testdata/dummy.txt": dum, "testdata/test.txt": dum, }, []string{"testdata/dummy.txt", "testdata/test.txt"}, []string{"testdata/dummy.txt", "testdata/test.txt"}, }, { map[string]func(){ "testdata": dum, "testdata/dummy.txt": dum, "testdata/test.txt": dum, }, []string{"testdata", "testdata/dummy.txt", "testdata/test.txt"}, []string{"testdata"}, }, { map[string]func(){ "testdata/dummy.txt": dum, "testdata/test.txt": dum, "testdata": dum, }, []string{"testdata", "testdata/dummy.txt", "testdata/test.txt"}, []string{"testdata"}, }, // 2 path refer same file or dir but different(e.g abs path and relative path) } for i, test := range tests { watcher, err := NewWatcher() if err != nil { t.Fatalf("Couldn't create watcher: %s", err) } for path, action := range test.paths { watcher.Watch(path, action) } if len(watcher.watched) != len(test.expWatched) { t.Errorf("Test %d: Expected len of watched %d, but got %d", i, len(test.expWatched), len(watcher.watched)) } for _, p := range test.expWatched { if _, exist := watcher.watched[p]; !exist { t.Errorf("Test %d: Expected %s exist in watched", i, p) } } if !reflect.DeepEqual(test.expWatchers, watcher.watchers) { t.Errorf("Test %d: Expected watchers %v, but got %v", i, test.expWatchers, watcher.watchers) } } } func TestUnWatch(t *testing.T) { tests := []struct { watchs []string unWatchs []string expWatched []string expWatchers []string }{ { []string{"testdata/dummy.txt", "testdata/test.txt"}, []string{"testdata/dummy.txt"}, []string{"testdata/test.txt"}, []string{"testdata/test.txt"}, }, { []string{"testdata", "testdata/dummy.txt", "testdata/test.txt"}, []string{"testdata"}, []string{"testdata/dummy.txt", "testdata/test.txt"}, []string{"testdata/test.txt", "testdata/dummy.txt"}, }, } for i, test := range tests { watcher, err := NewWatcher() if err != nil { t.Fatalf("Couldn't 
create watcher: %s", err) } for _, path := range test.watchs { watcher.Watch(path, dum) } for _, path := range test.unWatchs { watcher.UnWatch(path) } if len(watcher.watched) != len(test.expWatched) { t.Errorf("Test %d: Expected len of watched %d, but got %d", i, len(test.expWatched), len(watcher.watched)) } for _, p := range test.expWatched { if _, exist := watcher.watched[p]; !exist { t.Errorf("Test %d: Expected %s exist in watched", i, p) } } if !reflect.DeepEqual(test.expWatchers, watcher.watchers) { t.Errorf("Test %d: Expected watchers %v, but got %v", i, test.expWatchers, watcher.watchers) } } } type dumView struct { Text string Name string } func (d *dumView) Reload() { d.Text = "Reloaded" } func (d *dumView) Rename() { d.Name = "Renamed" } func TestObserve(t *testing.T) { path := "testdata/test.txt" watcher, err := NewWatcher() if err != nil { t.Fatalf("Couldn't create watcher: %s", err) } v := new(dumView) watcher.Watch(path, v.Reload) go watcher.Observe() if err := ioutil.WriteFile(path, []byte("test"), 0644); err != nil { t.Fatalf("WriteFile error: %s", err) } time.Sleep(time.Millisecond * 50) if v.Text != "Reloaded" { t.Errorf("Expected dumView Text %s, but got %s", "Reloaded", v.Text) } ioutil.WriteFile(path, []byte(""), 0644) } func TestObserveDirectory(t *testing.T) { dir := "testdata" path := "testdata/test.txt" watcher, err := NewWatcher() if err != nil { t.Fatalf("Couldn't create watcher: %s", err) } v := new(dumView) watcher.Watch(path, v.Reload) watcher.Watch(dir, nil) go watcher.Observe() if !reflect.DeepEqual(watcher.watchers, []string{"testdata"}) { t.Errorf("Expected watchers be equal to %v, but got %v", []string{"testdata"}, watcher.watchers) } if err := ioutil.WriteFile(path, []byte("test"), 0644); err != nil { t.Fatalf("WriteFile error: %s", err) } time.Sleep(time.Millisecond * 50) if v.Text != "Reloaded" { t.Errorf("Expected dumView Text %s, but got %s", "Reloaded", v.Text) } ioutil.WriteFile(path, []byte(""), 0644) } func 
TestObserveCreateEvent(t *testing.T) { path := "testdata/new.txt" watcher, err := NewWatcher() if err != nil { t.Fatalf("Couldn't create watcher: %s", err) } v := new(dumView) watcher.Watch(path, v.Reload) go watcher.Observe() if !reflect.DeepEqual(watcher.watchers, []string{"testdata"}) { t.Errorf("Expected watchers be equal to %v, but got %v", []string{path}, watcher.watchers) } if err := ioutil.WriteFile(path, []byte("test"), 0644); err != nil { t.Fatalf("WriteFile error: %s", err) } time.Sleep(time.Millisecond * 50) if v.Text != "Reloaded" { t.Errorf("Expected dumView Text %s, but got %s", "Reloaded", v.Text) } os.Remove(path) } func TestObserveDeleteEvent(t *testing.T) { path := "testdata/dummy.txt" watcher, err := NewWatcher() if err != nil { t.Fatalf("Couldn't create watcher: %s", err) } v := new(dumView) watcher.Watch(path, v.Reload) go watcher.Observe() os.Remove(path) time.Sleep(time.Millisecond * 50) if !reflect.DeepEqual(watcher.watchers, []string{"testdata"}) { t.Errorf("Expected watchers be equal to %v, but got %v", []string{"testdata"}, watcher.watchers) } if v.Text != "Reloaded" { t.Errorf("Expected dumView Text %s, but got %s", "Reloaded", v.Text) } ioutil.WriteFile(path, []byte(""), 0644) } func TestObserveRenameEvent(t *testing.T) { path := "testdata/dummy.txt" watcher, err := NewWatcher() if err != nil { t.Fatalf("Couldn't create watcher: %s", err) } v := new(dumView) watcher.Watch(path, v.Reload) go watcher.Observe() os.Rename(path, "testdata/rename.txt") time.Sleep(time.Millisecond * 50) if v.Text != "Reloaded" { t.Errorf("Expected dumView Text %s, but got %s", "Reloaded", v.Text) } os.Rename("testdata/rename.txt", path) } func TestExist(t *testing.T) { test := struct { array []string elms []string exps []bool }{ []string{"a", "b", "c", "d"}, []string{"a", "t", "A"}, []bool{true, false, false}, } for i, exp := range test.exps { if exist(test.array, test.elms[i]) != exp { t.Errorf("Expected in %v exist result of element %s be %v, but got %v", 
test.array, test.elms[i], exp, exist(test.array, test.elms[i])) } } } func TestRemove(t *testing.T) { tests := []struct { slice []string remove string exp []string }{ { []string{"a", "b", "c"}, "a", []string{"c", "b"}, }, { []string{"a", "b", "c"}, "k", []string{"a", "b", "c"}, }, } for i, test := range tests { if exp := remove(test.slice, test.remove); !reflect.DeepEqual(exp, test.exp) { t.Errorf("Test %d: Expected %v be equal to %v", i, exp, test.exp) } } }
package life import ( "errors" "fmt" "path/filepath" "runtime" "testing" "time" ) func TestLife(t *testing.T) { v := NewSingleLife() started := waitOnChan(v.started, 5*time.Millisecond) if started == nil { t.Fatalf("SingleLife started when it wasn't supposed to") } terminated := waitOnChan(v.terminated, 5*time.Millisecond) if terminated == nil { t.Fatalf("SingleLife terminated when it wasn't supposed to") } v.Start() started = waitOnChan(v.started, 5*time.Millisecond) ok(t, started) terminated = waitOnChan(v.terminated, 5*time.Millisecond) if terminated == nil { t.Fatalf("SingleLife terminated when it wasn't supposed to") } // Set up a maximum wait time before failing timer := time.NewTimer(50 * time.Millisecond) defer timer.Stop() errChan := make(chan error, 1) go func() { errChan <- v.Close() }() select { case <-timer.C: t.Fatalf("Timed out waiting for close to finish") case err := <-errChan: if err != nil { t.Fatalf("Error received from Close call: %s", err) } } terminated = waitOnChan(v.terminated, 5*time.Millisecond) ok(t, terminated) } func TestLife_multiRoutine(t *testing.T) { // TODO rename variable p := NewLifeWithChildren() started := waitOnChan(p.started, 5*time.Millisecond) if started == nil { t.Fatalf("LifeWithChildren started when it wasn't supposed to") } terminated := waitOnChan(p.terminated, 5*time.Millisecond) if terminated == nil { t.Fatalf("LifeWithChildren terminated when it wasn't supposed to") } if len(p.childrenStarted) > 0 { t.Fatalf("Subroutines have started when they weren't supposed to") } if len(p.childrenTerminated) > 0 { t.Fatalf("Subroutines have started when they weren't supposed to") } // Start LifeWithChildren and make sure that both the main goroutine and its subroutines are running p.Start() started = waitOnChan(p.started, 5*time.Millisecond) ok(t, started) terminated = waitOnChan(p.terminated, 5*time.Millisecond) if terminated == nil { t.Fatalf("LifeWithChildren terminated when it wasn't supposed to") } for i := 0; i < 
p.numChildren; i++ { err := waitOnChan(p.childrenStarted, 5*time.Millisecond) ok(t, err) } if len(p.childrenStarted) > 0 { t.Fatalf("Too many subroutines started") } terminated = waitOnChan(p.childrenTerminated, 5*time.Millisecond) if terminated == nil { t.Fatalf("Subroutines terminated when they weren't supposed to") } // Set up a maximum wait time before failing timer := time.NewTimer(50 * time.Millisecond) defer timer.Stop() errChan := make(chan error, 1) go func() { errChan <- p.Close() }() select { case <-timer.C: t.Fatalf("Timed out waiting for close to finish") case err := <-errChan: if err != nil { t.Fatalf("Error received from Close call: %s", err) } } terminated = waitOnChan(p.terminated, 5*time.Millisecond) ok(t, terminated) // Check on the subroutines for i := 0; i < p.numChildren; i++ { err := waitOnChan(p.childrenTerminated, 5*time.Millisecond) ok(t, err) } if len(p.childrenTerminated) > 0 { t.Fatalf("Too many subroutines terminated") } if len(p.childrenStarted) > 0 { t.Fatalf("Subroutine has started when it shouldn't have") } } type SingleLife struct { *Life started chan struct{} terminated chan struct{} } func NewSingleLife() SingleLife { l := SingleLife{ Life: NewLife(), started: make(chan struct{}, 0), terminated: make(chan struct{}, 0), } l.SetRun(l.run) return l } func (v SingleLife) run() { close(v.started) select { case <-v.Life.Done: // Sleep to make sure that life waits for this to finish rather than returning immediately time.Sleep(5 * time.Millisecond) close(v.terminated) } } type LifeWithChildren struct { *Life started chan struct{} terminated chan struct{} numChildren int childrenStarted chan struct{} childrenTerminated chan struct{} } func NewLifeWithChildren() LifeWithChildren { numSubRoutines := 5 p := LifeWithChildren{ Life: NewLife(), started: make(chan struct{}, 0), terminated: make(chan struct{}, 0), numChildren: numSubRoutines, childrenStarted: make(chan struct{}, numSubRoutines), childrenTerminated: make(chan struct{}, 
numSubRoutines), } p.SetRun(p.run) return p } func (p LifeWithChildren) run() { defer close(p.terminated) close(p.started) for i := 0; i < p.numChildren; i++ { p.Life.WGAdd(1) go p.subRoutine() } select { case <-p.Life.Done: return } } func (p LifeWithChildren) subRoutine() { defer p.Life.WGDone() p.childrenStarted <- struct{}{} select { case <-p.Life.Done: // Same as above: make sure that life waits for this to finish time.Sleep(5 * time.Millisecond) p.childrenTerminated <- struct{}{} } } func waitOnChan(c chan struct{}, wait time.Duration) (err error) { timer := time.NewTimer(wait) defer timer.Stop() select { case <-timer.C: return errors.New("timed out") case <-c: return nil } } // ok fails the test if an err is not nil. func ok(tb testing.TB, err error) { if err != nil { _, file, line, _ := runtime.Caller(1) fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error()) tb.FailNow() } } Remove TODO comment package life import ( "errors" "fmt" "path/filepath" "runtime" "testing" "time" ) func TestLife(t *testing.T) { v := NewSingleLife() started := waitOnChan(v.started, 5*time.Millisecond) if started == nil { t.Fatalf("SingleLife started when it wasn't supposed to") } terminated := waitOnChan(v.terminated, 5*time.Millisecond) if terminated == nil { t.Fatalf("SingleLife terminated when it wasn't supposed to") } v.Start() started = waitOnChan(v.started, 5*time.Millisecond) ok(t, started) terminated = waitOnChan(v.terminated, 5*time.Millisecond) if terminated == nil { t.Fatalf("SingleLife terminated when it wasn't supposed to") } // Set up a maximum wait time before failing timer := time.NewTimer(50 * time.Millisecond) defer timer.Stop() errChan := make(chan error, 1) go func() { errChan <- v.Close() }() select { case <-timer.C: t.Fatalf("Timed out waiting for close to finish") case err := <-errChan: if err != nil { t.Fatalf("Error received from Close call: %s", err) } } terminated = waitOnChan(v.terminated, 
5*time.Millisecond) ok(t, terminated) } func TestLife_multiRoutine(t *testing.T) { p := NewLifeWithChildren() started := waitOnChan(p.started, 5*time.Millisecond) if started == nil { t.Fatalf("LifeWithChildren started when it wasn't supposed to") } terminated := waitOnChan(p.terminated, 5*time.Millisecond) if terminated == nil { t.Fatalf("LifeWithChildren terminated when it wasn't supposed to") } if len(p.childrenStarted) > 0 { t.Fatalf("Subroutines have started when they weren't supposed to") } if len(p.childrenTerminated) > 0 { t.Fatalf("Subroutines have started when they weren't supposed to") } // Start LifeWithChildren and make sure that both the main goroutine and its subroutines are running p.Start() started = waitOnChan(p.started, 5*time.Millisecond) ok(t, started) terminated = waitOnChan(p.terminated, 5*time.Millisecond) if terminated == nil { t.Fatalf("LifeWithChildren terminated when it wasn't supposed to") } for i := 0; i < p.numChildren; i++ { err := waitOnChan(p.childrenStarted, 5*time.Millisecond) ok(t, err) } if len(p.childrenStarted) > 0 { t.Fatalf("Too many subroutines started") } terminated = waitOnChan(p.childrenTerminated, 5*time.Millisecond) if terminated == nil { t.Fatalf("Subroutines terminated when they weren't supposed to") } // Set up a maximum wait time before failing timer := time.NewTimer(50 * time.Millisecond) defer timer.Stop() errChan := make(chan error, 1) go func() { errChan <- p.Close() }() select { case <-timer.C: t.Fatalf("Timed out waiting for close to finish") case err := <-errChan: if err != nil { t.Fatalf("Error received from Close call: %s", err) } } terminated = waitOnChan(p.terminated, 5*time.Millisecond) ok(t, terminated) // Check on the subroutines for i := 0; i < p.numChildren; i++ { err := waitOnChan(p.childrenTerminated, 5*time.Millisecond) ok(t, err) } if len(p.childrenTerminated) > 0 { t.Fatalf("Too many subroutines terminated") } if len(p.childrenStarted) > 0 { t.Fatalf("Subroutine has started when it shouldn't 
have") } } type SingleLife struct { *Life started chan struct{} terminated chan struct{} } func NewSingleLife() SingleLife { l := SingleLife{ Life: NewLife(), started: make(chan struct{}, 0), terminated: make(chan struct{}, 0), } l.SetRun(l.run) return l } func (v SingleLife) run() { close(v.started) select { case <-v.Life.Done: // Sleep to make sure that life waits for this to finish rather than returning immediately time.Sleep(5 * time.Millisecond) close(v.terminated) } } type LifeWithChildren struct { *Life started chan struct{} terminated chan struct{} numChildren int childrenStarted chan struct{} childrenTerminated chan struct{} } func NewLifeWithChildren() LifeWithChildren { numSubRoutines := 5 p := LifeWithChildren{ Life: NewLife(), started: make(chan struct{}, 0), terminated: make(chan struct{}, 0), numChildren: numSubRoutines, childrenStarted: make(chan struct{}, numSubRoutines), childrenTerminated: make(chan struct{}, numSubRoutines), } p.SetRun(p.run) return p } func (p LifeWithChildren) run() { defer close(p.terminated) close(p.started) for i := 0; i < p.numChildren; i++ { p.Life.WGAdd(1) go p.subRoutine() } select { case <-p.Life.Done: return } } func (p LifeWithChildren) subRoutine() { defer p.Life.WGDone() p.childrenStarted <- struct{}{} select { case <-p.Life.Done: // Same as above: make sure that life waits for this to finish time.Sleep(5 * time.Millisecond) p.childrenTerminated <- struct{}{} } } func waitOnChan(c chan struct{}, wait time.Duration) (err error) { timer := time.NewTimer(wait) defer timer.Stop() select { case <-timer.C: return errors.New("timed out") case <-c: return nil } } // ok fails the test if an err is not nil. func ok(tb testing.TB, err error) { if err != nil { _, file, line, _ := runtime.Caller(1) fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error()) tb.FailNow() } }
// Package benchmark_test compares the performance of several TOML
// unmarshalers (go-toml v2, go-toml v1, and BurntSushi/toml) against
// the same documents.
package benchmark_test

import (
	"io/ioutil"
	"testing"
	"time"

	tomlbs "github.com/BurntSushi/toml"
	tomlv1 "github.com/pelletier/go-toml-v1"
	"github.com/pelletier/go-toml/v2"
	"github.com/stretchr/testify/require"
)

// runner pairs a short display name with an Unmarshal implementation so
// the same benchmark body can be executed against each library.
type runner struct {
	name      string
	unmarshal func([]byte, interface{}) error
}

// runners lists every unmarshaler under comparison.
var runners = []runner{
	{"v2", toml.Unmarshal},
	{"v1", tomlv1.Unmarshal},
	{"bs", tomlbs.Unmarshal},
}

// bench runs f once per registered unmarshaler as a named sub-benchmark.
func bench(b *testing.B, f func(r runner, b *testing.B)) {
	for _, r := range runners {
		b.Run(r.name, func(b *testing.B) {
			f(r, b)
		})
	}
}

// BenchmarkUnmarshalSimple measures decoding of a minimal one-key document.
func BenchmarkUnmarshalSimple(b *testing.B) {
	bench(b, func(r runner, b *testing.B) {
		d := struct {
			A string
		}{}
		doc := []byte(`A = "hello"`)
		for i := 0; i < b.N; i++ {
			err := r.unmarshal(doc, &d)
			if err != nil {
				panic(err)
			}
		}
	})
}

// benchmarkDoc mirrors the structure of benchmark.toml so the reference
// file can be decoded into concrete Go types by every library.
type benchmarkDoc struct {
	Table struct {
		Key      string
		Subtable struct {
			Key string
		}
		Inline struct {
			Name struct {
				First string
				Last  string
			}
			Point struct {
				X int64
				U int64
			}
		}
	}
	String struct {
		Basic struct {
			Basic string
		}
		Multiline struct {
			Key1      string
			Key2      string
			Key3      string
			Continued struct {
				Key1 string
				Key2 string
				Key3 string
			}
		}
		Literal struct {
			Winpath   string
			Winpath2  string
			Quoted    string
			Regex     string
			Multiline struct {
				Regex2 string
				Lines  string
			}
		}
	}
	Integer struct {
		Key1        int64
		Key2        int64
		Key3        int64
		Key4        int64
		Underscores struct {
			Key1 int64
			Key2 int64
			Key3 int64
		}
	}
	Float struct {
		Fractional struct {
			Key1 float64
			Key2 float64
			Key3 float64
		}
		Exponent struct {
			Key1 float64
			Key2 float64
			Key3 float64
		}
		Both struct {
			Key float64
		}
		Underscores struct {
			Key1 float64
			Key2 float64
		}
	}
	Boolean struct {
		True  bool
		False bool
	}
	Datetime struct {
		Key1 time.Time
		Key2 time.Time
		Key3 time.Time
	}
	Array struct {
		Key1 []int64
		Key2 []string
		Key3 [][]int64
		// TODO: Key4 not supported by go-toml's Unmarshal
		Key5 []int64
		Key6 []int64
	}
	Products []struct {
		Name  string
		Sku   int64
		Color string
	}
	Fruit []struct {
		Name     string
		Physical struct {
			Color   string
			Shape   string
			Variety []struct {
				Name string
			}
		}
	}
}

// BenchmarkReferenceFile decodes the full reference document with each
// library. SetBytes makes `go test -bench` report throughput (MB/s) so
// the libraries are comparable on document size, not just ns/op.
func BenchmarkReferenceFile(b *testing.B) {
	bench(b, func(r runner, b *testing.B) {
		bytes, err := ioutil.ReadFile("benchmark.toml")
		if err != nil {
			b.Fatal(err)
		}
		b.SetBytes(int64(len(bytes)))
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			d := benchmarkDoc{}
			err := r.unmarshal(bytes, &d)
			if err != nil {
				panic(err)
			}
		}
	})
}

// TestReferenceFile verifies that the reference document decodes cleanly
// with go-toml v2 into benchmarkDoc (a sanity check for the benchmark).
func TestReferenceFile(t *testing.T) {
	bytes, err := ioutil.ReadFile("benchmark.toml")
	require.NoError(t, err)
	d := benchmarkDoc{}
	err = toml.Unmarshal(bytes, &d)
	require.NoError(t, err)
}
// Copyright (c) 2015 The gocql Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gocql import ( "bytes" "crypto/md5" "fmt" "math/big" "sort" "strconv" "strings" "github.com/gocql/gocql/internal/murmur" ) // a token partitioner type partitioner interface { Name() string Hash([]byte) token ParseString(string) token } // a token type token interface { fmt.Stringer Less(token) bool } // murmur3 partitioner and token type murmur3Partitioner struct{} type murmur3Token int64 func (p murmur3Partitioner) Name() string { return "Murmur3Partitioner" } func (p murmur3Partitioner) Hash(partitionKey []byte) token { h1 := murmur.Murmur3H1(partitionKey) return murmur3Token(int64(h1)) } // murmur3 little-endian, 128-bit hash, but returns only h1 func (p murmur3Partitioner) ParseString(str string) token { val, _ := strconv.ParseInt(str, 10, 64) return murmur3Token(val) } func (m murmur3Token) String() string { return strconv.FormatInt(int64(m), 10) } func (m murmur3Token) Less(token token) bool { return m < token.(murmur3Token) } // order preserving partitioner and token type orderedPartitioner struct{} type orderedToken []byte func (p orderedPartitioner) Name() string { return "OrderedPartitioner" } func (p orderedPartitioner) Hash(partitionKey []byte) token { // the partition key is the token return orderedToken(partitionKey) } func (p orderedPartitioner) ParseString(str string) token { return orderedToken([]byte(str)) } func (o orderedToken) String() string { return string([]byte(o)) } func (o orderedToken) Less(token token) bool { return -1 == bytes.Compare(o, token.(orderedToken)) } // random partitioner and token type randomPartitioner struct{} type randomToken big.Int func (r randomPartitioner) Name() string { return "RandomPartitioner" } // 2 ** 128 var maxHashInt, _ = new(big.Int).SetString("340282366920938463463374607431768211456", 10) func (p randomPartitioner) Hash(partitionKey 
[]byte) token { sum := md5.Sum(partitionKey) val := new(big.Int) val.SetBytes(sum[:]) if sum[0] > 127 { val.Sub(val, maxHashInt) val.Abs(val) } return (*randomToken)(val) } func (p randomPartitioner) ParseString(str string) token { val := new(big.Int) val.SetString(str, 10) return (*randomToken)(val) } func (r *randomToken) String() string { return (*big.Int)(r).String() } func (r *randomToken) Less(token token) bool { return -1 == (*big.Int)(r).Cmp((*big.Int)(token.(*randomToken))) } // a data structure for organizing the relationship between tokens and hosts type tokenRing struct { partitioner partitioner tokens []token hosts []*HostInfo } func newTokenRing(partitioner string, hosts []*HostInfo) (*tokenRing, error) { tokenRing := &tokenRing{ tokens: []token{}, hosts: []*HostInfo{}, } if strings.HasSuffix(partitioner, "Murmur3Partitioner") { tokenRing.partitioner = murmur3Partitioner{} } else if strings.HasSuffix(partitioner, "OrderedPartitioner") { tokenRing.partitioner = orderedPartitioner{} } else if strings.HasSuffix(partitioner, "RandomPartitioner") { tokenRing.partitioner = randomPartitioner{} } else { return nil, fmt.Errorf("Unsupported partitioner '%s'", partitioner) } for _, host := range hosts { for _, strToken := range host.Tokens() { token := tokenRing.partitioner.ParseString(strToken) tokenRing.tokens = append(tokenRing.tokens, token) tokenRing.hosts = append(tokenRing.hosts, host) } } sort.Sort(tokenRing) return tokenRing, nil } func (t *tokenRing) Len() int { return len(t.tokens) } func (t *tokenRing) Less(i, j int) bool { return t.tokens[i].Less(t.tokens[j]) } func (t *tokenRing) Swap(i, j int) { t.tokens[i], t.hosts[i], t.tokens[j], t.hosts[j] = t.tokens[j], t.hosts[j], t.tokens[i], t.hosts[i] } func (t *tokenRing) String() string { buf := &bytes.Buffer{} buf.WriteString("TokenRing(") if t.partitioner != nil { buf.WriteString(t.partitioner.Name()) } buf.WriteString("){") sep := "" for i := range t.tokens { buf.WriteString(sep) sep = "," 
buf.WriteString("\n\t[") buf.WriteString(strconv.Itoa(i)) buf.WriteString("]") buf.WriteString(t.tokens[i].String()) buf.WriteString(":") buf.WriteString(t.hosts[i].Peer().String()) } buf.WriteString("\n}") return string(buf.Bytes()) } func (t *tokenRing) GetHostForPartitionKey(partitionKey []byte) *HostInfo { if t == nil { return nil } token := t.partitioner.Hash(partitionKey) return t.GetHostForToken(token) } func (t *tokenRing) GetHostForToken(token token) *HostInfo { if t == nil { return nil } // find the primary replica ringIndex := sort.Search( len(t.tokens), func(i int) bool { return !t.tokens[i].Less(token) }, ) if ringIndex == len(t.tokens) { // wrap around to the first in the ring ringIndex = 0 } host := t.hosts[ringIndex] return host } return nil if there are no tokens (i.e. no hosts available) // Copyright (c) 2015 The gocql Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gocql import ( "bytes" "crypto/md5" "fmt" "math/big" "sort" "strconv" "strings" "github.com/gocql/gocql/internal/murmur" ) // a token partitioner type partitioner interface { Name() string Hash([]byte) token ParseString(string) token } // a token type token interface { fmt.Stringer Less(token) bool } // murmur3 partitioner and token type murmur3Partitioner struct{} type murmur3Token int64 func (p murmur3Partitioner) Name() string { return "Murmur3Partitioner" } func (p murmur3Partitioner) Hash(partitionKey []byte) token { h1 := murmur.Murmur3H1(partitionKey) return murmur3Token(int64(h1)) } // murmur3 little-endian, 128-bit hash, but returns only h1 func (p murmur3Partitioner) ParseString(str string) token { val, _ := strconv.ParseInt(str, 10, 64) return murmur3Token(val) } func (m murmur3Token) String() string { return strconv.FormatInt(int64(m), 10) } func (m murmur3Token) Less(token token) bool { return m < token.(murmur3Token) } // order preserving partitioner and token type 
orderedPartitioner struct{} type orderedToken []byte func (p orderedPartitioner) Name() string { return "OrderedPartitioner" } func (p orderedPartitioner) Hash(partitionKey []byte) token { // the partition key is the token return orderedToken(partitionKey) } func (p orderedPartitioner) ParseString(str string) token { return orderedToken([]byte(str)) } func (o orderedToken) String() string { return string([]byte(o)) } func (o orderedToken) Less(token token) bool { return -1 == bytes.Compare(o, token.(orderedToken)) } // random partitioner and token type randomPartitioner struct{} type randomToken big.Int func (r randomPartitioner) Name() string { return "RandomPartitioner" } // 2 ** 128 var maxHashInt, _ = new(big.Int).SetString("340282366920938463463374607431768211456", 10) func (p randomPartitioner) Hash(partitionKey []byte) token { sum := md5.Sum(partitionKey) val := new(big.Int) val.SetBytes(sum[:]) if sum[0] > 127 { val.Sub(val, maxHashInt) val.Abs(val) } return (*randomToken)(val) } func (p randomPartitioner) ParseString(str string) token { val := new(big.Int) val.SetString(str, 10) return (*randomToken)(val) } func (r *randomToken) String() string { return (*big.Int)(r).String() } func (r *randomToken) Less(token token) bool { return -1 == (*big.Int)(r).Cmp((*big.Int)(token.(*randomToken))) } // a data structure for organizing the relationship between tokens and hosts type tokenRing struct { partitioner partitioner tokens []token hosts []*HostInfo } func newTokenRing(partitioner string, hosts []*HostInfo) (*tokenRing, error) { tokenRing := &tokenRing{ tokens: []token{}, hosts: []*HostInfo{}, } if strings.HasSuffix(partitioner, "Murmur3Partitioner") { tokenRing.partitioner = murmur3Partitioner{} } else if strings.HasSuffix(partitioner, "OrderedPartitioner") { tokenRing.partitioner = orderedPartitioner{} } else if strings.HasSuffix(partitioner, "RandomPartitioner") { tokenRing.partitioner = randomPartitioner{} } else { return nil, fmt.Errorf("Unsupported 
partitioner '%s'", partitioner) } for _, host := range hosts { for _, strToken := range host.Tokens() { token := tokenRing.partitioner.ParseString(strToken) tokenRing.tokens = append(tokenRing.tokens, token) tokenRing.hosts = append(tokenRing.hosts, host) } } sort.Sort(tokenRing) return tokenRing, nil } func (t *tokenRing) Len() int { return len(t.tokens) } func (t *tokenRing) Less(i, j int) bool { return t.tokens[i].Less(t.tokens[j]) } func (t *tokenRing) Swap(i, j int) { t.tokens[i], t.hosts[i], t.tokens[j], t.hosts[j] = t.tokens[j], t.hosts[j], t.tokens[i], t.hosts[i] } func (t *tokenRing) String() string { buf := &bytes.Buffer{} buf.WriteString("TokenRing(") if t.partitioner != nil { buf.WriteString(t.partitioner.Name()) } buf.WriteString("){") sep := "" for i := range t.tokens { buf.WriteString(sep) sep = "," buf.WriteString("\n\t[") buf.WriteString(strconv.Itoa(i)) buf.WriteString("]") buf.WriteString(t.tokens[i].String()) buf.WriteString(":") buf.WriteString(t.hosts[i].Peer().String()) } buf.WriteString("\n}") return string(buf.Bytes()) } func (t *tokenRing) GetHostForPartitionKey(partitionKey []byte) *HostInfo { if t == nil { return nil } token := t.partitioner.Hash(partitionKey) return t.GetHostForToken(token) } func (t *tokenRing) GetHostForToken(token token) *HostInfo { if t == nil { return nil } l := len(t.tokens) // no host tokens, no available hosts if l == 0{ return nil } // find the primary replica ringIndex := sort.Search( l, func(i int) bool { return !t.tokens[i].Less(token) }, ) if ringIndex == l { // wrap around to the first in the ring ringIndex = 0 } host := t.hosts[ringIndex] return host }
package chi import ( "bytes" "context" "fmt" "io" "io/ioutil" "net" "net/http" "net/http/httptest" "os" "sync" "testing" "time" ) func TestMuxBasic(t *testing.T) { var count uint64 countermw := func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { count++ next.ServeHTTP(w, r) }) } usermw := func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() ctx = context.WithValue(ctx, ctxKey{"user"}, "peter") r = r.WithContext(ctx) next.ServeHTTP(w, r) }) } exmw := func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := context.WithValue(r.Context(), ctxKey{"ex"}, "a") r = r.WithContext(ctx) next.ServeHTTP(w, r) }) } logbuf := bytes.NewBufferString("") logmsg := "logmw test" logmw := func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { logbuf.WriteString(logmsg) next.ServeHTTP(w, r) }) } cxindex := func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() user := ctx.Value(ctxKey{"user"}).(string) w.WriteHeader(200) w.Write([]byte(fmt.Sprintf("hi %s", user))) } ping := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) w.Write([]byte(".")) } headPing := func(w http.ResponseWriter, r *http.Request) { w.Header().Set("X-Ping", "1") w.WriteHeader(200) } createPing := func(w http.ResponseWriter, r *http.Request) { // create .... 
w.WriteHeader(201) } pingAll := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) w.Write([]byte("ping all")) } pingAll2 := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) w.Write([]byte("ping all2")) } pingOne := func(w http.ResponseWriter, r *http.Request) { idParam := URLParam(r, "id") w.WriteHeader(200) w.Write([]byte(fmt.Sprintf("ping one id: %s", idParam))) } pingWoop := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) w.Write([]byte("woop." + URLParam(r, "iidd"))) } catchAll := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) w.Write([]byte("catchall")) } m := NewRouter() m.Use(countermw) m.Use(usermw) m.Use(exmw) m.Use(logmw) m.Get("/", cxindex) m.Method("GET", "/ping", http.HandlerFunc(ping)) m.MethodFunc("GET", "/pingall", pingAll) m.MethodFunc("get", "/ping/all", pingAll) m.Get("/ping/all2", pingAll2) m.Head("/ping", headPing) m.Post("/ping", createPing) m.Get("/ping/{id}", pingWoop) m.Get("/ping/{id}", pingOne) // expected to overwrite to pingOne handler m.Get("/ping/{iidd}/woop", pingWoop) m.HandleFunc("/admin/*", catchAll) // m.Post("/admin/*", catchAll) ts := httptest.NewServer(m) defer ts.Close() // GET / if _, body := testRequest(t, ts, "GET", "/", nil); body != "hi peter" { t.Fatalf(body) } tlogmsg, _ := logbuf.ReadString(0) if tlogmsg != logmsg { t.Error("expecting log message from middleware:", logmsg) } // GET /ping if _, body := testRequest(t, ts, "GET", "/ping", nil); body != "." 
{ t.Fatalf(body) } // GET /pingall if _, body := testRequest(t, ts, "GET", "/pingall", nil); body != "ping all" { t.Fatalf(body) } // GET /ping/all if _, body := testRequest(t, ts, "GET", "/ping/all", nil); body != "ping all" { t.Fatalf(body) } // GET /ping/all2 if _, body := testRequest(t, ts, "GET", "/ping/all2", nil); body != "ping all2" { t.Fatalf(body) } // GET /ping/123 if _, body := testRequest(t, ts, "GET", "/ping/123", nil); body != "ping one id: 123" { t.Fatalf(body) } // GET /ping/allan if _, body := testRequest(t, ts, "GET", "/ping/allan", nil); body != "ping one id: allan" { t.Fatalf(body) } // GET /ping/1/woop if _, body := testRequest(t, ts, "GET", "/ping/1/woop", nil); body != "woop.1" { t.Fatalf(body) } // HEAD /ping resp, err := http.Head(ts.URL + "/ping") if err != nil { t.Fatal(err) } if resp.StatusCode != 200 { t.Error("head failed, should be 200") } if resp.Header.Get("X-Ping") == "" { t.Error("expecting X-Ping header") } // GET /admin/catch-this if _, body := testRequest(t, ts, "GET", "/admin/catch-thazzzzz", nil); body != "catchall" { t.Fatalf(body) } // POST /admin/catch-this resp, err = http.Post(ts.URL+"/admin/casdfsadfs", "text/plain", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } body, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatal(err) } defer resp.Body.Close() if resp.StatusCode != 200 { t.Error("POST failed, should be 200") } if string(body) != "catchall" { t.Error("expecting response body: 'catchall'") } // Custom http method DIE /ping/1/woop if resp, body := testRequest(t, ts, "DIE", "/ping/1/woop", nil); body != "" || resp.StatusCode != 405 { t.Fatalf(fmt.Sprintf("expecting 405 status and empty body, got %d '%s'", resp.StatusCode, body)) } } func TestMuxMounts(t *testing.T) { r := NewRouter() r.Get("/{hash}", func(w http.ResponseWriter, r *http.Request) { v := URLParam(r, "hash") w.Write([]byte(fmt.Sprintf("/%s", v))) }) r.Route("/{hash}/share", func(r Router) { r.Get("/", func(w http.ResponseWriter, r 
*http.Request) { v := URLParam(r, "hash") w.Write([]byte(fmt.Sprintf("/%s/share", v))) }) r.Get("/{network}", func(w http.ResponseWriter, r *http.Request) { v := URLParam(r, "hash") n := URLParam(r, "network") w.Write([]byte(fmt.Sprintf("/%s/share/%s", v, n))) }) }) m := NewRouter() m.Mount("/sharing", r) ts := httptest.NewServer(m) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/sharing/aBc", nil); body != "/aBc" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/sharing/aBc/share", nil); body != "/aBc/share" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/sharing/aBc/share/twitter", nil); body != "/aBc/share/twitter" { t.Fatalf(body) } } func TestMuxPlain(t *testing.T) { r := NewRouter() r.Get("/hi", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("bye")) }) r.NotFound(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(404) w.Write([]byte("nothing here")) }) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/hi", nil); body != "bye" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/nothing-here", nil); body != "nothing here" { t.Fatalf(body) } } func TestMuxEmptyRoutes(t *testing.T) { mux := NewRouter() apiRouter := NewRouter() // oops, we forgot to declare any route handlers mux.Handle("/api*", apiRouter) if _, body := testHandler(t, mux, "GET", "/", nil); body != "404 page not found\n" { t.Fatalf(body) } if _, body := testHandler(t, apiRouter, "GET", "/", nil); body != "404 page not found\n" { t.Fatalf(body) } } // Test a mux that routes a trailing slash, see also middleware/strip_test.go // for an example of using a middleware to handle trailing slashes. 
func TestMuxTrailingSlash(t *testing.T) { r := NewRouter() r.NotFound(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(404) w.Write([]byte("nothing here")) }) subRoutes := NewRouter() indexHandler := func(w http.ResponseWriter, r *http.Request) { accountID := URLParam(r, "accountID") w.Write([]byte(accountID)) } subRoutes.Get("/", indexHandler) r.Mount("/accounts/{accountID}", subRoutes) r.Get("/accounts/{accountID}/", indexHandler) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/accounts/admin", nil); body != "admin" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/accounts/admin/", nil); body != "admin" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/nothing-here", nil); body != "nothing here" { t.Fatalf(body) } } func TestMuxNestedNotFound(t *testing.T) { r := NewRouter() r.Use(func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { r = r.WithContext(context.WithValue(r.Context(), ctxKey{"mw"}, "mw")) next.ServeHTTP(w, r) }) }) r.Get("/hi", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("bye")) }) r.With(func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { r = r.WithContext(context.WithValue(r.Context(), ctxKey{"with"}, "with")) next.ServeHTTP(w, r) }) }).NotFound(func(w http.ResponseWriter, r *http.Request) { chkMw := r.Context().Value(ctxKey{"mw"}).(string) chkWith := r.Context().Value(ctxKey{"with"}).(string) w.WriteHeader(404) w.Write([]byte(fmt.Sprintf("root 404 %s %s", chkMw, chkWith))) }) sr1 := NewRouter() sr1.Get("/sub", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("sub")) }) sr1.Group(func(sr1 Router) { sr1.Use(func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { r = r.WithContext(context.WithValue(r.Context(), ctxKey{"mw2"}, "mw2")) next.ServeHTTP(w, r) }) }) sr1.NotFound(func(w 
http.ResponseWriter, r *http.Request) { chkMw2 := r.Context().Value(ctxKey{"mw2"}).(string) w.WriteHeader(404) w.Write([]byte(fmt.Sprintf("sub 404 %s", chkMw2))) }) }) sr2 := NewRouter() sr2.Get("/sub", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("sub2")) }) r.Mount("/admin1", sr1) r.Mount("/admin2", sr2) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/hi", nil); body != "bye" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/nothing-here", nil); body != "root 404 mw with" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/admin1/sub", nil); body != "sub" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/admin1/nope", nil); body != "sub 404 mw2" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/admin2/sub", nil); body != "sub2" { t.Fatalf(body) } // Not found pages should bubble up to the root. if _, body := testRequest(t, ts, "GET", "/admin2/nope", nil); body != "root 404 mw with" { t.Fatalf(body) } } func TestMuxNestedMethodNotAllowed(t *testing.T) { r := NewRouter() r.Get("/root", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("root")) }) r.MethodNotAllowed(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(405) w.Write([]byte("root 405")) }) sr1 := NewRouter() sr1.Get("/sub1", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("sub1")) }) sr1.MethodNotAllowed(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(405) w.Write([]byte("sub1 405")) }) sr2 := NewRouter() sr2.Get("/sub2", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("sub2")) }) pathVar := NewRouter() pathVar.Get("/{var}", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("pv")) }) pathVar.MethodNotAllowed(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(405) w.Write([]byte("pv 405")) }) r.Mount("/prefix1", sr1) r.Mount("/prefix2", sr2) r.Mount("/pathVar", pathVar) ts := httptest.NewServer(r) defer ts.Close() if _, body 
:= testRequest(t, ts, "GET", "/root", nil); body != "root" { t.Fatalf(body) } if _, body := testRequest(t, ts, "PUT", "/root", nil); body != "root 405" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/prefix1/sub1", nil); body != "sub1" { t.Fatalf(body) } if _, body := testRequest(t, ts, "PUT", "/prefix1/sub1", nil); body != "sub1 405" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/prefix2/sub2", nil); body != "sub2" { t.Fatalf(body) } if _, body := testRequest(t, ts, "PUT", "/prefix2/sub2", nil); body != "root 405" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/pathVar/myvar", nil); body != "pv" { t.Fatalf(body) } if _, body := testRequest(t, ts, "DELETE", "/pathVar/myvar", nil); body != "pv 405" { t.Fatalf(body) } } func TestMuxComplicatedNotFound(t *testing.T) { decorateRouter := func(r *Mux) { // Root router with groups r.Get("/auth", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("auth get")) }) r.Route("/public", func(r Router) { r.Get("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("public get")) }) }) // sub router with groups sub0 := NewRouter() sub0.Route("/resource", func(r Router) { r.Get("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("private get")) }) }) r.Mount("/private", sub0) // sub router with groups sub1 := NewRouter() sub1.Route("/resource", func(r Router) { r.Get("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("private get")) }) }) r.With(func(next http.Handler) http.Handler { return next }).Mount("/private_mw", sub1) } testNotFound := func(t *testing.T, r *Mux) { ts := httptest.NewServer(r) defer ts.Close() // check that we didn't break correct routes if _, body := testRequest(t, ts, "GET", "/auth", nil); body != "auth get" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/public", nil); body != "public get" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/public/", nil); body != "public get" { t.Fatalf(body) 
} if _, body := testRequest(t, ts, "GET", "/private/resource", nil); body != "private get" { t.Fatalf(body) } // check custom not-found on all levels if _, body := testRequest(t, ts, "GET", "/nope", nil); body != "custom not-found" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/public/nope", nil); body != "custom not-found" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/private/nope", nil); body != "custom not-found" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/private/resource/nope", nil); body != "custom not-found" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/private_mw/nope", nil); body != "custom not-found" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/private_mw/resource/nope", nil); body != "custom not-found" { t.Fatalf(body) } // check custom not-found on trailing slash routes if _, body := testRequest(t, ts, "GET", "/auth/", nil); body != "custom not-found" { t.Fatalf(body) } } t.Run("pre", func(t *testing.T) { r := NewRouter() r.NotFound(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("custom not-found")) }) decorateRouter(r) testNotFound(t, r) }) t.Run("post", func(t *testing.T) { r := NewRouter() decorateRouter(r) r.NotFound(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("custom not-found")) }) testNotFound(t, r) }) } func TestMuxWith(t *testing.T) { var cmwInit1, cmwHandler1 uint64 var cmwInit2, cmwHandler2 uint64 mw1 := func(next http.Handler) http.Handler { cmwInit1++ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { cmwHandler1++ r = r.WithContext(context.WithValue(r.Context(), ctxKey{"inline1"}, "yes")) next.ServeHTTP(w, r) }) } mw2 := func(next http.Handler) http.Handler { cmwInit2++ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { cmwHandler2++ r = r.WithContext(context.WithValue(r.Context(), ctxKey{"inline2"}, "yes")) next.ServeHTTP(w, r) }) } r := NewRouter() r.Get("/hi", func(w http.ResponseWriter, r 
*http.Request) { w.Write([]byte("bye")) }) r.With(mw1).With(mw2).Get("/inline", func(w http.ResponseWriter, r *http.Request) { v1 := r.Context().Value(ctxKey{"inline1"}).(string) v2 := r.Context().Value(ctxKey{"inline2"}).(string) w.Write([]byte(fmt.Sprintf("inline %s %s", v1, v2))) }) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/hi", nil); body != "bye" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/inline", nil); body != "inline yes yes" { t.Fatalf(body) } if cmwInit1 != 1 { t.Fatalf("expecting cmwInit1 to be 1, got %d", cmwInit1) } if cmwHandler1 != 1 { t.Fatalf("expecting cmwHandler1 to be 1, got %d", cmwHandler1) } if cmwInit2 != 1 { t.Fatalf("expecting cmwInit2 to be 1, got %d", cmwInit2) } if cmwHandler2 != 1 { t.Fatalf("expecting cmwHandler2 to be 1, got %d", cmwHandler2) } } func TestRouterFromMuxWith(t *testing.T) { t.Parallel() r := NewRouter() with := r.With(func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) }) }) with.Get("/with_middleware", func(w http.ResponseWriter, r *http.Request) {}) ts := httptest.NewServer(with) defer ts.Close() // Without the fix this test was committed with, this causes a panic. 
testRequest(t, ts, http.MethodGet, "/with_middleware", nil) } func TestMuxMiddlewareStack(t *testing.T) { var stdmwInit, stdmwHandler uint64 stdmw := func(next http.Handler) http.Handler { stdmwInit++ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { stdmwHandler++ next.ServeHTTP(w, r) }) } _ = stdmw var ctxmwInit, ctxmwHandler uint64 ctxmw := func(next http.Handler) http.Handler { ctxmwInit++ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctxmwHandler++ ctx := r.Context() ctx = context.WithValue(ctx, ctxKey{"count.ctxmwHandler"}, ctxmwHandler) r = r.WithContext(ctx) next.ServeHTTP(w, r) }) } var inCtxmwInit, inCtxmwHandler uint64 inCtxmw := func(next http.Handler) http.Handler { inCtxmwInit++ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { inCtxmwHandler++ next.ServeHTTP(w, r) }) } r := NewRouter() r.Use(stdmw) r.Use(ctxmw) r.Use(func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/ping" { w.Write([]byte("pong")) return } next.ServeHTTP(w, r) }) }) var handlerCount uint64 r.With(inCtxmw).Get("/", func(w http.ResponseWriter, r *http.Request) { handlerCount++ ctx := r.Context() ctxmwHandlerCount := ctx.Value(ctxKey{"count.ctxmwHandler"}).(uint64) w.Write([]byte(fmt.Sprintf("inits:%d reqs:%d ctxValue:%d", ctxmwInit, handlerCount, ctxmwHandlerCount))) }) r.Get("/hi", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("wooot")) }) ts := httptest.NewServer(r) defer ts.Close() testRequest(t, ts, "GET", "/", nil) testRequest(t, ts, "GET", "/", nil) var body string _, body = testRequest(t, ts, "GET", "/", nil) if body != "inits:1 reqs:3 ctxValue:3" { t.Fatalf("got: '%s'", body) } _, body = testRequest(t, ts, "GET", "/ping", nil) if body != "pong" { t.Fatalf("got: '%s'", body) } } func TestMuxRouteGroups(t *testing.T) { var stdmwInit, stdmwHandler uint64 stdmw := func(next http.Handler) http.Handler { stdmwInit++ return 
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			stdmwHandler++
			next.ServeHTTP(w, r)
		})
	}

	var stdmwInit2, stdmwHandler2 uint64
	stdmw2 := func(next http.Handler) http.Handler {
		stdmwInit2++
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			stdmwHandler2++
			next.ServeHTTP(w, r)
		})
	}

	r := NewRouter()
	r.Group(func(r Router) {
		r.Use(stdmw)
		r.Get("/group", func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("root group"))
		})
	})
	r.Group(func(r Router) {
		r.Use(stdmw2)
		r.Get("/group2", func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("root group2"))
		})
	})

	ts := httptest.NewServer(r)
	defer ts.Close()

	// GET /group
	_, body := testRequest(t, ts, "GET", "/group", nil)
	if body != "root group" {
		t.Fatalf("got: '%s'", body)
	}
	if stdmwInit != 1 || stdmwHandler != 1 {
		// Logged (not fatal) deliberately, mirroring the original test.
		t.Logf("stdmw counters failed, should be 1:1, got %d:%d", stdmwInit, stdmwHandler)
	}

	// GET /group2
	_, body = testRequest(t, ts, "GET", "/group2", nil)
	if body != "root group2" {
		t.Fatalf("got: '%s'", body)
	}
	if stdmwInit2 != 1 || stdmwHandler2 != 1 {
		t.Fatalf("stdmw2 counters failed, should be 1:1, got %d:%d", stdmwInit2, stdmwHandler2)
	}
}

// TestMuxBig drives the large composite router built by bigMux through a
// series of request/response expectations.
func TestMuxBig(t *testing.T) {
	r := bigMux()

	ts := httptest.NewServer(r)
	defer ts.Close()

	var body, expected string

	_, body = testRequest(t, ts, "GET", "/favicon.ico", nil)
	if body != "fav" {
		t.Fatalf("got '%s'", body)
	}
	_, body = testRequest(t, ts, "GET", "/hubs/4/view", nil)
	if body != "/hubs/4/view reqid:1 session:anonymous" {
		t.Fatalf("got '%v'", body)
	}
	_, body = testRequest(t, ts, "GET", "/hubs/4/view/index.html", nil)
	if body != "/hubs/4/view/index.html reqid:1 session:anonymous" {
		t.Fatalf("got '%s'", body)
	}
	_, body = testRequest(t, ts, "POST", "/hubs/ethereumhub/view/index.html", nil)
	if body != "/hubs/ethereumhub/view/index.html reqid:1 session:anonymous" {
		t.Fatalf("got '%s'", body)
	}
	_, body = testRequest(t, ts, "GET", "/", nil)
	if body != "/ reqid:1 session:elvis" {
		t.Fatalf("got '%s'", body)
	}
	_, body = testRequest(t, ts, "GET", "/suggestions", nil)
	if body != "/suggestions reqid:1 session:elvis" {
		t.Fatalf("got '%s'", body)
	}
	_, body = testRequest(t, ts, "GET", "/woot/444/hiiii", nil)
	if body != "/woot/444/hiiii" {
		t.Fatalf("got '%s'", body)
	}
	_, body = testRequest(t, ts, "GET", "/hubs/123", nil)
	expected = "/hubs/123 reqid:1 session:elvis"
	if body != expected {
		t.Fatalf("expected:%s got:%s", expected, body)
	}
	_, body = testRequest(t, ts, "GET", "/hubs/123/touch", nil)
	if body != "/hubs/123/touch reqid:1 session:elvis" {
		t.Fatalf("got '%s'", body)
	}
	_, body = testRequest(t, ts, "GET", "/hubs/123/webhooks", nil)
	if body != "/hubs/123/webhooks reqid:1 session:elvis" {
		t.Fatalf("got '%s'", body)
	}
	_, body = testRequest(t, ts, "GET", "/hubs/123/posts", nil)
	if body != "/hubs/123/posts reqid:1 session:elvis" {
		t.Fatalf("got '%s'", body)
	}
	_, body = testRequest(t, ts, "GET", "/folders", nil)
	if body != "404 page not found\n" {
		t.Fatalf("got '%s'", body)
	}
	_, body = testRequest(t, ts, "GET", "/folders/", nil)
	if body != "/folders/ reqid:1 session:elvis" {
		t.Fatalf("got '%s'", body)
	}
	_, body = testRequest(t, ts, "GET", "/folders/public", nil)
	if body != "/folders/public reqid:1 session:elvis" {
		t.Fatalf("got '%s'", body)
	}
	_, body = testRequest(t, ts, "GET", "/folders/nothing", nil)
	if body != "404 page not found\n" {
		t.Fatalf("got '%s'", body)
	}
}

// bigMux assembles a deep router: request-id + session middlewares, two
// groups (anonymous vs. elvis sessions), nested /hubs routes with a mounted
// webhooks subrouter, and a /folders/ subroute.
func bigMux() Router {
	var r *Mux
	var sr3 *Mux
	// var sr1, sr2, sr3, sr4, sr5, sr6 *Mux
	r = NewRouter()
	r.Use(func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			ctx := context.WithValue(r.Context(), ctxKey{"requestID"}, "1")
			next.ServeHTTP(w, r.WithContext(ctx))
		})
	})
	r.Use(func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			next.ServeHTTP(w, r)
		})
	})
	r.Group(func(r Router) {
		r.Use(func(next http.Handler) http.Handler {
			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				ctx := context.WithValue(r.Context(), ctxKey{"session.user"}, "anonymous")
				next.ServeHTTP(w, r.WithContext(ctx))
			})
		})
		r.Get("/favicon.ico", func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("fav"))
		})
		r.Get("/hubs/{hubID}/view", func(w http.ResponseWriter, r *http.Request) {
			ctx := r.Context()
			s := fmt.Sprintf("/hubs/%s/view reqid:%s session:%s", URLParam(r, "hubID"),
				ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"}))
			w.Write([]byte(s))
		})
		r.Get("/hubs/{hubID}/view/*", func(w http.ResponseWriter, r *http.Request) {
			ctx := r.Context()
			s := fmt.Sprintf("/hubs/%s/view/%s reqid:%s session:%s", URLParamFromCtx(ctx, "hubID"),
				URLParam(r, "*"), ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"}))
			w.Write([]byte(s))
		})
		r.Post("/hubs/{hubSlug}/view/*", func(w http.ResponseWriter, r *http.Request) {
			ctx := r.Context()
			s := fmt.Sprintf("/hubs/%s/view/%s reqid:%s session:%s", URLParamFromCtx(ctx, "hubSlug"),
				URLParam(r, "*"), ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"}))
			w.Write([]byte(s))
		})
	})
	r.Group(func(r Router) {
		r.Use(func(next http.Handler) http.Handler {
			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				ctx := context.WithValue(r.Context(), ctxKey{"session.user"}, "elvis")
				next.ServeHTTP(w, r.WithContext(ctx))
			})
		})
		r.Get("/", func(w http.ResponseWriter, r *http.Request) {
			ctx := r.Context()
			s := fmt.Sprintf("/ reqid:%s session:%s", ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"}))
			w.Write([]byte(s))
		})
		r.Get("/suggestions", func(w http.ResponseWriter, r *http.Request) {
			ctx := r.Context()
			s := fmt.Sprintf("/suggestions reqid:%s session:%s", ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"}))
			w.Write([]byte(s))
		})
		r.Get("/woot/{wootID}/*", func(w http.ResponseWriter, r *http.Request) {
			s := fmt.Sprintf("/woot/%s/%s", URLParam(r, "wootID"), URLParam(r, "*"))
			w.Write([]byte(s))
		})
		r.Route("/hubs", func(r Router) {
			_ = r.(*Mux) // sr1
			r.Route("/{hubID}", func(r Router) {
				_ = r.(*Mux) // sr2
				r.Get("/", func(w http.ResponseWriter, r *http.Request) {
					ctx := r.Context()
					s := fmt.Sprintf("/hubs/%s reqid:%s session:%s", URLParam(r, "hubID"),
						ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"}))
					w.Write([]byte(s))
				})
				r.Get("/touch", func(w http.ResponseWriter, r *http.Request) {
					ctx := r.Context()
					s := fmt.Sprintf("/hubs/%s/touch reqid:%s session:%s", URLParam(r, "hubID"),
						ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"}))
					w.Write([]byte(s))
				})

				sr3 = NewRouter()
				sr3.Get("/", func(w http.ResponseWriter, r *http.Request) {
					ctx := r.Context()
					s := fmt.Sprintf("/hubs/%s/webhooks reqid:%s session:%s", URLParam(r, "hubID"),
						ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"}))
					w.Write([]byte(s))
				})
				sr3.Route("/{webhookID}", func(r Router) {
					_ = r.(*Mux) // sr4
					r.Get("/", func(w http.ResponseWriter, r *http.Request) {
						ctx := r.Context()
						s := fmt.Sprintf("/hubs/%s/webhooks/%s reqid:%s session:%s", URLParam(r, "hubID"),
							URLParam(r, "webhookID"), ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"}))
						w.Write([]byte(s))
					})
				})

				// Mount the webhooks subrouter behind an extra middleware chain.
				r.Mount("/webhooks", Chain(func(next http.Handler) http.Handler {
					return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
						next.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), ctxKey{"hook"}, true)))
					})
				}).Handler(sr3))

				r.Route("/posts", func(r Router) {
					_ = r.(*Mux) // sr5
					r.Get("/", func(w http.ResponseWriter, r *http.Request) {
						ctx := r.Context()
						s := fmt.Sprintf("/hubs/%s/posts reqid:%s session:%s", URLParam(r, "hubID"),
							ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"}))
						w.Write([]byte(s))
					})
				})
			})
		})
		r.Route("/folders/", func(r Router) {
			_ = r.(*Mux) // sr6
			r.Get("/", func(w http.ResponseWriter, r *http.Request) {
				ctx := r.Context()
				s := fmt.Sprintf("/folders/ reqid:%s session:%s", ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"}))
				w.Write([]byte(s))
			})
			r.Get("/public", func(w http.ResponseWriter, r *http.Request) {
				ctx := r.Context()
				s := fmt.Sprintf("/folders/public reqid:%s session:%s", ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"}))
				w.Write([]byte(s))
			})
		})
	})

	return r
}

func TestMuxSubroutesBasic(t *testing.T) {
	hIndex := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("index"))
	})
	hArticlesList := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("articles-list"))
	})
	hSearchArticles := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("search-articles"))
	})
	hGetArticle := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(fmt.Sprintf("get-article:%s", URLParam(r, "id"))))
	})
	hSyncArticle := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(fmt.Sprintf("sync-article:%s", URLParam(r, "id"))))
	})

	r := NewRouter()
	// var rr1, rr2 *Mux
	r.Get("/", hIndex)
	r.Route("/articles", func(r Router) {
		// rr1 = r.(*Mux)
		r.Get("/", hArticlesList)
		r.Get("/search", hSearchArticles)
		r.Route("/{id}", func(r Router) {
			// rr2 = r.(*Mux)
			r.Get("/", hGetArticle)
			r.Get("/sync", hSyncArticle)
		})
	})

	// (commented-out debugPrintTree/log.Println tracing for r, rr1 and rr2
	// removed for readability; re-enable via debugPrintTree when debugging.)

	ts := httptest.NewServer(r)
	defer ts.Close()

	var body, expected string

	_, body = testRequest(t, ts, "GET", "/", nil)
	expected = "index"
	if body != expected {
		t.Fatalf("expected:%s got:%s", expected, body)
	}
	_, body = testRequest(t, ts, "GET", "/articles", nil)
	expected = "articles-list"
	if body != expected {
		t.Fatalf("expected:%s got:%s", expected, body)
	}
	_, body = testRequest(t, ts,
"GET", "/articles/search", nil) expected = "search-articles" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } _, body = testRequest(t, ts, "GET", "/articles/123", nil) expected = "get-article:123" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } _, body = testRequest(t, ts, "GET", "/articles/123/sync", nil) expected = "sync-article:123" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } } func TestMuxSubroutes(t *testing.T) { hHubView1 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("hub1")) }) hHubView2 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("hub2")) }) hHubView3 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("hub3")) }) hAccountView1 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("account1")) }) hAccountView2 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("account2")) }) r := NewRouter() r.Get("/hubs/{hubID}/view", hHubView1) r.Get("/hubs/{hubID}/view/*", hHubView2) sr := NewRouter() sr.Get("/", hHubView3) r.Mount("/hubs/{hubID}/users", sr) r.Get("/hubs/{hubID}/users/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("hub3 override")) }) sr3 := NewRouter() sr3.Get("/", hAccountView1) sr3.Get("/hi", hAccountView2) // var sr2 *Mux r.Route("/accounts/{accountID}", func(r Router) { _ = r.(*Mux) // sr2 // r.Get("/", hAccountView1) r.Mount("/", sr3) }) // This is the same as the r.Route() call mounted on sr2 // sr2 := NewRouter() // sr2.Mount("/", sr3) // r.Mount("/accounts/{accountID}", sr2) ts := httptest.NewServer(r) defer ts.Close() var body, expected string _, body = testRequest(t, ts, "GET", "/hubs/123/view", nil) expected = "hub1" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } _, body = testRequest(t, ts, "GET", "/hubs/123/view/index.html", nil) expected = "hub2" if body != expected 
{ t.Fatalf("expected:%s got:%s", expected, body) } _, body = testRequest(t, ts, "GET", "/hubs/123/users", nil) expected = "hub3" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } _, body = testRequest(t, ts, "GET", "/hubs/123/users/", nil) expected = "hub3 override" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } _, body = testRequest(t, ts, "GET", "/accounts/44", nil) expected = "account1" if body != expected { t.Fatalf("request:%s expected:%s got:%s", "GET /accounts/44", expected, body) } _, body = testRequest(t, ts, "GET", "/accounts/44/hi", nil) expected = "account2" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } // Test that we're building the routingPatterns properly router := r req, _ := http.NewRequest("GET", "/accounts/44/hi", nil) rctx := NewRouteContext() req = req.WithContext(context.WithValue(req.Context(), RouteCtxKey, rctx)) w := httptest.NewRecorder() router.ServeHTTP(w, req) body = w.Body.String() expected = "account2" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } routePatterns := rctx.RoutePatterns if len(rctx.RoutePatterns) != 3 { t.Fatalf("expected 3 routing patterns, got:%d", len(rctx.RoutePatterns)) } expected = "/accounts/{accountID}/*" if routePatterns[0] != expected { t.Fatalf("routePattern, expected:%s got:%s", expected, routePatterns[0]) } expected = "/*" if routePatterns[1] != expected { t.Fatalf("routePattern, expected:%s got:%s", expected, routePatterns[1]) } expected = "/hi" if routePatterns[2] != expected { t.Fatalf("routePattern, expected:%s got:%s", expected, routePatterns[2]) } } func TestSingleHandler(t *testing.T) { h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { name := URLParam(r, "name") w.Write([]byte("hi " + name)) }) r, _ := http.NewRequest("GET", "/", nil) rctx := NewRouteContext() r = r.WithContext(context.WithValue(r.Context(), RouteCtxKey, rctx)) rctx.URLParams.Add("name", "joe") w := httptest.NewRecorder() 
h.ServeHTTP(w, r) body := w.Body.String() expected := "hi joe" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } } // TODO: a Router wrapper test.. // // type ACLMux struct { // *Mux // XX string // } // // func NewACLMux() *ACLMux { // return &ACLMux{Mux: NewRouter(), XX: "hihi"} // } // // // TODO: this should be supported... // func TestWoot(t *testing.T) { // var r Router = NewRouter() // // var r2 Router = NewACLMux() //NewRouter() // r2.Get("/hi", func(w http.ResponseWriter, r *http.Request) { // w.Write([]byte("hi")) // }) // // r.Mount("/", r2) // } func TestServeHTTPExistingContext(t *testing.T) { r := NewRouter() r.Get("/hi", func(w http.ResponseWriter, r *http.Request) { s, _ := r.Context().Value(ctxKey{"testCtx"}).(string) w.Write([]byte(s)) }) r.NotFound(func(w http.ResponseWriter, r *http.Request) { s, _ := r.Context().Value(ctxKey{"testCtx"}).(string) w.WriteHeader(404) w.Write([]byte(s)) }) testcases := []struct { Method string Path string Ctx context.Context ExpectedStatus int ExpectedBody string }{ { Method: "GET", Path: "/hi", Ctx: context.WithValue(context.Background(), ctxKey{"testCtx"}, "hi ctx"), ExpectedStatus: 200, ExpectedBody: "hi ctx", }, { Method: "GET", Path: "/hello", Ctx: context.WithValue(context.Background(), ctxKey{"testCtx"}, "nothing here ctx"), ExpectedStatus: 404, ExpectedBody: "nothing here ctx", }, } for _, tc := range testcases { resp := httptest.NewRecorder() req, err := http.NewRequest(tc.Method, tc.Path, nil) if err != nil { t.Fatalf("%v", err) } req = req.WithContext(tc.Ctx) r.ServeHTTP(resp, req) b, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatalf("%v", err) } if resp.Code != tc.ExpectedStatus { t.Fatalf("%v != %v", tc.ExpectedStatus, resp.Code) } if string(b) != tc.ExpectedBody { t.Fatalf("%s != %s", tc.ExpectedBody, b) } } } func TestNestedGroups(t *testing.T) { handlerPrintCounter := func(w http.ResponseWriter, r *http.Request) { counter, _ := r.Context().Value(ctxKey{"counter"}).(int) 
w.Write([]byte(fmt.Sprintf("%v", counter))) } mwIncreaseCounter := func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() counter, _ := ctx.Value(ctxKey{"counter"}).(int) counter++ ctx = context.WithValue(ctx, ctxKey{"counter"}, counter) next.ServeHTTP(w, r.WithContext(ctx)) }) } // Each route represents value of its counter (number of applied middlewares). r := NewRouter() // counter == 0 r.Get("/0", handlerPrintCounter) r.Group(func(r Router) { r.Use(mwIncreaseCounter) // counter == 1 r.Get("/1", handlerPrintCounter) // r.Handle(GET, "/2", Chain(mwIncreaseCounter).HandlerFunc(handlerPrintCounter)) r.With(mwIncreaseCounter).Get("/2", handlerPrintCounter) r.Group(func(r Router) { r.Use(mwIncreaseCounter, mwIncreaseCounter) // counter == 3 r.Get("/3", handlerPrintCounter) }) r.Route("/", func(r Router) { r.Use(mwIncreaseCounter, mwIncreaseCounter) // counter == 3 // r.Handle(GET, "/4", Chain(mwIncreaseCounter).HandlerFunc(handlerPrintCounter)) r.With(mwIncreaseCounter).Get("/4", handlerPrintCounter) r.Group(func(r Router) { r.Use(mwIncreaseCounter, mwIncreaseCounter) // counter == 5 r.Get("/5", handlerPrintCounter) // r.Handle(GET, "/6", Chain(mwIncreaseCounter).HandlerFunc(handlerPrintCounter)) r.With(mwIncreaseCounter).Get("/6", handlerPrintCounter) }) }) }) ts := httptest.NewServer(r) defer ts.Close() for _, route := range []string{"0", "1", "2", "3", "4", "5", "6"} { if _, body := testRequest(t, ts, "GET", "/"+route, nil); body != route { t.Errorf("expected %v, got %v", route, body) } } } func TestMiddlewarePanicOnLateUse(t *testing.T) { handler := func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("hello\n")) } mw := func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) }) } defer func() { if recover() == nil { t.Error("expected panic()") } }() r := NewRouter() r.Get("/", handler) r.Use(mw) // Too late 
to apply middleware, we're expecting panic(). } func TestMountingExistingPath(t *testing.T) { handler := func(w http.ResponseWriter, r *http.Request) {} defer func() { if recover() == nil { t.Error("expected panic()") } }() r := NewRouter() r.Get("/", handler) r.Mount("/hi", http.HandlerFunc(handler)) r.Mount("/hi", http.HandlerFunc(handler)) } func TestMountingSimilarPattern(t *testing.T) { r := NewRouter() r.Get("/hi", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("bye")) }) r2 := NewRouter() r2.Get("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("foobar")) }) r3 := NewRouter() r3.Get("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("foo")) }) r.Mount("/foobar", r2) r.Mount("/foo", r3) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/hi", nil); body != "bye" { t.Fatalf(body) } } func TestMuxEmptyParams(t *testing.T) { r := NewRouter() r.Get(`/users/{x}/{y}/{z}`, func(w http.ResponseWriter, r *http.Request) { x := URLParam(r, "x") y := URLParam(r, "y") z := URLParam(r, "z") w.Write([]byte(fmt.Sprintf("%s-%s-%s", x, y, z))) }) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/users/a/b/c", nil); body != "a-b-c" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/users///c", nil); body != "--c" { t.Fatalf(body) } } func TestMuxMissingParams(t *testing.T) { r := NewRouter() r.Get(`/user/{userId:\d+}`, func(w http.ResponseWriter, r *http.Request) { userID := URLParam(r, "userId") w.Write([]byte(fmt.Sprintf("userId = '%s'", userID))) }) r.NotFound(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(404) w.Write([]byte("nothing here")) }) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/user/123", nil); body != "userId = '123'" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/user/", nil); body != "nothing here" { t.Fatalf(body) } } func TestMuxWildcardRoute(t *testing.T) { 
handler := func(w http.ResponseWriter, r *http.Request) {} defer func() { if recover() == nil { t.Error("expected panic()") } }() r := NewRouter() r.Get("/*/wildcard/must/be/at/end", handler) } func TestMuxWildcardRouteCheckTwo(t *testing.T) { handler := func(w http.ResponseWriter, r *http.Request) {} defer func() { if recover() == nil { t.Error("expected panic()") } }() r := NewRouter() r.Get("/*/wildcard/{must}/be/at/end", handler) } func TestMuxRegexp(t *testing.T) { r := NewRouter() r.Route("/{param:[0-9]*}/test", func(r Router) { r.Get("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte(fmt.Sprintf("Hi: %s", URLParam(r, "param")))) }) }) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "//test", nil); body != "Hi: " { t.Fatalf(body) } } func TestMuxRegexp2(t *testing.T) { r := NewRouter() r.Get("/foo-{suffix:[a-z]{2,3}}.json", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte(URLParam(r, "suffix"))) }) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/foo-.json", nil); body != "" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/foo-abc.json", nil); body != "abc" { t.Fatalf(body) } } func TestMuxRegexp3(t *testing.T) { r := NewRouter() r.Get("/one/{firstId:[a-z0-9-]+}/{secondId:[a-z]+}/first", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("first")) }) r.Get("/one/{firstId:[a-z0-9-_]+}/{secondId:[0-9]+}/second", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("second")) }) r.Delete("/one/{firstId:[a-z0-9-_]+}/{secondId:[0-9]+}/second", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("third")) }) r.Route("/one", func(r Router) { r.Get("/{dns:[a-z-0-9_]+}", func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte("_")) }) r.Get("/{dns:[a-z-0-9_]+}/info", func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte("_")) }) r.Delete("/{id:[0-9]+}", func(writer 
http.ResponseWriter, request *http.Request) { writer.Write([]byte("forth")) }) }) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/one/hello/peter/first", nil); body != "first" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/one/hithere/123/second", nil); body != "second" { t.Fatalf(body) } if _, body := testRequest(t, ts, "DELETE", "/one/hithere/123/second", nil); body != "third" { t.Fatalf(body) } if _, body := testRequest(t, ts, "DELETE", "/one/123", nil); body != "forth" { t.Fatalf(body) } } func TestMuxSubrouterWildcardParam(t *testing.T) { h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, "param:%v *:%v", URLParam(r, "param"), URLParam(r, "*")) }) r := NewRouter() r.Get("/bare/{param}", h) r.Get("/bare/{param}/*", h) r.Route("/case0", func(r Router) { r.Get("/{param}", h) r.Get("/{param}/*", h) }) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/bare/hi", nil); body != "param:hi *:" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/bare/hi/yes", nil); body != "param:hi *:yes" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/case0/hi", nil); body != "param:hi *:" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/case0/hi/yes", nil); body != "param:hi *:yes" { t.Fatalf(body) } } func TestMuxContextIsThreadSafe(t *testing.T) { router := NewRouter() router.Get("/{id}", func(w http.ResponseWriter, r *http.Request) { ctx, cancel := context.WithTimeout(r.Context(), 1*time.Millisecond) defer cancel() <-ctx.Done() }) wg := sync.WaitGroup{} for i := 0; i < 100; i++ { wg.Add(1) go func() { defer wg.Done() for j := 0; j < 10000; j++ { w := httptest.NewRecorder() r, err := http.NewRequest("GET", "/ok", nil) if err != nil { t.Fatal(err) } ctx, cancel := context.WithCancel(r.Context()) r = r.WithContext(ctx) go func() { cancel() }() router.ServeHTTP(w, r) } }() } wg.Wait() } func TestEscapedURLParams(t *testing.T) { m 
:= NewRouter() m.Get("/api/{identifier}/{region}/{size}/{rotation}/*", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) rctx := RouteContext(r.Context()) if rctx == nil { t.Error("no context") return } identifier := URLParam(r, "identifier") if identifier != "http:%2f%2fexample.com%2fimage.png" { t.Errorf("identifier path parameter incorrect %s", identifier) return } region := URLParam(r, "region") if region != "full" { t.Errorf("region path parameter incorrect %s", region) return } size := URLParam(r, "size") if size != "max" { t.Errorf("size path parameter incorrect %s", size) return } rotation := URLParam(r, "rotation") if rotation != "0" { t.Errorf("rotation path parameter incorrect %s", rotation) return } w.Write([]byte("success")) }) ts := httptest.NewServer(m) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/api/http:%2f%2fexample.com%2fimage.png/full/max/0/color.png", nil); body != "success" { t.Fatalf(body) } } func TestMuxMatch(t *testing.T) { r := NewRouter() r.Get("/hi", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("X-Test", "yes") w.Write([]byte("bye")) }) r.Route("/articles", func(r Router) { r.Get("/{id}", func(w http.ResponseWriter, r *http.Request) { id := URLParam(r, "id") w.Header().Set("X-Article", id) w.Write([]byte("article:" + id)) }) }) r.Route("/users", func(r Router) { r.Head("/{id}", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("X-User", "-") w.Write([]byte("user")) }) r.Get("/{id}", func(w http.ResponseWriter, r *http.Request) { id := URLParam(r, "id") w.Header().Set("X-User", id) w.Write([]byte("user:" + id)) }) }) tctx := NewRouteContext() tctx.Reset() if r.Match(tctx, "GET", "/users/1") == false { t.Fatal("expecting to find match for route:", "GET", "/users/1") } tctx.Reset() if r.Match(tctx, "HEAD", "/articles/10") == true { t.Fatal("not expecting to find match for route:", "HEAD", "/articles/10") } } func TestServerBaseContext(t *testing.T) { r := NewRouter() r.Get("/", 
func(w http.ResponseWriter, r *http.Request) { baseYes := r.Context().Value(ctxKey{"base"}).(string) if _, ok := r.Context().Value(http.ServerContextKey).(*http.Server); !ok { panic("missing server context") } if _, ok := r.Context().Value(http.LocalAddrContextKey).(net.Addr); !ok { panic("missing local addr context") } w.Write([]byte(baseYes)) }) // Setup http Server with a base context ctx := context.WithValue(context.Background(), ctxKey{"base"}, "yes") ts := httptest.NewServer(r) ts.Config.BaseContext = func(_ net.Listener) context.Context { return ctx } defer ts.Close() if _, body := testRequest(t, ts, "GET", "/", nil); body != "yes" { t.Fatalf(body) } } func testRequest(t *testing.T, ts *httptest.Server, method, path string, body io.Reader) (*http.Response, string) { req, err := http.NewRequest(method, ts.URL+path, body) if err != nil { t.Fatal(err) return nil, "" } resp, err := http.DefaultClient.Do(req) if err != nil { t.Fatal(err) return nil, "" } respBody, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatal(err) return nil, "" } defer resp.Body.Close() return resp, string(respBody) } func testHandler(t *testing.T, h http.Handler, method, path string, body io.Reader) (*http.Response, string) { r, _ := http.NewRequest(method, path, body) w := httptest.NewRecorder() h.ServeHTTP(w, r) return w.Result(), w.Body.String() } type testFileSystem struct { open func(name string) (http.File, error) } func (fs *testFileSystem) Open(name string) (http.File, error) { return fs.open(name) } type testFile struct { name string contents []byte } func (tf *testFile) Close() error { return nil } func (tf *testFile) Read(p []byte) (n int, err error) { copy(p, tf.contents) return len(p), nil } func (tf *testFile) Seek(offset int64, whence int) (int64, error) { return 0, nil } func (tf *testFile) Readdir(count int) ([]os.FileInfo, error) { stat, _ := tf.Stat() return []os.FileInfo{stat}, nil } func (tf *testFile) Stat() (os.FileInfo, error) { return &testFileInfo{tf.name, 
int64(len(tf.contents))}, nil } type testFileInfo struct { name string size int64 } func (tfi *testFileInfo) Name() string { return tfi.name } func (tfi *testFileInfo) Size() int64 { return tfi.size } func (tfi *testFileInfo) Mode() os.FileMode { return 0755 } func (tfi *testFileInfo) ModTime() time.Time { return time.Now() } func (tfi *testFileInfo) IsDir() bool { return false } func (tfi *testFileInfo) Sys() interface{} { return nil } type ctxKey struct { name string } func (k ctxKey) String() string { return "context value " + k.name } func BenchmarkMux(b *testing.B) { h1 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) h2 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) h3 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) h4 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) h5 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) h6 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) mx := NewRouter() mx.Get("/", h1) mx.Get("/hi", h2) mx.Get("/sup/{id}/and/{this}", h3) mx.Get("/sup/{id}/{bar:foo}/{this}", h3) mx.Route("/sharing/{x}/{hash}", func(mx Router) { mx.Get("/", h4) // subrouter-1 mx.Get("/{network}", h5) // subrouter-1 mx.Get("/twitter", h5) mx.Route("/direct", func(mx Router) { mx.Get("/", h6) // subrouter-2 mx.Get("/download", h6) }) }) routes := []string{ "/", "/hi", "/sup/123/and/this", "/sup/123/foo/this", "/sharing/z/aBc", // subrouter-1 "/sharing/z/aBc/twitter", // subrouter-1 "/sharing/z/aBc/direct", // subrouter-2 "/sharing/z/aBc/direct/download", // subrouter-2 } for _, path := range routes { b.Run("route:"+path, func(b *testing.B) { w := httptest.NewRecorder() r, _ := http.NewRequest("GET", path, nil) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { mx.ServeHTTP(w, r) } }) } } Fix data race in TestServerBaseContext (#575) Update the test server config before starting it, to fix data race in TestServerBaseContext: $ go test 
-race ================== WARNING: DATA RACE Read at 0x00c00041c068 by goroutine 500: net/http.(*Server).Serve() /usr/lib/golang/src/net/http/server.go:2919 +0x325 net/http/httptest.(*Server).goServe.func1() /usr/lib/golang/src/net/http/httptest/server.go:308 +0xd3 Previous write at 0x00c00041c068 by goroutine 1298: github.com/go-chi/chi.TestServerBaseContext() /home/rantala/git/chi/mux_test.go:1687 +0x202 testing.tRunner() /usr/lib/golang/src/testing/testing.go:1050 +0x1eb Goroutine 500 (running) created at: net/http/httptest.(*Server).goServe() /usr/lib/golang/src/net/http/httptest/server.go:306 +0x69 net/http/httptest.(*Server).Start() /usr/lib/golang/src/net/http/httptest/server.go:132 +0x15c net/http/httptest.NewServer() /usr/lib/golang/src/net/http/httptest/server.go:105 +0x1a2 github.com/go-chi/chi.TestServerBaseContext() /home/rantala/git/chi/mux_test.go:1686 +0x144 testing.tRunner() /usr/lib/golang/src/testing/testing.go:1050 +0x1eb Goroutine 1298 (running) created at: testing.(*T).Run() /usr/lib/golang/src/testing/testing.go:1095 +0x537 testing.runTests.func1() /usr/lib/golang/src/testing/testing.go:1339 +0xa6 testing.tRunner() /usr/lib/golang/src/testing/testing.go:1050 +0x1eb testing.runTests() /usr/lib/golang/src/testing/testing.go:1337 +0x594 testing.(*M).Run() /usr/lib/golang/src/testing/testing.go:1252 +0x2ff main.main() _testmain.go:128 +0x223 ================== --- FAIL: TestServerBaseContext (0.00s) testing.go:965: race detected during execution of test FAIL package chi import ( "bytes" "context" "fmt" "io" "io/ioutil" "net" "net/http" "net/http/httptest" "os" "sync" "testing" "time" ) func TestMuxBasic(t *testing.T) { var count uint64 countermw := func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { count++ next.ServeHTTP(w, r) }) } usermw := func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() ctx = 
context.WithValue(ctx, ctxKey{"user"}, "peter") r = r.WithContext(ctx) next.ServeHTTP(w, r) }) } exmw := func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := context.WithValue(r.Context(), ctxKey{"ex"}, "a") r = r.WithContext(ctx) next.ServeHTTP(w, r) }) } logbuf := bytes.NewBufferString("") logmsg := "logmw test" logmw := func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { logbuf.WriteString(logmsg) next.ServeHTTP(w, r) }) } cxindex := func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() user := ctx.Value(ctxKey{"user"}).(string) w.WriteHeader(200) w.Write([]byte(fmt.Sprintf("hi %s", user))) } ping := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) w.Write([]byte(".")) } headPing := func(w http.ResponseWriter, r *http.Request) { w.Header().Set("X-Ping", "1") w.WriteHeader(200) } createPing := func(w http.ResponseWriter, r *http.Request) { // create .... w.WriteHeader(201) } pingAll := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) w.Write([]byte("ping all")) } pingAll2 := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) w.Write([]byte("ping all2")) } pingOne := func(w http.ResponseWriter, r *http.Request) { idParam := URLParam(r, "id") w.WriteHeader(200) w.Write([]byte(fmt.Sprintf("ping one id: %s", idParam))) } pingWoop := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) w.Write([]byte("woop." 
+ URLParam(r, "iidd"))) } catchAll := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) w.Write([]byte("catchall")) } m := NewRouter() m.Use(countermw) m.Use(usermw) m.Use(exmw) m.Use(logmw) m.Get("/", cxindex) m.Method("GET", "/ping", http.HandlerFunc(ping)) m.MethodFunc("GET", "/pingall", pingAll) m.MethodFunc("get", "/ping/all", pingAll) m.Get("/ping/all2", pingAll2) m.Head("/ping", headPing) m.Post("/ping", createPing) m.Get("/ping/{id}", pingWoop) m.Get("/ping/{id}", pingOne) // expected to overwrite to pingOne handler m.Get("/ping/{iidd}/woop", pingWoop) m.HandleFunc("/admin/*", catchAll) // m.Post("/admin/*", catchAll) ts := httptest.NewServer(m) defer ts.Close() // GET / if _, body := testRequest(t, ts, "GET", "/", nil); body != "hi peter" { t.Fatalf(body) } tlogmsg, _ := logbuf.ReadString(0) if tlogmsg != logmsg { t.Error("expecting log message from middleware:", logmsg) } // GET /ping if _, body := testRequest(t, ts, "GET", "/ping", nil); body != "." { t.Fatalf(body) } // GET /pingall if _, body := testRequest(t, ts, "GET", "/pingall", nil); body != "ping all" { t.Fatalf(body) } // GET /ping/all if _, body := testRequest(t, ts, "GET", "/ping/all", nil); body != "ping all" { t.Fatalf(body) } // GET /ping/all2 if _, body := testRequest(t, ts, "GET", "/ping/all2", nil); body != "ping all2" { t.Fatalf(body) } // GET /ping/123 if _, body := testRequest(t, ts, "GET", "/ping/123", nil); body != "ping one id: 123" { t.Fatalf(body) } // GET /ping/allan if _, body := testRequest(t, ts, "GET", "/ping/allan", nil); body != "ping one id: allan" { t.Fatalf(body) } // GET /ping/1/woop if _, body := testRequest(t, ts, "GET", "/ping/1/woop", nil); body != "woop.1" { t.Fatalf(body) } // HEAD /ping resp, err := http.Head(ts.URL + "/ping") if err != nil { t.Fatal(err) } if resp.StatusCode != 200 { t.Error("head failed, should be 200") } if resp.Header.Get("X-Ping") == "" { t.Error("expecting X-Ping header") } // GET /admin/catch-this if _, body := 
testRequest(t, ts, "GET", "/admin/catch-thazzzzz", nil); body != "catchall" {
		t.Fatalf(body)
	}

	// POST /admin/catch-this
	resp, err = http.Post(ts.URL+"/admin/casdfsadfs", "text/plain", bytes.NewReader([]byte{}))
	if err != nil {
		t.Fatal(err)
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		t.Error("POST failed, should be 200")
	}

	if string(body) != "catchall" {
		t.Error("expecting response body: 'catchall'")
	}

	// Custom http method DIE /ping/1/woop
	if resp, body := testRequest(t, ts, "DIE", "/ping/1/woop", nil); body != "" || resp.StatusCode != 405 {
		t.Fatalf(fmt.Sprintf("expecting 405 status and empty body, got %d '%s'", resp.StatusCode, body))
	}
}

// TestMuxMounts verifies that a subrouter mounted under "/sharing" still sees
// URL params captured by both its own patterns and the mount-point pattern.
func TestMuxMounts(t *testing.T) {
	r := NewRouter()

	r.Get("/{hash}", func(w http.ResponseWriter, r *http.Request) {
		v := URLParam(r, "hash")
		w.Write([]byte(fmt.Sprintf("/%s", v)))
	})

	r.Route("/{hash}/share", func(r Router) {
		r.Get("/", func(w http.ResponseWriter, r *http.Request) {
			v := URLParam(r, "hash")
			w.Write([]byte(fmt.Sprintf("/%s/share", v)))
		})
		r.Get("/{network}", func(w http.ResponseWriter, r *http.Request) {
			v := URLParam(r, "hash")
			n := URLParam(r, "network")
			w.Write([]byte(fmt.Sprintf("/%s/share/%s", v, n)))
		})
	})

	m := NewRouter()
	m.Mount("/sharing", r)

	ts := httptest.NewServer(m)
	defer ts.Close()

	// body is a server response, not a format string: pass it through "%s"
	// so a '%' in the payload cannot be misread as a printf verb (go vet's
	// printf check flags t.Fatalf(body)).
	if _, body := testRequest(t, ts, "GET", "/sharing/aBc", nil); body != "/aBc" {
		t.Fatalf("%s", body)
	}
	if _, body := testRequest(t, ts, "GET", "/sharing/aBc/share", nil); body != "/aBc/share" {
		t.Fatalf("%s", body)
	}
	if _, body := testRequest(t, ts, "GET", "/sharing/aBc/share/twitter", nil); body != "/aBc/share/twitter" {
		t.Fatalf("%s", body)
	}
}

// TestMuxPlain checks a basic route plus a custom NotFound handler.
// (Definition continues on the next source line.)
func TestMuxPlain(t *testing.T) {
	r := NewRouter()
	r.Get("/hi", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("bye"))
	})
	r.NotFound(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(404)
		w.Write([]byte("nothing here"))
	})

	ts := httptest.NewServer(r)
	defer ts.Close()

	if _, body :=
testRequest(t, ts, "GET", "/hi", nil); body != "bye" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/nothing-here", nil); body != "nothing here" { t.Fatalf(body) } } func TestMuxEmptyRoutes(t *testing.T) { mux := NewRouter() apiRouter := NewRouter() // oops, we forgot to declare any route handlers mux.Handle("/api*", apiRouter) if _, body := testHandler(t, mux, "GET", "/", nil); body != "404 page not found\n" { t.Fatalf(body) } if _, body := testHandler(t, apiRouter, "GET", "/", nil); body != "404 page not found\n" { t.Fatalf(body) } } // Test a mux that routes a trailing slash, see also middleware/strip_test.go // for an example of using a middleware to handle trailing slashes. func TestMuxTrailingSlash(t *testing.T) { r := NewRouter() r.NotFound(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(404) w.Write([]byte("nothing here")) }) subRoutes := NewRouter() indexHandler := func(w http.ResponseWriter, r *http.Request) { accountID := URLParam(r, "accountID") w.Write([]byte(accountID)) } subRoutes.Get("/", indexHandler) r.Mount("/accounts/{accountID}", subRoutes) r.Get("/accounts/{accountID}/", indexHandler) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/accounts/admin", nil); body != "admin" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/accounts/admin/", nil); body != "admin" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/nothing-here", nil); body != "nothing here" { t.Fatalf(body) } } func TestMuxNestedNotFound(t *testing.T) { r := NewRouter() r.Use(func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { r = r.WithContext(context.WithValue(r.Context(), ctxKey{"mw"}, "mw")) next.ServeHTTP(w, r) }) }) r.Get("/hi", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("bye")) }) r.With(func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { r = 
r.WithContext(context.WithValue(r.Context(), ctxKey{"with"}, "with")) next.ServeHTTP(w, r) }) }).NotFound(func(w http.ResponseWriter, r *http.Request) { chkMw := r.Context().Value(ctxKey{"mw"}).(string) chkWith := r.Context().Value(ctxKey{"with"}).(string) w.WriteHeader(404) w.Write([]byte(fmt.Sprintf("root 404 %s %s", chkMw, chkWith))) }) sr1 := NewRouter() sr1.Get("/sub", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("sub")) }) sr1.Group(func(sr1 Router) { sr1.Use(func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { r = r.WithContext(context.WithValue(r.Context(), ctxKey{"mw2"}, "mw2")) next.ServeHTTP(w, r) }) }) sr1.NotFound(func(w http.ResponseWriter, r *http.Request) { chkMw2 := r.Context().Value(ctxKey{"mw2"}).(string) w.WriteHeader(404) w.Write([]byte(fmt.Sprintf("sub 404 %s", chkMw2))) }) }) sr2 := NewRouter() sr2.Get("/sub", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("sub2")) }) r.Mount("/admin1", sr1) r.Mount("/admin2", sr2) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/hi", nil); body != "bye" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/nothing-here", nil); body != "root 404 mw with" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/admin1/sub", nil); body != "sub" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/admin1/nope", nil); body != "sub 404 mw2" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/admin2/sub", nil); body != "sub2" { t.Fatalf(body) } // Not found pages should bubble up to the root. 
if _, body := testRequest(t, ts, "GET", "/admin2/nope", nil); body != "root 404 mw with" { t.Fatalf(body) } } func TestMuxNestedMethodNotAllowed(t *testing.T) { r := NewRouter() r.Get("/root", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("root")) }) r.MethodNotAllowed(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(405) w.Write([]byte("root 405")) }) sr1 := NewRouter() sr1.Get("/sub1", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("sub1")) }) sr1.MethodNotAllowed(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(405) w.Write([]byte("sub1 405")) }) sr2 := NewRouter() sr2.Get("/sub2", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("sub2")) }) pathVar := NewRouter() pathVar.Get("/{var}", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("pv")) }) pathVar.MethodNotAllowed(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(405) w.Write([]byte("pv 405")) }) r.Mount("/prefix1", sr1) r.Mount("/prefix2", sr2) r.Mount("/pathVar", pathVar) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/root", nil); body != "root" { t.Fatalf(body) } if _, body := testRequest(t, ts, "PUT", "/root", nil); body != "root 405" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/prefix1/sub1", nil); body != "sub1" { t.Fatalf(body) } if _, body := testRequest(t, ts, "PUT", "/prefix1/sub1", nil); body != "sub1 405" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/prefix2/sub2", nil); body != "sub2" { t.Fatalf(body) } if _, body := testRequest(t, ts, "PUT", "/prefix2/sub2", nil); body != "root 405" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/pathVar/myvar", nil); body != "pv" { t.Fatalf(body) } if _, body := testRequest(t, ts, "DELETE", "/pathVar/myvar", nil); body != "pv 405" { t.Fatalf(body) } } func TestMuxComplicatedNotFound(t *testing.T) { decorateRouter := func(r *Mux) { // Root router with groups r.Get("/auth", func(w 
http.ResponseWriter, r *http.Request) { w.Write([]byte("auth get")) }) r.Route("/public", func(r Router) { r.Get("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("public get")) }) }) // sub router with groups sub0 := NewRouter() sub0.Route("/resource", func(r Router) { r.Get("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("private get")) }) }) r.Mount("/private", sub0) // sub router with groups sub1 := NewRouter() sub1.Route("/resource", func(r Router) { r.Get("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("private get")) }) }) r.With(func(next http.Handler) http.Handler { return next }).Mount("/private_mw", sub1) } testNotFound := func(t *testing.T, r *Mux) { ts := httptest.NewServer(r) defer ts.Close() // check that we didn't break correct routes if _, body := testRequest(t, ts, "GET", "/auth", nil); body != "auth get" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/public", nil); body != "public get" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/public/", nil); body != "public get" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/private/resource", nil); body != "private get" { t.Fatalf(body) } // check custom not-found on all levels if _, body := testRequest(t, ts, "GET", "/nope", nil); body != "custom not-found" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/public/nope", nil); body != "custom not-found" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/private/nope", nil); body != "custom not-found" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/private/resource/nope", nil); body != "custom not-found" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/private_mw/nope", nil); body != "custom not-found" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/private_mw/resource/nope", nil); body != "custom not-found" { t.Fatalf(body) } // check custom not-found on trailing slash routes if _, body := 
testRequest(t, ts, "GET", "/auth/", nil); body != "custom not-found" { t.Fatalf(body) } } t.Run("pre", func(t *testing.T) { r := NewRouter() r.NotFound(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("custom not-found")) }) decorateRouter(r) testNotFound(t, r) }) t.Run("post", func(t *testing.T) { r := NewRouter() decorateRouter(r) r.NotFound(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("custom not-found")) }) testNotFound(t, r) }) } func TestMuxWith(t *testing.T) { var cmwInit1, cmwHandler1 uint64 var cmwInit2, cmwHandler2 uint64 mw1 := func(next http.Handler) http.Handler { cmwInit1++ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { cmwHandler1++ r = r.WithContext(context.WithValue(r.Context(), ctxKey{"inline1"}, "yes")) next.ServeHTTP(w, r) }) } mw2 := func(next http.Handler) http.Handler { cmwInit2++ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { cmwHandler2++ r = r.WithContext(context.WithValue(r.Context(), ctxKey{"inline2"}, "yes")) next.ServeHTTP(w, r) }) } r := NewRouter() r.Get("/hi", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("bye")) }) r.With(mw1).With(mw2).Get("/inline", func(w http.ResponseWriter, r *http.Request) { v1 := r.Context().Value(ctxKey{"inline1"}).(string) v2 := r.Context().Value(ctxKey{"inline2"}).(string) w.Write([]byte(fmt.Sprintf("inline %s %s", v1, v2))) }) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/hi", nil); body != "bye" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/inline", nil); body != "inline yes yes" { t.Fatalf(body) } if cmwInit1 != 1 { t.Fatalf("expecting cmwInit1 to be 1, got %d", cmwInit1) } if cmwHandler1 != 1 { t.Fatalf("expecting cmwHandler1 to be 1, got %d", cmwHandler1) } if cmwInit2 != 1 { t.Fatalf("expecting cmwInit2 to be 1, got %d", cmwInit2) } if cmwHandler2 != 1 { t.Fatalf("expecting cmwHandler2 to be 1, got %d", cmwHandler2) } } func TestRouterFromMuxWith(t 
*testing.T) { t.Parallel() r := NewRouter() with := r.With(func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) }) }) with.Get("/with_middleware", func(w http.ResponseWriter, r *http.Request) {}) ts := httptest.NewServer(with) defer ts.Close() // Without the fix this test was committed with, this causes a panic. testRequest(t, ts, http.MethodGet, "/with_middleware", nil) } func TestMuxMiddlewareStack(t *testing.T) { var stdmwInit, stdmwHandler uint64 stdmw := func(next http.Handler) http.Handler { stdmwInit++ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { stdmwHandler++ next.ServeHTTP(w, r) }) } _ = stdmw var ctxmwInit, ctxmwHandler uint64 ctxmw := func(next http.Handler) http.Handler { ctxmwInit++ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctxmwHandler++ ctx := r.Context() ctx = context.WithValue(ctx, ctxKey{"count.ctxmwHandler"}, ctxmwHandler) r = r.WithContext(ctx) next.ServeHTTP(w, r) }) } var inCtxmwInit, inCtxmwHandler uint64 inCtxmw := func(next http.Handler) http.Handler { inCtxmwInit++ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { inCtxmwHandler++ next.ServeHTTP(w, r) }) } r := NewRouter() r.Use(stdmw) r.Use(ctxmw) r.Use(func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/ping" { w.Write([]byte("pong")) return } next.ServeHTTP(w, r) }) }) var handlerCount uint64 r.With(inCtxmw).Get("/", func(w http.ResponseWriter, r *http.Request) { handlerCount++ ctx := r.Context() ctxmwHandlerCount := ctx.Value(ctxKey{"count.ctxmwHandler"}).(uint64) w.Write([]byte(fmt.Sprintf("inits:%d reqs:%d ctxValue:%d", ctxmwInit, handlerCount, ctxmwHandlerCount))) }) r.Get("/hi", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("wooot")) }) ts := httptest.NewServer(r) defer ts.Close() testRequest(t, ts, "GET", "/", nil) testRequest(t, ts, 
"GET", "/", nil) var body string _, body = testRequest(t, ts, "GET", "/", nil) if body != "inits:1 reqs:3 ctxValue:3" { t.Fatalf("got: '%s'", body) } _, body = testRequest(t, ts, "GET", "/ping", nil) if body != "pong" { t.Fatalf("got: '%s'", body) } } func TestMuxRouteGroups(t *testing.T) { var stdmwInit, stdmwHandler uint64 stdmw := func(next http.Handler) http.Handler { stdmwInit++ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { stdmwHandler++ next.ServeHTTP(w, r) }) } var stdmwInit2, stdmwHandler2 uint64 stdmw2 := func(next http.Handler) http.Handler { stdmwInit2++ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { stdmwHandler2++ next.ServeHTTP(w, r) }) } r := NewRouter() r.Group(func(r Router) { r.Use(stdmw) r.Get("/group", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("root group")) }) }) r.Group(func(r Router) { r.Use(stdmw2) r.Get("/group2", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("root group2")) }) }) ts := httptest.NewServer(r) defer ts.Close() // GET /group _, body := testRequest(t, ts, "GET", "/group", nil) if body != "root group" { t.Fatalf("got: '%s'", body) } if stdmwInit != 1 || stdmwHandler != 1 { t.Logf("stdmw counters failed, should be 1:1, got %d:%d", stdmwInit, stdmwHandler) } // GET /group2 _, body = testRequest(t, ts, "GET", "/group2", nil) if body != "root group2" { t.Fatalf("got: '%s'", body) } if stdmwInit2 != 1 || stdmwHandler2 != 1 { t.Fatalf("stdmw2 counters failed, should be 1:1, got %d:%d", stdmwInit2, stdmwHandler2) } } func TestMuxBig(t *testing.T) { r := bigMux() ts := httptest.NewServer(r) defer ts.Close() var body, expected string _, body = testRequest(t, ts, "GET", "/favicon.ico", nil) if body != "fav" { t.Fatalf("got '%s'", body) } _, body = testRequest(t, ts, "GET", "/hubs/4/view", nil) if body != "/hubs/4/view reqid:1 session:anonymous" { t.Fatalf("got '%v'", body) } _, body = testRequest(t, ts, "GET", "/hubs/4/view/index.html", nil) if body != 
"/hubs/4/view/index.html reqid:1 session:anonymous" { t.Fatalf("got '%s'", body) } _, body = testRequest(t, ts, "POST", "/hubs/ethereumhub/view/index.html", nil) if body != "/hubs/ethereumhub/view/index.html reqid:1 session:anonymous" { t.Fatalf("got '%s'", body) } _, body = testRequest(t, ts, "GET", "/", nil) if body != "/ reqid:1 session:elvis" { t.Fatalf("got '%s'", body) } _, body = testRequest(t, ts, "GET", "/suggestions", nil) if body != "/suggestions reqid:1 session:elvis" { t.Fatalf("got '%s'", body) } _, body = testRequest(t, ts, "GET", "/woot/444/hiiii", nil) if body != "/woot/444/hiiii" { t.Fatalf("got '%s'", body) } _, body = testRequest(t, ts, "GET", "/hubs/123", nil) expected = "/hubs/123 reqid:1 session:elvis" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } _, body = testRequest(t, ts, "GET", "/hubs/123/touch", nil) if body != "/hubs/123/touch reqid:1 session:elvis" { t.Fatalf("got '%s'", body) } _, body = testRequest(t, ts, "GET", "/hubs/123/webhooks", nil) if body != "/hubs/123/webhooks reqid:1 session:elvis" { t.Fatalf("got '%s'", body) } _, body = testRequest(t, ts, "GET", "/hubs/123/posts", nil) if body != "/hubs/123/posts reqid:1 session:elvis" { t.Fatalf("got '%s'", body) } _, body = testRequest(t, ts, "GET", "/folders", nil) if body != "404 page not found\n" { t.Fatalf("got '%s'", body) } _, body = testRequest(t, ts, "GET", "/folders/", nil) if body != "/folders/ reqid:1 session:elvis" { t.Fatalf("got '%s'", body) } _, body = testRequest(t, ts, "GET", "/folders/public", nil) if body != "/folders/public reqid:1 session:elvis" { t.Fatalf("got '%s'", body) } _, body = testRequest(t, ts, "GET", "/folders/nothing", nil) if body != "404 page not found\n" { t.Fatalf("got '%s'", body) } } func bigMux() Router { var r *Mux var sr3 *Mux // var sr1, sr2, sr3, sr4, sr5, sr6 *Mux r = NewRouter() r.Use(func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := 
context.WithValue(r.Context(), ctxKey{"requestID"}, "1") next.ServeHTTP(w, r.WithContext(ctx)) }) }) r.Use(func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) }) }) r.Group(func(r Router) { r.Use(func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := context.WithValue(r.Context(), ctxKey{"session.user"}, "anonymous") next.ServeHTTP(w, r.WithContext(ctx)) }) }) r.Get("/favicon.ico", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("fav")) }) r.Get("/hubs/{hubID}/view", func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() s := fmt.Sprintf("/hubs/%s/view reqid:%s session:%s", URLParam(r, "hubID"), ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"})) w.Write([]byte(s)) }) r.Get("/hubs/{hubID}/view/*", func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() s := fmt.Sprintf("/hubs/%s/view/%s reqid:%s session:%s", URLParamFromCtx(ctx, "hubID"), URLParam(r, "*"), ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"})) w.Write([]byte(s)) }) r.Post("/hubs/{hubSlug}/view/*", func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() s := fmt.Sprintf("/hubs/%s/view/%s reqid:%s session:%s", URLParamFromCtx(ctx, "hubSlug"), URLParam(r, "*"), ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"})) w.Write([]byte(s)) }) }) r.Group(func(r Router) { r.Use(func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := context.WithValue(r.Context(), ctxKey{"session.user"}, "elvis") next.ServeHTTP(w, r.WithContext(ctx)) }) }) r.Get("/", func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() s := fmt.Sprintf("/ reqid:%s session:%s", ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"})) w.Write([]byte(s)) }) r.Get("/suggestions", func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() 
s := fmt.Sprintf("/suggestions reqid:%s session:%s", ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"})) w.Write([]byte(s)) }) r.Get("/woot/{wootID}/*", func(w http.ResponseWriter, r *http.Request) { s := fmt.Sprintf("/woot/%s/%s", URLParam(r, "wootID"), URLParam(r, "*")) w.Write([]byte(s)) }) r.Route("/hubs", func(r Router) { _ = r.(*Mux) // sr1 r.Route("/{hubID}", func(r Router) { _ = r.(*Mux) // sr2 r.Get("/", func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() s := fmt.Sprintf("/hubs/%s reqid:%s session:%s", URLParam(r, "hubID"), ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"})) w.Write([]byte(s)) }) r.Get("/touch", func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() s := fmt.Sprintf("/hubs/%s/touch reqid:%s session:%s", URLParam(r, "hubID"), ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"})) w.Write([]byte(s)) }) sr3 = NewRouter() sr3.Get("/", func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() s := fmt.Sprintf("/hubs/%s/webhooks reqid:%s session:%s", URLParam(r, "hubID"), ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"})) w.Write([]byte(s)) }) sr3.Route("/{webhookID}", func(r Router) { _ = r.(*Mux) // sr4 r.Get("/", func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() s := fmt.Sprintf("/hubs/%s/webhooks/%s reqid:%s session:%s", URLParam(r, "hubID"), URLParam(r, "webhookID"), ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"})) w.Write([]byte(s)) }) }) r.Mount("/webhooks", Chain(func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), ctxKey{"hook"}, true))) }) }).Handler(sr3)) r.Route("/posts", func(r Router) { _ = r.(*Mux) // sr5 r.Get("/", func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() s := fmt.Sprintf("/hubs/%s/posts reqid:%s session:%s", URLParam(r, "hubID"), ctx.Value(ctxKey{"requestID"}), 
ctx.Value(ctxKey{"session.user"})) w.Write([]byte(s)) }) }) }) }) r.Route("/folders/", func(r Router) { _ = r.(*Mux) // sr6 r.Get("/", func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() s := fmt.Sprintf("/folders/ reqid:%s session:%s", ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"})) w.Write([]byte(s)) }) r.Get("/public", func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() s := fmt.Sprintf("/folders/public reqid:%s session:%s", ctx.Value(ctxKey{"requestID"}), ctx.Value(ctxKey{"session.user"})) w.Write([]byte(s)) }) }) }) return r } func TestMuxSubroutesBasic(t *testing.T) { hIndex := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("index")) }) hArticlesList := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("articles-list")) }) hSearchArticles := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("search-articles")) }) hGetArticle := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte(fmt.Sprintf("get-article:%s", URLParam(r, "id")))) }) hSyncArticle := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte(fmt.Sprintf("sync-article:%s", URLParam(r, "id")))) }) r := NewRouter() // var rr1, rr2 *Mux r.Get("/", hIndex) r.Route("/articles", func(r Router) { // rr1 = r.(*Mux) r.Get("/", hArticlesList) r.Get("/search", hSearchArticles) r.Route("/{id}", func(r Router) { // rr2 = r.(*Mux) r.Get("/", hGetArticle) r.Get("/sync", hSyncArticle) }) }) // log.Println("~~~~~~~~~") // log.Println("~~~~~~~~~") // debugPrintTree(0, 0, r.tree, 0) // log.Println("~~~~~~~~~") // log.Println("~~~~~~~~~") // log.Println("~~~~~~~~~") // log.Println("~~~~~~~~~") // debugPrintTree(0, 0, rr1.tree, 0) // log.Println("~~~~~~~~~") // log.Println("~~~~~~~~~") // log.Println("~~~~~~~~~") // log.Println("~~~~~~~~~") // debugPrintTree(0, 0, rr2.tree, 0) // log.Println("~~~~~~~~~") // log.Println("~~~~~~~~~") ts 
:= httptest.NewServer(r) defer ts.Close() var body, expected string _, body = testRequest(t, ts, "GET", "/", nil) expected = "index" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } _, body = testRequest(t, ts, "GET", "/articles", nil) expected = "articles-list" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } _, body = testRequest(t, ts, "GET", "/articles/search", nil) expected = "search-articles" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } _, body = testRequest(t, ts, "GET", "/articles/123", nil) expected = "get-article:123" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } _, body = testRequest(t, ts, "GET", "/articles/123/sync", nil) expected = "sync-article:123" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } } func TestMuxSubroutes(t *testing.T) { hHubView1 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("hub1")) }) hHubView2 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("hub2")) }) hHubView3 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("hub3")) }) hAccountView1 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("account1")) }) hAccountView2 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("account2")) }) r := NewRouter() r.Get("/hubs/{hubID}/view", hHubView1) r.Get("/hubs/{hubID}/view/*", hHubView2) sr := NewRouter() sr.Get("/", hHubView3) r.Mount("/hubs/{hubID}/users", sr) r.Get("/hubs/{hubID}/users/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("hub3 override")) }) sr3 := NewRouter() sr3.Get("/", hAccountView1) sr3.Get("/hi", hAccountView2) // var sr2 *Mux r.Route("/accounts/{accountID}", func(r Router) { _ = r.(*Mux) // sr2 // r.Get("/", hAccountView1) r.Mount("/", sr3) }) // This is the same as the r.Route() call mounted on sr2 // sr2 := NewRouter() // 
sr2.Mount("/", sr3) // r.Mount("/accounts/{accountID}", sr2) ts := httptest.NewServer(r) defer ts.Close() var body, expected string _, body = testRequest(t, ts, "GET", "/hubs/123/view", nil) expected = "hub1" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } _, body = testRequest(t, ts, "GET", "/hubs/123/view/index.html", nil) expected = "hub2" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } _, body = testRequest(t, ts, "GET", "/hubs/123/users", nil) expected = "hub3" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } _, body = testRequest(t, ts, "GET", "/hubs/123/users/", nil) expected = "hub3 override" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } _, body = testRequest(t, ts, "GET", "/accounts/44", nil) expected = "account1" if body != expected { t.Fatalf("request:%s expected:%s got:%s", "GET /accounts/44", expected, body) } _, body = testRequest(t, ts, "GET", "/accounts/44/hi", nil) expected = "account2" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } // Test that we're building the routingPatterns properly router := r req, _ := http.NewRequest("GET", "/accounts/44/hi", nil) rctx := NewRouteContext() req = req.WithContext(context.WithValue(req.Context(), RouteCtxKey, rctx)) w := httptest.NewRecorder() router.ServeHTTP(w, req) body = w.Body.String() expected = "account2" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } routePatterns := rctx.RoutePatterns if len(rctx.RoutePatterns) != 3 { t.Fatalf("expected 3 routing patterns, got:%d", len(rctx.RoutePatterns)) } expected = "/accounts/{accountID}/*" if routePatterns[0] != expected { t.Fatalf("routePattern, expected:%s got:%s", expected, routePatterns[0]) } expected = "/*" if routePatterns[1] != expected { t.Fatalf("routePattern, expected:%s got:%s", expected, routePatterns[1]) } expected = "/hi" if routePatterns[2] != expected { t.Fatalf("routePattern, expected:%s got:%s", expected, 
routePatterns[2]) } } func TestSingleHandler(t *testing.T) { h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { name := URLParam(r, "name") w.Write([]byte("hi " + name)) }) r, _ := http.NewRequest("GET", "/", nil) rctx := NewRouteContext() r = r.WithContext(context.WithValue(r.Context(), RouteCtxKey, rctx)) rctx.URLParams.Add("name", "joe") w := httptest.NewRecorder() h.ServeHTTP(w, r) body := w.Body.String() expected := "hi joe" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } } // TODO: a Router wrapper test.. // // type ACLMux struct { // *Mux // XX string // } // // func NewACLMux() *ACLMux { // return &ACLMux{Mux: NewRouter(), XX: "hihi"} // } // // // TODO: this should be supported... // func TestWoot(t *testing.T) { // var r Router = NewRouter() // // var r2 Router = NewACLMux() //NewRouter() // r2.Get("/hi", func(w http.ResponseWriter, r *http.Request) { // w.Write([]byte("hi")) // }) // // r.Mount("/", r2) // } func TestServeHTTPExistingContext(t *testing.T) { r := NewRouter() r.Get("/hi", func(w http.ResponseWriter, r *http.Request) { s, _ := r.Context().Value(ctxKey{"testCtx"}).(string) w.Write([]byte(s)) }) r.NotFound(func(w http.ResponseWriter, r *http.Request) { s, _ := r.Context().Value(ctxKey{"testCtx"}).(string) w.WriteHeader(404) w.Write([]byte(s)) }) testcases := []struct { Method string Path string Ctx context.Context ExpectedStatus int ExpectedBody string }{ { Method: "GET", Path: "/hi", Ctx: context.WithValue(context.Background(), ctxKey{"testCtx"}, "hi ctx"), ExpectedStatus: 200, ExpectedBody: "hi ctx", }, { Method: "GET", Path: "/hello", Ctx: context.WithValue(context.Background(), ctxKey{"testCtx"}, "nothing here ctx"), ExpectedStatus: 404, ExpectedBody: "nothing here ctx", }, } for _, tc := range testcases { resp := httptest.NewRecorder() req, err := http.NewRequest(tc.Method, tc.Path, nil) if err != nil { t.Fatalf("%v", err) } req = req.WithContext(tc.Ctx) r.ServeHTTP(resp, req) b, err := 
ioutil.ReadAll(resp.Body) if err != nil { t.Fatalf("%v", err) } if resp.Code != tc.ExpectedStatus { t.Fatalf("%v != %v", tc.ExpectedStatus, resp.Code) } if string(b) != tc.ExpectedBody { t.Fatalf("%s != %s", tc.ExpectedBody, b) } } } func TestNestedGroups(t *testing.T) { handlerPrintCounter := func(w http.ResponseWriter, r *http.Request) { counter, _ := r.Context().Value(ctxKey{"counter"}).(int) w.Write([]byte(fmt.Sprintf("%v", counter))) } mwIncreaseCounter := func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() counter, _ := ctx.Value(ctxKey{"counter"}).(int) counter++ ctx = context.WithValue(ctx, ctxKey{"counter"}, counter) next.ServeHTTP(w, r.WithContext(ctx)) }) } // Each route represents value of its counter (number of applied middlewares). r := NewRouter() // counter == 0 r.Get("/0", handlerPrintCounter) r.Group(func(r Router) { r.Use(mwIncreaseCounter) // counter == 1 r.Get("/1", handlerPrintCounter) // r.Handle(GET, "/2", Chain(mwIncreaseCounter).HandlerFunc(handlerPrintCounter)) r.With(mwIncreaseCounter).Get("/2", handlerPrintCounter) r.Group(func(r Router) { r.Use(mwIncreaseCounter, mwIncreaseCounter) // counter == 3 r.Get("/3", handlerPrintCounter) }) r.Route("/", func(r Router) { r.Use(mwIncreaseCounter, mwIncreaseCounter) // counter == 3 // r.Handle(GET, "/4", Chain(mwIncreaseCounter).HandlerFunc(handlerPrintCounter)) r.With(mwIncreaseCounter).Get("/4", handlerPrintCounter) r.Group(func(r Router) { r.Use(mwIncreaseCounter, mwIncreaseCounter) // counter == 5 r.Get("/5", handlerPrintCounter) // r.Handle(GET, "/6", Chain(mwIncreaseCounter).HandlerFunc(handlerPrintCounter)) r.With(mwIncreaseCounter).Get("/6", handlerPrintCounter) }) }) }) ts := httptest.NewServer(r) defer ts.Close() for _, route := range []string{"0", "1", "2", "3", "4", "5", "6"} { if _, body := testRequest(t, ts, "GET", "/"+route, nil); body != route { t.Errorf("expected %v, got %v", route, body) } } } func 
TestMiddlewarePanicOnLateUse(t *testing.T) { handler := func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("hello\n")) } mw := func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) }) } defer func() { if recover() == nil { t.Error("expected panic()") } }() r := NewRouter() r.Get("/", handler) r.Use(mw) // Too late to apply middleware, we're expecting panic(). } func TestMountingExistingPath(t *testing.T) { handler := func(w http.ResponseWriter, r *http.Request) {} defer func() { if recover() == nil { t.Error("expected panic()") } }() r := NewRouter() r.Get("/", handler) r.Mount("/hi", http.HandlerFunc(handler)) r.Mount("/hi", http.HandlerFunc(handler)) } func TestMountingSimilarPattern(t *testing.T) { r := NewRouter() r.Get("/hi", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("bye")) }) r2 := NewRouter() r2.Get("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("foobar")) }) r3 := NewRouter() r3.Get("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("foo")) }) r.Mount("/foobar", r2) r.Mount("/foo", r3) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/hi", nil); body != "bye" { t.Fatalf(body) } } func TestMuxEmptyParams(t *testing.T) { r := NewRouter() r.Get(`/users/{x}/{y}/{z}`, func(w http.ResponseWriter, r *http.Request) { x := URLParam(r, "x") y := URLParam(r, "y") z := URLParam(r, "z") w.Write([]byte(fmt.Sprintf("%s-%s-%s", x, y, z))) }) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/users/a/b/c", nil); body != "a-b-c" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/users///c", nil); body != "--c" { t.Fatalf(body) } } func TestMuxMissingParams(t *testing.T) { r := NewRouter() r.Get(`/user/{userId:\d+}`, func(w http.ResponseWriter, r *http.Request) { userID := URLParam(r, "userId") w.Write([]byte(fmt.Sprintf("userId = '%s'", userID))) }) 
r.NotFound(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(404) w.Write([]byte("nothing here")) }) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/user/123", nil); body != "userId = '123'" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/user/", nil); body != "nothing here" { t.Fatalf(body) } } func TestMuxWildcardRoute(t *testing.T) { handler := func(w http.ResponseWriter, r *http.Request) {} defer func() { if recover() == nil { t.Error("expected panic()") } }() r := NewRouter() r.Get("/*/wildcard/must/be/at/end", handler) } func TestMuxWildcardRouteCheckTwo(t *testing.T) { handler := func(w http.ResponseWriter, r *http.Request) {} defer func() { if recover() == nil { t.Error("expected panic()") } }() r := NewRouter() r.Get("/*/wildcard/{must}/be/at/end", handler) } func TestMuxRegexp(t *testing.T) { r := NewRouter() r.Route("/{param:[0-9]*}/test", func(r Router) { r.Get("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte(fmt.Sprintf("Hi: %s", URLParam(r, "param")))) }) }) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "//test", nil); body != "Hi: " { t.Fatalf(body) } } func TestMuxRegexp2(t *testing.T) { r := NewRouter() r.Get("/foo-{suffix:[a-z]{2,3}}.json", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte(URLParam(r, "suffix"))) }) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/foo-.json", nil); body != "" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/foo-abc.json", nil); body != "abc" { t.Fatalf(body) } } func TestMuxRegexp3(t *testing.T) { r := NewRouter() r.Get("/one/{firstId:[a-z0-9-]+}/{secondId:[a-z]+}/first", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("first")) }) r.Get("/one/{firstId:[a-z0-9-_]+}/{secondId:[0-9]+}/second", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("second")) }) 
r.Delete("/one/{firstId:[a-z0-9-_]+}/{secondId:[0-9]+}/second", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("third")) }) r.Route("/one", func(r Router) { r.Get("/{dns:[a-z-0-9_]+}", func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte("_")) }) r.Get("/{dns:[a-z-0-9_]+}/info", func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte("_")) }) r.Delete("/{id:[0-9]+}", func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte("forth")) }) }) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/one/hello/peter/first", nil); body != "first" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/one/hithere/123/second", nil); body != "second" { t.Fatalf(body) } if _, body := testRequest(t, ts, "DELETE", "/one/hithere/123/second", nil); body != "third" { t.Fatalf(body) } if _, body := testRequest(t, ts, "DELETE", "/one/123", nil); body != "forth" { t.Fatalf(body) } } func TestMuxSubrouterWildcardParam(t *testing.T) { h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, "param:%v *:%v", URLParam(r, "param"), URLParam(r, "*")) }) r := NewRouter() r.Get("/bare/{param}", h) r.Get("/bare/{param}/*", h) r.Route("/case0", func(r Router) { r.Get("/{param}", h) r.Get("/{param}/*", h) }) ts := httptest.NewServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/bare/hi", nil); body != "param:hi *:" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/bare/hi/yes", nil); body != "param:hi *:yes" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/case0/hi", nil); body != "param:hi *:" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/case0/hi/yes", nil); body != "param:hi *:yes" { t.Fatalf(body) } } func TestMuxContextIsThreadSafe(t *testing.T) { router := NewRouter() router.Get("/{id}", func(w http.ResponseWriter, r *http.Request) { ctx, cancel := context.WithTimeout(r.Context(), 
1*time.Millisecond) defer cancel() <-ctx.Done() }) wg := sync.WaitGroup{} for i := 0; i < 100; i++ { wg.Add(1) go func() { defer wg.Done() for j := 0; j < 10000; j++ { w := httptest.NewRecorder() r, err := http.NewRequest("GET", "/ok", nil) if err != nil { t.Fatal(err) } ctx, cancel := context.WithCancel(r.Context()) r = r.WithContext(ctx) go func() { cancel() }() router.ServeHTTP(w, r) } }() } wg.Wait() } func TestEscapedURLParams(t *testing.T) { m := NewRouter() m.Get("/api/{identifier}/{region}/{size}/{rotation}/*", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) rctx := RouteContext(r.Context()) if rctx == nil { t.Error("no context") return } identifier := URLParam(r, "identifier") if identifier != "http:%2f%2fexample.com%2fimage.png" { t.Errorf("identifier path parameter incorrect %s", identifier) return } region := URLParam(r, "region") if region != "full" { t.Errorf("region path parameter incorrect %s", region) return } size := URLParam(r, "size") if size != "max" { t.Errorf("size path parameter incorrect %s", size) return } rotation := URLParam(r, "rotation") if rotation != "0" { t.Errorf("rotation path parameter incorrect %s", rotation) return } w.Write([]byte("success")) }) ts := httptest.NewServer(m) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/api/http:%2f%2fexample.com%2fimage.png/full/max/0/color.png", nil); body != "success" { t.Fatalf(body) } } func TestMuxMatch(t *testing.T) { r := NewRouter() r.Get("/hi", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("X-Test", "yes") w.Write([]byte("bye")) }) r.Route("/articles", func(r Router) { r.Get("/{id}", func(w http.ResponseWriter, r *http.Request) { id := URLParam(r, "id") w.Header().Set("X-Article", id) w.Write([]byte("article:" + id)) }) }) r.Route("/users", func(r Router) { r.Head("/{id}", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("X-User", "-") w.Write([]byte("user")) }) r.Get("/{id}", func(w http.ResponseWriter, r *http.Request) { id 
:= URLParam(r, "id") w.Header().Set("X-User", id) w.Write([]byte("user:" + id)) }) }) tctx := NewRouteContext() tctx.Reset() if r.Match(tctx, "GET", "/users/1") == false { t.Fatal("expecting to find match for route:", "GET", "/users/1") } tctx.Reset() if r.Match(tctx, "HEAD", "/articles/10") == true { t.Fatal("not expecting to find match for route:", "HEAD", "/articles/10") } } func TestServerBaseContext(t *testing.T) { r := NewRouter() r.Get("/", func(w http.ResponseWriter, r *http.Request) { baseYes := r.Context().Value(ctxKey{"base"}).(string) if _, ok := r.Context().Value(http.ServerContextKey).(*http.Server); !ok { panic("missing server context") } if _, ok := r.Context().Value(http.LocalAddrContextKey).(net.Addr); !ok { panic("missing local addr context") } w.Write([]byte(baseYes)) }) // Setup http Server with a base context ctx := context.WithValue(context.Background(), ctxKey{"base"}, "yes") ts := httptest.NewUnstartedServer(r) ts.Config.BaseContext = func(_ net.Listener) context.Context { return ctx } ts.Start() defer ts.Close() if _, body := testRequest(t, ts, "GET", "/", nil); body != "yes" { t.Fatalf(body) } } func testRequest(t *testing.T, ts *httptest.Server, method, path string, body io.Reader) (*http.Response, string) { req, err := http.NewRequest(method, ts.URL+path, body) if err != nil { t.Fatal(err) return nil, "" } resp, err := http.DefaultClient.Do(req) if err != nil { t.Fatal(err) return nil, "" } respBody, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatal(err) return nil, "" } defer resp.Body.Close() return resp, string(respBody) } func testHandler(t *testing.T, h http.Handler, method, path string, body io.Reader) (*http.Response, string) { r, _ := http.NewRequest(method, path, body) w := httptest.NewRecorder() h.ServeHTTP(w, r) return w.Result(), w.Body.String() } type testFileSystem struct { open func(name string) (http.File, error) } func (fs *testFileSystem) Open(name string) (http.File, error) { return fs.open(name) } type 
testFile struct { name string contents []byte } func (tf *testFile) Close() error { return nil } func (tf *testFile) Read(p []byte) (n int, err error) { copy(p, tf.contents) return len(p), nil } func (tf *testFile) Seek(offset int64, whence int) (int64, error) { return 0, nil } func (tf *testFile) Readdir(count int) ([]os.FileInfo, error) { stat, _ := tf.Stat() return []os.FileInfo{stat}, nil } func (tf *testFile) Stat() (os.FileInfo, error) { return &testFileInfo{tf.name, int64(len(tf.contents))}, nil } type testFileInfo struct { name string size int64 } func (tfi *testFileInfo) Name() string { return tfi.name } func (tfi *testFileInfo) Size() int64 { return tfi.size } func (tfi *testFileInfo) Mode() os.FileMode { return 0755 } func (tfi *testFileInfo) ModTime() time.Time { return time.Now() } func (tfi *testFileInfo) IsDir() bool { return false } func (tfi *testFileInfo) Sys() interface{} { return nil } type ctxKey struct { name string } func (k ctxKey) String() string { return "context value " + k.name } func BenchmarkMux(b *testing.B) { h1 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) h2 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) h3 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) h4 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) h5 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) h6 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) mx := NewRouter() mx.Get("/", h1) mx.Get("/hi", h2) mx.Get("/sup/{id}/and/{this}", h3) mx.Get("/sup/{id}/{bar:foo}/{this}", h3) mx.Route("/sharing/{x}/{hash}", func(mx Router) { mx.Get("/", h4) // subrouter-1 mx.Get("/{network}", h5) // subrouter-1 mx.Get("/twitter", h5) mx.Route("/direct", func(mx Router) { mx.Get("/", h6) // subrouter-2 mx.Get("/download", h6) }) }) routes := []string{ "/", "/hi", "/sup/123/and/this", "/sup/123/foo/this", "/sharing/z/aBc", // subrouter-1 "/sharing/z/aBc/twitter", // 
subrouter-1 "/sharing/z/aBc/direct", // subrouter-2 "/sharing/z/aBc/direct/download", // subrouter-2 } for _, path := range routes { b.Run("route:"+path, func(b *testing.B) { w := httptest.NewRecorder() r, _ := http.NewRequest("GET", path, nil) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { mx.ServeHTTP(w, r) } }) } }
package notebook // Notebook is an interface which represents // a collection of notes type Notebook interface { // List returns the list of notes holding in the notebook List() Notes // Get returns a note from the notebook according to its identifier Get([]byte) (Note, error) // Set adds or updates a note in the notebook Set([]byte, []byte) error // Delete deletes a note from the notebook according to its identifier Delete([]byte) error } fix(notebook): fixes the type used to identify notes from an array of byte to a string package notebook // Notebook is an interface which represents // a collection of notes type Notebook interface { // List returns the list of notes holding in the notebook List() Notes // Get returns a note from the notebook according to its name Get(string) (Note, error) // Set adds or updates a note in the notebook Set(string, []byte) error // Delete deletes a note from the notebook according to its name Delete(string) error }
package main

import (
	// stdlib packages
	"encoding/json"
	"flag"
	"fmt"
	"log"
	"os"
	"runtime"
	"strconv"
	"sync"

	// custom packages
	"config"
	"modules/amqpmodule"
	es "modules/elasticsearchmodule"
)

const rxQueue = "conn_scan_results_queue"
const esIndex = "observer"
const esType = "connection"

var broker *amqpmodule.Broker

//the 2 following structs represent the cipherscan output.

// ScanInfo mirrors one cipherscan JSON result for a single target.
type ScanInfo struct {
	Target         string        `json:"target"`
	Timestamp      string        `json:"utctimestamp"`
	ServerSide     string        `json:"serverside"`
	CurvesFallback string        `json:"curves_fallback"`
	CipherSuites   []Ciphersuite `json:"ciphersuite"`
}

// Ciphersuite is one ciphersuite entry from the cipherscan output.
type Ciphersuite struct {
	Cipher       string   `json:"cipher"`
	Protocols    []string `json:"protocols"`
	PubKey       []string `json:"pubkey"`
	SigAlg       []string `json:"sigalg"`
	Trusted      string   `json:"trusted"`
	TicketHint   string   `json:"ticket_hint"`
	OCSPStapling string   `json:"ocsp_stapling"`
	PFS          string   `json:"pfs"`
	Curves       []string `json:"curves,omitempty"`
}

//the following structs represent the output we want to provide to DB.

// ConnectionInfo is the normalized document pushed to ElasticSearch.
type ConnectionInfo struct {
	ConnectionTimestamp string                  `json:"connectionTimestamp"`
	ServerSide          bool                    `json:"serverside"`
	CipherSuites        []ConnectionCiphersuite `json:"ciphersuite"`
	CurvesFallback      bool                    `json:"curvesFallback"`
}

// ConnectionCiphersuite is the normalized form of a single ciphersuite.
type ConnectionCiphersuite struct {
	Cipher       string   `json:"cipher"`
	Protocols    []string `json:"protocols"`
	PubKey       float64  `json:"pubkey"`
	SigAlg       string   `json:"sigalg"`
	TicketHint   string   `json:"ticket_hint"`
	OCSPStapling bool     `json:"ocsp_stapling"`
	PFS          string   `json:"pfs"`
	Curves       []string `json:"curves"`
}

// failOnError logs msg with err and terminates the process.
func failOnError(err error, msg string) {
	if err != nil {
		// log.Fatalf exits the process; the panic that used to follow it
		// here was unreachable and has been removed.
		log.Fatalf("%s: %s", msg, err)
	}
}

// panicIf logs err (if any) and reports whether an error occurred.
func panicIf(err error) bool {
	if err != nil {
		log.Printf("%s", err)
		return true
	}
	return false
}

// stringtoBool maps cipherscan's "True"/"False" strings to a bool.
func stringtoBool(s string) bool {
	return s == "True"
}

// toConnInfo converts a raw cipherscan result into the normalized
// ConnectionInfo document, or returns an error when mandatory fields
// (public key, signature algorithm) are missing.
func (s ScanInfo) toConnInfo() (ConnectionInfo, error) {
	c := ConnectionInfo{}
	var err error

	c.ConnectionTimestamp = s.Timestamp
	c.ServerSide = stringtoBool(s.ServerSide)
	c.CurvesFallback = stringtoBool(s.CurvesFallback)

	for _, cipher := range s.CipherSuites {
		newcipher := ConnectionCiphersuite{}

		newcipher.Cipher = cipher.Cipher
		newcipher.OCSPStapling = stringtoBool(cipher.OCSPStapling)
		newcipher.PFS = cipher.PFS
		newcipher.Protocols = cipher.Protocols

		if len(cipher.PubKey) > 1 {
			log.Println("Multiple PubKeys for ", s.Target, " at cipher :", cipher.Cipher)
		}
		if len(cipher.PubKey) > 0 {
			newcipher.PubKey, err = strconv.ParseFloat(cipher.PubKey[0], 64)
		} else {
			return c, fmt.Errorf("No Public Keys found")
		}

		if len(cipher.SigAlg) > 1 {
			log.Println("Multiple SigAlgs for ", s.Target, " at cipher :", cipher.Cipher)
		}
		if len(cipher.SigAlg) > 0 {
			newcipher.SigAlg = cipher.SigAlg[0]
		} else {
			return c, fmt.Errorf("No Signature Algorithms found")
		}

		newcipher.TicketHint = cipher.TicketHint
		if err != nil {
			// err can only be the ParseFloat failure from above.
			return c, err
		}

		newcipher.Curves = append(newcipher.Curves, cipher.Curves...)
		c.CipherSuites = append(c.CipherSuites, newcipher)
	}

	return c, nil
}

//worker is the main body of the goroutine that handles each received message.
func worker(msgs <-chan []byte) {
	defer wg.Done()

	for d := range msgs {
		info := ScanInfo{}

		err := json.Unmarshal(d, &info)
		panicIf(err)
		if err != nil {
			continue
		}

		c, err := info.toConnInfo()
		panicIf(err)
		if err != nil {
			continue
		}

		id := info.Target
		jsonConn, err := json.Marshal(c)
		panicIf(err)
		if err != nil {
			continue
		}

		err = es.Push(esIndex, esType, id, jsonConn)
		panicIf(err)
	}
	// NOTE(review): previously this function blocked forever on a vestigial
	// `forever` channel after the range loop, so wg.Done() never ran and
	// wg.Wait() in main could never return. The worker now exits when the
	// consume channel is closed.
}

// printIntro prints the startup banner.
func printIntro() {
	fmt.Println(`
	##################################
	#         TLSAnalyzer            #
	##################################
	`)
}

var wg sync.WaitGroup

func main() {
	var (
		err error
	)

	printIntro()

	conf := config.AnalyzerConfig{}

	var cfgFile string
	flag.StringVar(&cfgFile, "c", "/etc/observer/analyzer.cfg", "Input file csv format")
	flag.Parse()

	_, err = os.Stat(cfgFile)
	failOnError(err, "Missing configuration file from '-c' or /etc/observer/retriever.cfg")

	conf, err = config.AnalyzerConfigLoad(cfgFile)
	if err != nil {
		conf = config.GetAnalyzerDefaults()
	}

	cores := runtime.NumCPU()
	runtime.GOMAXPROCS(cores * conf.General.GoRoutines)

	err = es.RegisterConnection(conf.General.ElasticSearch)
	failOnError(err, "Failed to register ElasticSearch")

	broker, err = amqpmodule.RegisterURL(conf.General.RabbitMQRelay)
	failOnError(err, "Failed to register RabbitMQ")

	msgs, err := broker.Consume(rxQueue)
	// The consume error used to be silently dropped.
	failOnError(err, "Failed to consume from queue")

	for i := 0; i < cores; i++ {
		wg.Add(1)
		go worker(msgs)
	}

	wg.Wait()
}
changed ciphersuites from object array to map using algorithm preference as key. This will enable result visualisation in kibana. 
package main

import (
	// stdlib packages
	"encoding/json"
	"flag"
	"fmt"
	"log"
	"os"
	"runtime"
	"strconv"
	"sync"

	// custom packages
	"config"
	"modules/amqpmodule"
	es "modules/elasticsearchmodule"
)

const rxQueue = "conn_scan_results_queue"
const esIndex = "observer"
const esType = "connection"

var broker *amqpmodule.Broker

//the 2 following structs represent the cipherscan output.

// ScanInfo mirrors one cipherscan JSON result for a single target.
type ScanInfo struct {
	Target         string        `json:"target"`
	Timestamp      string        `json:"utctimestamp"`
	ServerSide     string        `json:"serverside"`
	CurvesFallback string        `json:"curves_fallback"`
	CipherSuites   []Ciphersuite `json:"ciphersuite"`
}

// Ciphersuite is one ciphersuite entry from the cipherscan output.
type Ciphersuite struct {
	Cipher       string   `json:"cipher"`
	Protocols    []string `json:"protocols"`
	PubKey       []string `json:"pubkey"`
	SigAlg       []string `json:"sigalg"`
	Trusted      string   `json:"trusted"`
	TicketHint   string   `json:"ticket_hint"`
	OCSPStapling string   `json:"ocsp_stapling"`
	PFS          string   `json:"pfs"`
	Curves       []string `json:"curves,omitempty"`
}

//the following structs represent the output we want to provide to DB.

// ConnectionInfo is the normalized document pushed to ElasticSearch.
// CipherSuites is keyed by the 1-based algorithm-preference position
// (as a string) so the data can be visualised in kibana.
type ConnectionInfo struct {
	ConnectionTimestamp string                           `json:"connectionTimestamp"`
	ServerSide          bool                             `json:"serverside"`
	CipherSuites        map[string]ConnectionCiphersuite `json:"ciphersuite"`
	CurvesFallback      bool                             `json:"curvesFallback"`
}

// ConnectionCiphersuite is the normalized form of a single ciphersuite.
type ConnectionCiphersuite struct {
	Cipher       string   `json:"cipher"`
	Protocols    []string `json:"protocols"`
	PubKey       float64  `json:"pubkey"`
	SigAlg       string   `json:"sigalg"`
	TicketHint   string   `json:"ticket_hint"`
	OCSPStapling bool     `json:"ocsp_stapling"`
	PFS          string   `json:"pfs"`
	Curves       []string `json:"curves"`
}

// failOnError logs msg with err and terminates the process.
func failOnError(err error, msg string) {
	if err != nil {
		// log.Fatalf exits the process; the panic that used to follow it
		// here was unreachable and has been removed.
		log.Fatalf("%s: %s", msg, err)
	}
}

// panicIf logs err (if any) and reports whether an error occurred.
func panicIf(err error) bool {
	if err != nil {
		log.Printf("%s", err)
		return true
	}
	return false
}

// stringtoBool maps cipherscan's "True"/"False" strings to a bool.
func stringtoBool(s string) bool {
	return s == "True"
}

// toConnInfo converts a raw cipherscan result into the normalized
// ConnectionInfo document, keying each ciphersuite by its 1-based
// preference position, or returns an error when mandatory fields
// (public key, signature algorithm) are missing.
func (s ScanInfo) toConnInfo() (ConnectionInfo, error) {
	c := ConnectionInfo{}
	var err error

	c.ConnectionTimestamp = s.Timestamp
	c.ServerSide = stringtoBool(s.ServerSide)
	c.CurvesFallback = stringtoBool(s.CurvesFallback)
	c.CipherSuites = make(map[string]ConnectionCiphersuite, len(s.CipherSuites))

	pos := 1
	for _, cipher := range s.CipherSuites {
		newcipher := ConnectionCiphersuite{}

		newcipher.Cipher = cipher.Cipher
		newcipher.OCSPStapling = stringtoBool(cipher.OCSPStapling)
		newcipher.PFS = cipher.PFS
		newcipher.Protocols = cipher.Protocols

		if len(cipher.PubKey) > 1 {
			log.Println("Multiple PubKeys for ", s.Target, " at cipher :", cipher.Cipher)
		}
		if len(cipher.PubKey) > 0 {
			newcipher.PubKey, err = strconv.ParseFloat(cipher.PubKey[0], 64)
		} else {
			return c, fmt.Errorf("No Public Keys found")
		}

		if len(cipher.SigAlg) > 1 {
			log.Println("Multiple SigAlgs for ", s.Target, " at cipher :", cipher.Cipher)
		}
		if len(cipher.SigAlg) > 0 {
			newcipher.SigAlg = cipher.SigAlg[0]
		} else {
			return c, fmt.Errorf("No Signature Algorithms found")
		}

		newcipher.TicketHint = cipher.TicketHint
		if err != nil {
			// err can only be the ParseFloat failure from above.
			return c, err
		}

		newcipher.Curves = append(newcipher.Curves, cipher.Curves...)
		c.CipherSuites[strconv.Itoa(pos)] = newcipher
		pos++
	}

	return c, nil
}

//worker is the main body of the goroutine that handles each received message.
func worker(msgs <-chan []byte) {
	defer wg.Done()

	for d := range msgs {
		info := ScanInfo{}

		err := json.Unmarshal(d, &info)
		panicIf(err)
		if err != nil {
			continue
		}

		c, err := info.toConnInfo()
		panicIf(err)
		if err != nil {
			continue
		}

		id := info.Target
		jsonConn, err := json.Marshal(c)
		panicIf(err)
		if err != nil {
			continue
		}

		err = es.Push(esIndex, esType, id, jsonConn)
		panicIf(err)
	}
	// NOTE(review): previously this function blocked forever on a vestigial
	// `forever` channel after the range loop, so wg.Done() never ran and
	// wg.Wait() in main could never return. The worker now exits when the
	// consume channel is closed.
}

// printIntro prints the startup banner.
func printIntro() {
	fmt.Println(`
	##################################
	#         TLSAnalyzer            #
	##################################
	`)
}

var wg sync.WaitGroup

func main() {
	var (
		err error
	)

	printIntro()

	conf := config.AnalyzerConfig{}

	var cfgFile string
	flag.StringVar(&cfgFile, "c", "/etc/observer/analyzer.cfg", "Input file csv format")
	flag.Parse()

	_, err = os.Stat(cfgFile)
	failOnError(err, "Missing configuration file from '-c' or /etc/observer/retriever.cfg")

	conf, err = config.AnalyzerConfigLoad(cfgFile)
	if err != nil {
		conf = config.GetAnalyzerDefaults()
	}

	cores := runtime.NumCPU()
	runtime.GOMAXPROCS(cores * conf.General.GoRoutines)

	err = es.RegisterConnection(conf.General.ElasticSearch)
	failOnError(err, "Failed to register ElasticSearch")

	broker, err = amqpmodule.RegisterURL(conf.General.RabbitMQRelay)
	failOnError(err, "Failed to register RabbitMQ")

	msgs, err := broker.Consume(rxQueue)
	// The consume error used to be silently dropped.
	failOnError(err, "Failed to consume from queue")

	for i := 0; i < cores; i++ {
		wg.Add(1)
		go worker(msgs)
	}

	wg.Wait()
}